From 77a5865a22d290033aec1894b2c79e688f713238 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 13 Jan 2024 22:30:30 +0100 Subject: [PATCH 001/566] Adding FP16 --- base/base/BFloat16.h | 9 ++++++++ base/base/DecomposedFloat.h | 10 +++++++++ base/base/TypeLists.h | 2 +- base/base/TypeName.h | 1 + base/base/extended_types.h | 20 ++++++++++++++--- base/base/wide_integer.h | 2 +- base/base/wide_integer_impl.h | 8 ++++++- .../AggregateFunctionGroupArray.cpp | 4 ++-- .../AggregateFunctionGroupArrayMoving.cpp | 2 +- .../AggregateFunctionIntervalLengthSum.cpp | 4 ++-- .../AggregateFunctionSparkbar.cpp | 6 ++--- src/AggregateFunctions/AggregateFunctionSum.h | 6 ++--- .../AggregateFunctionUniqCombined.h | 2 +- src/AggregateFunctions/QuantileTDigest.h | 2 +- src/AggregateFunctions/ReservoirSampler.h | 2 +- .../ReservoirSamplerDeterministic.h | 2 +- src/Columns/ColumnArray.cpp | 4 ++++ src/Columns/ColumnNullable.cpp | 2 ++ src/Columns/ColumnVector.cpp | 13 ++++++----- src/Columns/ColumnVector.h | 1 + src/Columns/ColumnsCommon.cpp | 1 + src/Columns/ColumnsNumber.h | 1 + src/Columns/MaskOperations.cpp | 2 ++ src/Columns/tests/gtest_column_vector.cpp | 1 + src/Columns/tests/gtest_low_cardinality.cpp | 1 + src/Common/FieldVisitorConvertToNumber.h | 4 ++-- src/Common/HashTable/Hash.h | 1 + src/Common/HashTable/HashTable.h | 2 +- src/Common/NaNUtils.h | 6 ++--- src/Common/findExtreme.h | 2 +- src/Common/transformEndianness.h | 2 +- src/Core/AccurateComparison.h | 18 +++++++-------- src/Core/DecimalFunctions.h | 2 +- src/Core/Field.h | 1 + src/Core/SortCursor.h | 1 + src/Core/TypeId.h | 2 ++ src/Core/Types_fwd.h | 7 +----- src/Core/callOnTypeIndex.h | 3 +++ src/DataTypes/DataTypeNumberBase.cpp | 1 + src/DataTypes/DataTypeNumberBase.h | 1 + src/DataTypes/DataTypesDecimal.h | 5 +++-- src/DataTypes/DataTypesNumber.cpp | 1 + src/DataTypes/DataTypesNumber.h | 1 + src/DataTypes/IDataType.h | 5 ++++- src/DataTypes/NumberTraits.h | 22 +++++++++---------- .../Serializations/SerializationNumber.cpp | 1 + src/DataTypes/Utils.cpp | 7 ++++++ src/DataTypes/getLeastSupertype.cpp | 6 ++++- src/DataTypes/getMostSubtype.cpp | 6 ++++- src/Formats/ProtobufSerializer.cpp | 2 +- src/Functions/DivisionUtils.h | 16 +++++++------- src/Functions/FunctionMathUnary.h | 4 ++-- src/Functions/FunctionsConversion.h | 12 +++++----- src/Functions/FunctionsRound.h | 2 +- src/Functions/factorial.cpp | 2 +- src/Functions/minus.cpp | 4 ++-- src/Functions/moduloOrZero.cpp | 2 +- src/Functions/multiply.cpp | 4 ++-- src/Functions/plus.cpp | 4 ++-- src/Functions/sign.cpp | 2 +- src/IO/ReadHelpers.h | 4 +++- src/IO/WriteHelpers.h | 22 +++++++++++-------- src/Interpreters/RowRefs.cpp | 2 +- 63 files changed, 192 insertions(+), 105 deletions(-) create mode 100644 base/base/BFloat16.h diff --git a/base/base/BFloat16.h b/base/base/BFloat16.h new file mode 100644 index 00000000000..17c3ebe9ef3 --- /dev/null +++ b/base/base/BFloat16.h @@ -0,0 +1,9 @@ +#pragma once + +using BFloat16 = __bf16; + +namespace std +{ + inline constexpr bool isfinite(BFloat16) { return true; } + inline constexpr bool signbit(BFloat16) { return false; } +} diff --git a/base/base/DecomposedFloat.h b/base/base/DecomposedFloat.h index f152637b94e..fda7ee8d3f4 100644 --- a/base/base/DecomposedFloat.h +++ b/base/base/DecomposedFloat.h @@ -10,6 +10,15 @@ template struct FloatTraits; +template <> +struct FloatTraits<__bf16> +{ + using UInt = uint16_t; + static constexpr size_t bits = 16; + static constexpr size_t exponent_bits = 8; + static constexpr size_t mantissa_bits = bits 
- exponent_bits - 1; +}; + template <> struct FloatTraits { @@ -217,3 +226,4 @@ struct DecomposedFloat using DecomposedFloat64 = DecomposedFloat; using DecomposedFloat32 = DecomposedFloat; +using DecomposedFloat16 = DecomposedFloat<__bf16>; diff --git a/base/base/TypeLists.h b/base/base/TypeLists.h index 6c1283d054c..ce3111b1da3 100644 --- a/base/base/TypeLists.h +++ b/base/base/TypeLists.h @@ -9,7 +9,7 @@ namespace DB { using TypeListNativeInt = TypeList; -using TypeListFloat = TypeList; +using TypeListFloat = TypeList; using TypeListNativeNumber = TypeListConcat; using TypeListWideInt = TypeList; using TypeListInt = TypeListConcat; diff --git a/base/base/TypeName.h b/base/base/TypeName.h index 9005b5a2bf4..1f4b475d653 100644 --- a/base/base/TypeName.h +++ b/base/base/TypeName.h @@ -32,6 +32,7 @@ TN_MAP(Int32) TN_MAP(Int64) TN_MAP(Int128) TN_MAP(Int256) +TN_MAP(BFloat16) TN_MAP(Float32) TN_MAP(Float64) TN_MAP(String) diff --git a/base/base/extended_types.h b/base/base/extended_types.h index b58df45a97e..39665784141 100644 --- a/base/base/extended_types.h +++ b/base/base/extended_types.h @@ -4,6 +4,8 @@ #include #include +#include + using Int128 = wide::integer<128, signed>; using UInt128 = wide::integer<128, unsigned>; @@ -24,6 +26,7 @@ struct is_signed // NOLINT(readability-identifier-naming) template <> struct is_signed { static constexpr bool value = true; }; template <> struct is_signed { static constexpr bool value = true; }; +template <> struct is_signed { static constexpr bool value = true; }; template inline constexpr bool is_signed_v = is_signed::value; @@ -47,8 +50,6 @@ template concept is_integer = || std::is_same_v || std::is_same_v; -template concept is_floating_point = std::is_floating_point_v; - template struct is_arithmetic // NOLINT(readability-identifier-naming) { @@ -59,11 +60,24 @@ template <> struct is_arithmetic { static constexpr bool value = true; } template <> struct is_arithmetic { static constexpr bool value = true; }; template <> struct is_arithmetic { static constexpr bool value = true; }; template <> struct is_arithmetic { static constexpr bool value = true; }; - +template <> struct is_arithmetic { static constexpr bool value = true; }; template inline constexpr bool is_arithmetic_v = is_arithmetic::value; + +template +struct is_floating_point // NOLINT(readability-identifier-naming) +{ + static constexpr bool value = std::is_floating_point_v; +}; + +template <> struct is_floating_point { static constexpr bool value = true; }; + +template +inline constexpr bool is_floating_point_v = is_floating_point::value; + + template struct make_unsigned // NOLINT(readability-identifier-naming) { diff --git a/base/base/wide_integer.h b/base/base/wide_integer.h index ffd30460c03..877ef5bd137 100644 --- a/base/base/wide_integer.h +++ b/base/base/wide_integer.h @@ -117,6 +117,7 @@ public: constexpr operator long double() const noexcept; constexpr operator double() const noexcept; constexpr operator float() const noexcept; + constexpr operator __bf16() const noexcept; struct _impl; @@ -262,4 +263,3 @@ struct hash>; // NOLINTEND(*) #include "wide_integer_impl.h" - diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h index c1fd7b69b7f..7b95164e44d 100644 --- a/base/base/wide_integer_impl.h +++ b/base/base/wide_integer_impl.h @@ -154,7 +154,7 @@ struct common_type, Arithmetic> static_assert(wide::ArithmeticConcept()); using type = std::conditional_t< - std::is_floating_point_v, + is_floating_point_v, Arithmetic, std::conditional_t< sizeof(Arithmetic) * 8 < 
Bits, @@ -1291,6 +1291,12 @@ constexpr integer::operator float() const noexcept return static_cast(static_cast(*this)); } +template +constexpr integer::operator __bf16() const noexcept +{ + return static_cast<__bf16>(static_cast(*this)); +} + // Unary operators template constexpr integer operator~(const integer & lhs) noexcept diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp index 6c6397e35d5..bcefa6b93dc 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp @@ -74,7 +74,7 @@ template struct GroupArraySamplerData { /// For easy serialization. - static_assert(std::has_unique_object_representations_v || std::is_floating_point_v); + static_assert(std::has_unique_object_representations_v || is_floating_point_v); // Switch to ordinary Allocator after 4096 bytes to avoid fragmentation and trash in Arena using Allocator = MixedAlignedArenaAllocator; @@ -116,7 +116,7 @@ template struct GroupArrayNumericData { /// For easy serialization. - static_assert(std::has_unique_object_representations_v || std::is_floating_point_v); + static_assert(std::has_unique_object_representations_v || is_floating_point_v); // Switch to ordinary Allocator after 4096 bytes to avoid fragmentation and trash in Arena using Allocator = MixedAlignedArenaAllocator; diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp index 026b8d1956f..ee6a82686c5 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp @@ -38,7 +38,7 @@ template struct MovingData { /// For easy serialization. 
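The FloatTraits<__bf16> specialization above pins the bfloat16 layout to 1 sign bit, 8 exponent bits and 7 stored mantissa bits (16 - 8 - 1), which is what the new DecomposedFloat16 alias relies on. Below is a minimal, stand-alone sketch of that decomposition, assuming a compiler where __bf16 supports conversions (as the patch itself requires); the DecomposedBFloat16Sketch helper is illustrative and not part of the patch.

#include <cstdint>
#include <cstdio>
#include <cstring>

/// Illustrative only: split a bfloat16 bit pattern into sign/exponent/mantissa,
/// mirroring FloatTraits<__bf16> (bits = 16, exponent_bits = 8, mantissa_bits = 7).
struct DecomposedBFloat16Sketch
{
    uint16_t x;

    explicit DecomposedBFloat16Sketch(__bf16 value) { std::memcpy(&x, &value, sizeof(x)); }

    bool isNegative() const { return x >> 15; }
    uint16_t exponent() const { return (x >> 7) & 0xFF; }
    uint16_t mantissa() const { return x & 0x7F; }
};

int main()
{
    DecomposedBFloat16Sketch d(static_cast<__bf16>(-1.5f));
    std::printf("sign=%d exponent=%d mantissa=%d\n",
                d.isNegative(), int(d.exponent()), int(d.mantissa()));  /// sign=1 exponent=127 mantissa=64
}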
- static_assert(std::has_unique_object_representations_v || std::is_floating_point_v); + static_assert(std::has_unique_object_representations_v || is_floating_point_v); using Accumulator = T; diff --git a/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp b/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp index eacd0596757..06156643aa0 100644 --- a/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp +++ b/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp @@ -187,7 +187,7 @@ public: static DataTypePtr createResultType() { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return std::make_shared(); return std::make_shared(); } @@ -227,7 +227,7 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) assert_cast(to).getData().push_back(getIntervalLengthSum(this->data(place))); else assert_cast(to).getData().push_back(getIntervalLengthSum(this->data(place))); diff --git a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp index b6e538520a8..f4214f3a133 100644 --- a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp +++ b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp @@ -50,7 +50,7 @@ struct AggregateFunctionSparkbarData auto [it, inserted] = points.insert({x, y}); if (!inserted) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { it->getMapped() += y; return it->getMapped(); @@ -197,7 +197,7 @@ private: Y res; bool has_overfllow = false; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) res = histogram[index] + point.getMapped(); else has_overfllow = common::addOverflow(histogram[index], point.getMapped(), res); @@ -246,7 +246,7 @@ private: } constexpr auto levels_num = static_cast(BAR_LEVELS - 1); - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { y = y / (y_max / levels_num) + 1; } diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index 5781ab69c6b..81df3244b38 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -69,7 +69,7 @@ struct AggregateFunctionSumData size_t count = end - start; const auto * end_ptr = ptr + count; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { /// Compiler cannot unroll this loop, do it manually. 
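The comment above ("Compiler cannot unroll this loop, do it manually") refers to the partial-sum trick in AggregateFunctionSumData: without -fassociative-math the compiler has to keep floating-point additions in source order, so the code maintains several independent accumulators itself. A reduced sketch of the idea follows; the unroll factor and the unrolledSum name are illustrative, not the ones used in the file.

#include <cstddef>

/// Illustrative only: sum a float array with four independent partial sums so the
/// additions can be pipelined/vectorized even without reassociation.
static float unrolledSum(const float * data, size_t count)
{
    float partial[4] = {0, 0, 0, 0};
    size_t i = 0;
    for (; i + 4 <= count; i += 4)
    {
        partial[0] += data[i + 0];
        partial[1] += data[i + 1];
        partial[2] += data[i + 2];
        partial[3] += data[i + 3];
    }
    float sum = (partial[0] + partial[1]) + (partial[2] + partial[3]);
    for (; i < count; ++i)
        sum += data[i];
    return sum;
}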
/// (at least for floats, most likely due to the lack of -fassociative-math) @@ -164,7 +164,7 @@ struct AggregateFunctionSumData return; } - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { /// For floating point we use a similar trick as above, except that now we reinterpret the floating point number as an unsigned /// integer of the same size and use a mask instead (0 to discard, 0xFF..FF to keep) @@ -277,7 +277,7 @@ struct AggregateFunctionSumData template struct AggregateFunctionSumKahanData { - static_assert(std::is_floating_point_v, + static_assert(is_floating_point_v, "It doesn't make sense to use Kahan Summation algorithm for non floating point types"); T sum{}; diff --git a/src/AggregateFunctions/AggregateFunctionUniqCombined.h b/src/AggregateFunctions/AggregateFunctionUniqCombined.h index 10774442610..19e2665f9af 100644 --- a/src/AggregateFunctions/AggregateFunctionUniqCombined.h +++ b/src/AggregateFunctions/AggregateFunctionUniqCombined.h @@ -114,7 +114,7 @@ public: /// Initially UInt128 was introduced only for UUID, and then the other big-integer types were added. hash = static_cast(sipHash64(value)); } - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { hash = static_cast(intHash64(bit_cast(value))); } diff --git a/src/AggregateFunctions/QuantileTDigest.h b/src/AggregateFunctions/QuantileTDigest.h index 979c3f2af15..1407b73e669 100644 --- a/src/AggregateFunctions/QuantileTDigest.h +++ b/src/AggregateFunctions/QuantileTDigest.h @@ -380,7 +380,7 @@ public: ResultType getImpl(Float64 level) { if (centroids.empty()) - return std::is_floating_point_v ? std::numeric_limits::quiet_NaN() : 0; + return is_floating_point_v ? std::numeric_limits::quiet_NaN() : 0; compress(); diff --git a/src/AggregateFunctions/ReservoirSampler.h b/src/AggregateFunctions/ReservoirSampler.h index 37fc05a2e4c..242540102b8 100644 --- a/src/AggregateFunctions/ReservoirSampler.h +++ b/src/AggregateFunctions/ReservoirSampler.h @@ -278,6 +278,6 @@ private: if (OnEmpty == ReservoirSamplerOnEmpty::THROW) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSampler"); else - return NanLikeValueConstructor>::getValue(); + return NanLikeValueConstructor>::getValue(); } }; diff --git a/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h index daed0b98ca3..75af6638183 100644 --- a/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ b/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -273,7 +273,7 @@ private: if (OnEmpty == ReservoirSamplerDeterministicOnEmpty::THROW) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSamplerDeterministic"); else - return NanLikeValueConstructor>::getValue(); + return NanLikeValueConstructor>::getValue(); } }; diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index 1cb8188bce6..4aaaf01e5ea 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -574,6 +574,8 @@ ColumnPtr ColumnArray::filter(const Filter & filt, ssize_t result_size_hint) con return filterNumber(filt, result_size_hint); if (typeid_cast(data.get())) return filterNumber(filt, result_size_hint); + if (typeid_cast(data.get())) + return filterNumber(filt, result_size_hint); if (typeid_cast(data.get())) return filterNumber(filt, result_size_hint); if (typeid_cast(data.get())) @@ -993,6 +995,8 @@ ColumnPtr ColumnArray::replicate(const Offsets & replicate_offsets) const return 
replicateNumber(replicate_offsets); if (typeid_cast(data.get())) return replicateNumber(replicate_offsets); + if (typeid_cast(data.get())) + return replicateNumber(replicate_offsets); if (typeid_cast(data.get())) return replicateNumber(replicate_offsets); if (typeid_cast(data.get())) diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index 4ee6bb3d586..3513ac06dcd 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -171,6 +171,8 @@ StringRef ColumnNullable::serializeValueIntoArena(size_t n, Arena & arena, char return static_cast(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]); case TypeIndex::Int256: return static_cast(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]); + case TypeIndex::BFloat16: + return static_cast(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]); case TypeIndex::Float32: return static_cast(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]); case TypeIndex::Float64: diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp index b1cf449dfde..bad84e7147c 100644 --- a/src/Columns/ColumnVector.cpp +++ b/src/Columns/ColumnVector.cpp @@ -141,7 +141,7 @@ struct ColumnVector::less_stable if (unlikely(parent.data[lhs] == parent.data[rhs])) return lhs < rhs; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { if (unlikely(std::isnan(parent.data[lhs]) && std::isnan(parent.data[rhs]))) { @@ -173,7 +173,7 @@ struct ColumnVector::greater_stable if (unlikely(parent.data[lhs] == parent.data[rhs])) return lhs < rhs; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { if (unlikely(std::isnan(parent.data[lhs]) && std::isnan(parent.data[rhs]))) { @@ -259,7 +259,7 @@ void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction bool sort_is_stable = stability == IColumn::PermutationSortStability::Stable; /// TODO: LSD RadixSort is currently not stable if direction is descending, or value is floating point - bool use_radix_sort = (sort_is_stable && ascending && !std::is_floating_point_v) || !sort_is_stable; + bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point_v) || !sort_is_stable; /// Thresholds on size. Lower threshold is arbitrary. Upper threshold is chosen by the type for histogram counters. if (data_size >= 256 && data_size <= std::numeric_limits::max() && use_radix_sort) @@ -286,7 +286,7 @@ void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction /// Radix sort treats all NaNs to be greater than all numbers. /// If the user needs the opposite, we must move them accordingly. - if (std::is_floating_point_v && nan_direction_hint < 0) + if (is_floating_point_v && nan_direction_hint < 0) { size_t nans_to_move = 0; @@ -333,7 +333,7 @@ void ColumnVector::updatePermutation(IColumn::PermutationSortDirection direct if constexpr (is_arithmetic_v && !is_big_int_v) { /// TODO: LSD RadixSort is currently not stable if direction is descending, or value is floating point - bool use_radix_sort = (sort_is_stable && ascending && !std::is_floating_point_v) || !sort_is_stable; + bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point_v) || !sort_is_stable; size_t size = end - begin; /// Thresholds on size. Lower threshold is arbitrary. Upper threshold is chosen by the type for histogram counters. 
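Both getPermutation and updatePermutation end with the NaN fix-up described in the comments below: LSD radix sort leaves all NaNs after the numbers, so when the caller asked for NaNs to sort first (nan_direction_hint < 0) the trailing NaN block has to be rotated to the front of the permutation. A simplified, stand-alone version of that step; moveNaNsToFront is an illustrative name, and the real code operates on the column data and permutation ranges in place.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

/// Illustrative only: after an ascending sort that placed NaNs last, move them to the
/// front of the permutation so that they compare as "smallest".
static void moveNaNsToFront(const std::vector<float> & data, std::vector<size_t> & permutation)
{
    size_t nans_to_move = 0;
    for (size_t i = 0; i < permutation.size(); ++i)
    {
        if (std::isnan(data[permutation[permutation.size() - 1 - i]]))
            ++nans_to_move;
        else
            break;
    }

    if (nans_to_move)
        std::rotate(permutation.begin(), permutation.end() - nans_to_move, permutation.end());
}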
@@ -356,7 +356,7 @@ void ColumnVector::updatePermutation(IColumn::PermutationSortDirection direct /// Radix sort treats all NaNs to be greater than all numbers. /// If the user needs the opposite, we must move them accordingly. - if (std::is_floating_point_v && nan_direction_hint < 0) + if (is_floating_point_v && nan_direction_hint < 0) { size_t nans_to_move = 0; @@ -970,6 +970,7 @@ template class ColumnVector; template class ColumnVector; template class ColumnVector; template class ColumnVector; +template class ColumnVector; template class ColumnVector; template class ColumnVector; template class ColumnVector; diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index fab2d5f06aa..c976fac3bab 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -570,6 +570,7 @@ extern template class ColumnVector; extern template class ColumnVector; extern template class ColumnVector; extern template class ColumnVector; +extern template class ColumnVector; extern template class ColumnVector; extern template class ColumnVector; extern template class ColumnVector; diff --git a/src/Columns/ColumnsCommon.cpp b/src/Columns/ColumnsCommon.cpp index 4ac84e10750..444f5fae87a 100644 --- a/src/Columns/ColumnsCommon.cpp +++ b/src/Columns/ColumnsCommon.cpp @@ -328,6 +328,7 @@ INSTANTIATE(Int32) INSTANTIATE(Int64) INSTANTIATE(Int128) INSTANTIATE(Int256) +INSTANTIATE(BFloat16) INSTANTIATE(Float32) INSTANTIATE(Float64) INSTANTIATE(Decimal32) diff --git a/src/Columns/ColumnsNumber.h b/src/Columns/ColumnsNumber.h index ae7eddb0b22..2dce2269079 100644 --- a/src/Columns/ColumnsNumber.h +++ b/src/Columns/ColumnsNumber.h @@ -23,6 +23,7 @@ using ColumnInt64 = ColumnVector; using ColumnInt128 = ColumnVector; using ColumnInt256 = ColumnVector; +using ColumnBFloat16 = ColumnVector; using ColumnFloat32 = ColumnVector; using ColumnFloat64 = ColumnVector; diff --git a/src/Columns/MaskOperations.cpp b/src/Columns/MaskOperations.cpp index b84268356a7..ca4ca263811 100644 --- a/src/Columns/MaskOperations.cpp +++ b/src/Columns/MaskOperations.cpp @@ -63,6 +63,7 @@ INSTANTIATE(Int32) INSTANTIATE(Int64) INSTANTIATE(Int128) INSTANTIATE(Int256) +INSTANTIATE(BFloat16) INSTANTIATE(Float32) INSTANTIATE(Float64) INSTANTIATE(Decimal32) @@ -225,6 +226,7 @@ MaskInfo extractMaskImpl( || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info) || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info) || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info) + || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info) || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info) || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info))) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot convert column {} to mask.", column->getName()); diff --git a/src/Columns/tests/gtest_column_vector.cpp b/src/Columns/tests/gtest_column_vector.cpp index b71d4a095ab..3a084a89079 100644 --- a/src/Columns/tests/gtest_column_vector.cpp +++ b/src/Columns/tests/gtest_column_vector.cpp @@ -93,6 +93,7 @@ TEST(ColumnVector, Filter) testFilter(); testFilter(); testFilter(); + testFilter(); testFilter(); testFilter(); testFilter(); diff --git a/src/Columns/tests/gtest_low_cardinality.cpp b/src/Columns/tests/gtest_low_cardinality.cpp index 5e01279b7df..965c0d219b9 100644 --- a/src/Columns/tests/gtest_low_cardinality.cpp +++ b/src/Columns/tests/gtest_low_cardinality.cpp @@ -45,6 +45,7 @@ TEST(ColumnLowCardinality, 
Insert) testLowCardinalityNumberInsert(std::make_shared()); testLowCardinalityNumberInsert(std::make_shared()); + testLowCardinalityNumberInsert(std::make_shared()); testLowCardinalityNumberInsert(std::make_shared()); testLowCardinalityNumberInsert(std::make_shared()); } diff --git a/src/Common/FieldVisitorConvertToNumber.h b/src/Common/FieldVisitorConvertToNumber.h index bf8c8c8638e..38144650b97 100644 --- a/src/Common/FieldVisitorConvertToNumber.h +++ b/src/Common/FieldVisitorConvertToNumber.h @@ -58,7 +58,7 @@ public: T operator() (const Float64 & x) const { - if constexpr (!std::is_floating_point_v) + if constexpr (!is_floating_point_v) { if (!isFinite(x)) { @@ -88,7 +88,7 @@ public: template T operator() (const DecimalField & x) const { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return x.getValue().template convertTo() / x.getScaleMultiplier().template convertTo(); else return (x.getValue() / x.getScaleMultiplier()). template convertTo(); diff --git a/src/Common/HashTable/Hash.h b/src/Common/HashTable/Hash.h index fb6afcde133..b4bc6af1cef 100644 --- a/src/Common/HashTable/Hash.h +++ b/src/Common/HashTable/Hash.h @@ -322,6 +322,7 @@ DEFINE_HASH(Int32) DEFINE_HASH(Int64) DEFINE_HASH(Int128) DEFINE_HASH(Int256) +DEFINE_HASH(BFloat16) DEFINE_HASH(Float32) DEFINE_HASH(Float64) DEFINE_HASH(DB::UUID) diff --git a/src/Common/HashTable/HashTable.h b/src/Common/HashTable/HashTable.h index f23c4ca15dd..e4d5d3868c8 100644 --- a/src/Common/HashTable/HashTable.h +++ b/src/Common/HashTable/HashTable.h @@ -91,7 +91,7 @@ inline bool bitEquals(T && a, T && b) { using RealT = std::decay_t; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return 0 == memcmp(&a, &b, sizeof(RealT)); /// Note that memcmp with constant size is compiler builtin. else return a == b; diff --git a/src/Common/NaNUtils.h b/src/Common/NaNUtils.h index 1c5a619e919..6363e3e61a2 100644 --- a/src/Common/NaNUtils.h +++ b/src/Common/NaNUtils.h @@ -9,7 +9,7 @@ template inline bool isNaN(T x) { /// To be sure, that this function is zero-cost for non-floating point types. 
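Most of the mechanical churn in this patch replaces std::is_floating_point_v with the project-local is_floating_point_v from base/base/extended_types.h, so that BFloat16 is treated as a floating-point type everywhere the standard trait would not cover it (isNaN/isFinite above, hashing, comparisons, serialization, and so on). A reduced sketch of that trait, following the extended_types.h hunk; the static_asserts are only a usage check.

#include <type_traits>

using BFloat16 = __bf16;

/// Reduced sketch of the trait added in base/base/extended_types.h: defer to the
/// standard trait and explicitly opt BFloat16 in.
template <typename T>
struct is_floating_point
{
    static constexpr bool value = std::is_floating_point_v<T>;
};

template <> struct is_floating_point<BFloat16> { static constexpr bool value = true; };

template <typename T>
inline constexpr bool is_floating_point_v = is_floating_point<T>::value;

static_assert(is_floating_point_v<BFloat16>);
static_assert(is_floating_point_v<double>);
static_assert(!is_floating_point_v<int>);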
- if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return std::isnan(x); else return false; @@ -19,7 +19,7 @@ inline bool isNaN(T x) template inline bool isFinite(T x) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return std::isfinite(x); else return true; @@ -29,7 +29,7 @@ inline bool isFinite(T x) template T NaNOrZero() { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return std::numeric_limits::quiet_NaN(); else return {}; diff --git a/src/Common/findExtreme.h b/src/Common/findExtreme.h index b38c24697c0..611af023d33 100644 --- a/src/Common/findExtreme.h +++ b/src/Common/findExtreme.h @@ -11,7 +11,7 @@ namespace DB { template -concept is_any_native_number = (is_any_of); +concept is_any_native_number = (is_any_of); template std::optional findExtremeMin(const T * __restrict ptr, size_t start, size_t end); diff --git a/src/Common/transformEndianness.h b/src/Common/transformEndianness.h index 1657305acda..2a0c45efe38 100644 --- a/src/Common/transformEndianness.h +++ b/src/Common/transformEndianness.h @@ -38,7 +38,7 @@ inline void transformEndianness(T & x) } template -requires std::is_floating_point_v +requires is_floating_point_v inline void transformEndianness(T & value) { if constexpr (ToEndian != FromEndian) diff --git a/src/Core/AccurateComparison.h b/src/Core/AccurateComparison.h index a201c136e3a..82d06876fe3 100644 --- a/src/Core/AccurateComparison.h +++ b/src/Core/AccurateComparison.h @@ -25,7 +25,7 @@ bool lessOp(A a, B b) return a < b; /// float vs float - if constexpr (std::is_floating_point_v && std::is_floating_point_v) + if constexpr (is_floating_point_v && is_floating_point_v) return a < b; /// anything vs NaN @@ -49,7 +49,7 @@ bool lessOp(A a, B b) } /// int vs float - if constexpr (is_integer && std::is_floating_point_v) + if constexpr (is_integer && is_floating_point_v) { if constexpr (sizeof(A) <= 4) return static_cast(a) < static_cast(b); @@ -57,7 +57,7 @@ bool lessOp(A a, B b) return DecomposedFloat(b).greater(a); } - if constexpr (std::is_floating_point_v && is_integer) + if constexpr (is_floating_point_v && is_integer) { if constexpr (sizeof(B) <= 4) return static_cast(a) < static_cast(b); @@ -65,8 +65,8 @@ bool lessOp(A a, B b) return DecomposedFloat(a).less(b); } - static_assert(is_integer || std::is_floating_point_v); - static_assert(is_integer || std::is_floating_point_v); + static_assert(is_integer || is_floating_point_v); + static_assert(is_integer || is_floating_point_v); UNREACHABLE(); } @@ -101,7 +101,7 @@ bool equalsOp(A a, B b) return a == b; /// float vs float - if constexpr (std::is_floating_point_v && std::is_floating_point_v) + if constexpr (is_floating_point_v && is_floating_point_v) return a == b; /// anything vs NaN @@ -125,7 +125,7 @@ bool equalsOp(A a, B b) } /// int vs float - if constexpr (is_integer && std::is_floating_point_v) + if constexpr (is_integer && is_floating_point_v) { if constexpr (sizeof(A) <= 4) return static_cast(a) == static_cast(b); @@ -133,7 +133,7 @@ bool equalsOp(A a, B b) return DecomposedFloat(b).equals(a); } - if constexpr (std::is_floating_point_v && is_integer) + if constexpr (is_floating_point_v && is_integer) { if constexpr (sizeof(B) <= 4) return static_cast(a) == static_cast(b); @@ -163,7 +163,7 @@ inline bool NO_SANITIZE_UNDEFINED convertNumeric(From value, To & result) return true; } - if constexpr (std::is_floating_point_v && std::is_floating_point_v) + if constexpr (is_floating_point_v && is_floating_point_v) { /// 
Note that NaNs doesn't compare equal to anything, but they are still in range of any Float type. if (isNaN(value)) diff --git a/src/Core/DecimalFunctions.h b/src/Core/DecimalFunctions.h index 8dad00c3a1e..c5bc4ad70f6 100644 --- a/src/Core/DecimalFunctions.h +++ b/src/Core/DecimalFunctions.h @@ -310,7 +310,7 @@ ReturnType convertToImpl(const DecimalType & decimal, UInt32 scale, To & result) using DecimalNativeType = typename DecimalType::NativeType; static constexpr bool throw_exception = std::is_void_v; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { result = static_cast(decimal.value) / static_cast(scaleMultiplier(scale)); } diff --git a/src/Core/Field.h b/src/Core/Field.h index 6afa98ed9c0..be70eb1ea07 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -251,6 +251,7 @@ template <> struct NearestFieldTypeImpl> { using Type = template <> struct NearestFieldTypeImpl> { using Type = DecimalField; }; template <> struct NearestFieldTypeImpl> { using Type = DecimalField; }; template <> struct NearestFieldTypeImpl> { using Type = DecimalField; }; +template <> struct NearestFieldTypeImpl { using Type = Float64; }; template <> struct NearestFieldTypeImpl { using Type = Float64; }; template <> struct NearestFieldTypeImpl { using Type = Float64; }; template <> struct NearestFieldTypeImpl { using Type = String; }; diff --git a/src/Core/SortCursor.h b/src/Core/SortCursor.h index 3c412fa1f17..a9dc90a8fa1 100644 --- a/src/Core/SortCursor.h +++ b/src/Core/SortCursor.h @@ -687,6 +687,7 @@ private: SortingQueueImpl>, strategy>, SortingQueueImpl>, strategy>, + SortingQueueImpl>, strategy>, SortingQueueImpl>, strategy>, SortingQueueImpl>, strategy>, diff --git a/src/Core/TypeId.h b/src/Core/TypeId.h index 9c634d2321c..73fa7da37e2 100644 --- a/src/Core/TypeId.h +++ b/src/Core/TypeId.h @@ -21,6 +21,7 @@ enum class TypeIndex Int64, Int128, Int256, + BFloat16, Float32, Float64, Date, @@ -91,6 +92,7 @@ TYPEID_MAP(Int32) TYPEID_MAP(Int64) TYPEID_MAP(Int128) TYPEID_MAP(Int256) +TYPEID_MAP(BFloat16) TYPEID_MAP(Float32) TYPEID_MAP(Float64) TYPEID_MAP(UUID) diff --git a/src/Core/Types_fwd.h b/src/Core/Types_fwd.h index a59e4b6eab8..2dffc910f9b 100644 --- a/src/Core/Types_fwd.h +++ b/src/Core/Types_fwd.h @@ -21,6 +21,7 @@ using Int128 = wide::integer<128, signed>; using UInt128 = wide::integer<128, unsigned>; using Int256 = wide::integer<256, signed>; using UInt256 = wide::integer<256, unsigned>; +using BFloat16 = __bf16; namespace DB { @@ -28,16 +29,10 @@ namespace DB using UUID = StrongTypedef; struct IPv4; - struct IPv6; struct Null; -using UInt128 = ::UInt128; -using UInt256 = ::UInt256; -using Int128 = ::Int128; -using Int256 = ::Int256; - enum class TypeIndex; /// Not a data type in database, defined just for convenience. 
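In the src/Core/Field.h hunk above, BFloat16 gets the same NearestFieldTypeImpl mapping as Float32 and Float64: all three are widened to Float64 once they are stored in a Field. A reduced illustration of that mapping, writing Float64 as plain double; only the BFloat16 line is new in the patch.

#include <type_traits>

using BFloat16 = __bf16;
using Float64 = double;

/// Reduced illustration of the NearestFieldTypeImpl specializations in src/Core/Field.h:
/// narrow floating-point values are widened to Float64 inside Field.
template <typename T> struct NearestFieldTypeImpl;
template <> struct NearestFieldTypeImpl<BFloat16> { using Type = Float64; };
template <> struct NearestFieldTypeImpl<float>    { using Type = Float64; };
template <> struct NearestFieldTypeImpl<double>   { using Type = Float64; };

static_assert(std::is_same_v<NearestFieldTypeImpl<BFloat16>::Type, Float64>);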
diff --git a/src/Core/callOnTypeIndex.h b/src/Core/callOnTypeIndex.h index f5f67df563b..68aba2263c7 100644 --- a/src/Core/callOnTypeIndex.h +++ b/src/Core/callOnTypeIndex.h @@ -62,6 +62,7 @@ static bool callOnBasicType(TypeIndex number, F && f) { switch (number) { + case TypeIndex::BFloat16: return f(TypePair()); case TypeIndex::Float32: return f(TypePair()); case TypeIndex::Float64: return f(TypePair()); default: @@ -132,6 +133,7 @@ static inline bool callOnBasicTypes(TypeIndex type_num1, TypeIndex type_num2, F { switch (type_num1) { + case TypeIndex::BFloat16: return callOnBasicType(type_num2, std::forward(f)); case TypeIndex::Float32: return callOnBasicType(type_num2, std::forward(f)); case TypeIndex::Float64: return callOnBasicType(type_num2, std::forward(f)); default: @@ -189,6 +191,7 @@ static bool callOnIndexAndDataType(TypeIndex number, F && f, ExtraArgs && ... ar case TypeIndex::Int128: return f(TypePair, T>(), std::forward(args)...); case TypeIndex::Int256: return f(TypePair, T>(), std::forward(args)...); + case TypeIndex::BFloat16: return f(TypePair, T>(), std::forward(args)...); case TypeIndex::Float32: return f(TypePair, T>(), std::forward(args)...); case TypeIndex::Float64: return f(TypePair, T>(), std::forward(args)...); diff --git a/src/DataTypes/DataTypeNumberBase.cpp b/src/DataTypes/DataTypeNumberBase.cpp index be448fe1491..636d557f4d0 100644 --- a/src/DataTypes/DataTypeNumberBase.cpp +++ b/src/DataTypes/DataTypeNumberBase.cpp @@ -42,6 +42,7 @@ template class DataTypeNumberBase; template class DataTypeNumberBase; template class DataTypeNumberBase; template class DataTypeNumberBase; +template class DataTypeNumberBase; template class DataTypeNumberBase; template class DataTypeNumberBase; diff --git a/src/DataTypes/DataTypeNumberBase.h b/src/DataTypes/DataTypeNumberBase.h index 3a5b11c5124..11b9427a14d 100644 --- a/src/DataTypes/DataTypeNumberBase.h +++ b/src/DataTypes/DataTypeNumberBase.h @@ -68,6 +68,7 @@ extern template class DataTypeNumberBase; extern template class DataTypeNumberBase; extern template class DataTypeNumberBase; extern template class DataTypeNumberBase; +extern template class DataTypeNumberBase; extern template class DataTypeNumberBase; extern template class DataTypeNumberBase; diff --git a/src/DataTypes/DataTypesDecimal.h b/src/DataTypes/DataTypesDecimal.h index e2b433cbe2f..12d061b11e5 100644 --- a/src/DataTypes/DataTypesDecimal.h +++ b/src/DataTypes/DataTypesDecimal.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -209,9 +210,9 @@ inline ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & static constexpr bool throw_exception = std::is_same_v; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { - if (!std::isfinite(value)) + if (!isFinite(value)) { if constexpr (throw_exception) throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "{} convert overflow. 
Cannot convert infinity or NaN to decimal", ToDataType::family_name); diff --git a/src/DataTypes/DataTypesNumber.cpp b/src/DataTypes/DataTypesNumber.cpp index 1c0c418411b..81c64df9711 100644 --- a/src/DataTypes/DataTypesNumber.cpp +++ b/src/DataTypes/DataTypesNumber.cpp @@ -54,6 +54,7 @@ void registerDataTypeNumbers(DataTypeFactory & factory) factory.registerDataType("Int32", createNumericDataType); factory.registerDataType("Int64", createNumericDataType); + factory.registerDataType("BFloat16", createNumericDataType); factory.registerDataType("Float32", createNumericDataType); factory.registerDataType("Float64", createNumericDataType); diff --git a/src/DataTypes/DataTypesNumber.h b/src/DataTypes/DataTypesNumber.h index 0c1f88a7925..1fe95f58e99 100644 --- a/src/DataTypes/DataTypesNumber.h +++ b/src/DataTypes/DataTypesNumber.h @@ -63,6 +63,7 @@ using DataTypeInt8 = DataTypeNumber; using DataTypeInt16 = DataTypeNumber; using DataTypeInt32 = DataTypeNumber; using DataTypeInt64 = DataTypeNumber; +using DataTypeBFloat16 = DataTypeNumber; using DataTypeFloat32 = DataTypeNumber; using DataTypeFloat64 = DataTypeNumber; diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index eabf066bc3d..ac71a61683a 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -372,9 +372,10 @@ struct WhichDataType constexpr bool isDecimal256() const { return idx == TypeIndex::Decimal256; } constexpr bool isDecimal() const { return isDecimal32() || isDecimal64() || isDecimal128() || isDecimal256(); } + constexpr bool isBFloat16() const { return idx == TypeIndex::BFloat16; } constexpr bool isFloat32() const { return idx == TypeIndex::Float32; } constexpr bool isFloat64() const { return idx == TypeIndex::Float64; } - constexpr bool isFloat() const { return isFloat32() || isFloat64(); } + constexpr bool isFloat() const { return isBFloat16() || isFloat32() || isFloat64(); } constexpr bool isNativeNumber() const { return isNativeInteger() || isFloat(); } constexpr bool isNumber() const { return isInteger() || isFloat() || isDecimal(); } @@ -558,6 +559,7 @@ template inline constexpr bool IsDataTypeEnum> = tr M(Int16) \ M(Int32) \ M(Int64) \ + M(BFloat16) \ M(Float32) \ M(Float64) @@ -574,6 +576,7 @@ template inline constexpr bool IsDataTypeEnum> = tr M(Int64) \ M(Int128) \ M(Int256) \ + M(BFloat16) \ M(Float32) \ M(Float64) } diff --git a/src/DataTypes/NumberTraits.h b/src/DataTypes/NumberTraits.h index cf283d3358c..35a6238c71a 100644 --- a/src/DataTypes/NumberTraits.h +++ b/src/DataTypes/NumberTraits.h @@ -74,7 +74,7 @@ template struct ResultOfAdditionMultiplication { using Type = typename Construct< is_signed_v || is_signed_v, - std::is_floating_point_v || std::is_floating_point_v, + is_floating_point_v || is_floating_point_v, nextSize(max(sizeof(A), sizeof(B)))>::Type; }; @@ -82,7 +82,7 @@ template struct ResultOfSubtraction { using Type = typename Construct< true, - std::is_floating_point_v || std::is_floating_point_v, + is_floating_point_v || is_floating_point_v, nextSize(max(sizeof(A), sizeof(B)))>::Type; }; @@ -113,7 +113,7 @@ template struct ResultOfModulo /// Example: toInt32(-199) % toUInt8(200) will return -199 that does not fit in Int8, only in Int16. static constexpr size_t size_of_result = result_is_signed ? 
nextSize(sizeof(B)) : sizeof(B); using Type0 = typename Construct::Type; - using Type = std::conditional_t || std::is_floating_point_v, Float64, Type0>; + using Type = std::conditional_t || is_floating_point_v, Float64, Type0>; }; template struct ResultOfPositiveModulo @@ -121,21 +121,21 @@ template struct ResultOfPositiveModulo /// function positive_modulo always return non-negative number. static constexpr size_t size_of_result = sizeof(B); using Type0 = typename Construct::Type; - using Type = std::conditional_t || std::is_floating_point_v, Float64, Type0>; + using Type = std::conditional_t || is_floating_point_v, Float64, Type0>; }; template struct ResultOfModuloLegacy { using Type0 = typename Construct || is_signed_v, false, sizeof(B)>::Type; - using Type = std::conditional_t || std::is_floating_point_v, Float64, Type0>; + using Type = std::conditional_t || is_floating_point_v, Float64, Type0>; }; template struct ResultOfNegate { using Type = typename Construct< true, - std::is_floating_point_v, + is_floating_point_v, is_signed_v ? sizeof(A) : nextSize(sizeof(A))>::Type; }; @@ -143,7 +143,7 @@ template struct ResultOfAbs { using Type = typename Construct< false, - std::is_floating_point_v, + is_floating_point_v, sizeof(A)>::Type; }; @@ -154,7 +154,7 @@ template struct ResultOfBit using Type = typename Construct< is_signed_v || is_signed_v, false, - std::is_floating_point_v || std::is_floating_point_v ? 8 : max(sizeof(A), sizeof(B))>::Type; + is_floating_point_v || is_floating_point_v ? 8 : max(sizeof(A), sizeof(B))>::Type; }; template struct ResultOfBitNot @@ -180,7 +180,7 @@ template struct ResultOfBitNot template struct ResultOfIf { - static constexpr bool has_float = std::is_floating_point_v || std::is_floating_point_v; + static constexpr bool has_float = is_floating_point_v || is_floating_point_v; static constexpr bool has_integer = is_integer || is_integer; static constexpr bool has_signed = is_signed_v || is_signed_v; static constexpr bool has_unsigned = !is_signed_v || !is_signed_v; @@ -189,7 +189,7 @@ struct ResultOfIf static constexpr size_t max_size_of_unsigned_integer = max(is_signed_v ? 0 : sizeof(A), is_signed_v ? 0 : sizeof(B)); static constexpr size_t max_size_of_signed_integer = max(is_signed_v ? sizeof(A) : 0, is_signed_v ? sizeof(B) : 0); static constexpr size_t max_size_of_integer = max(is_integer ? sizeof(A) : 0, is_integer ? sizeof(B) : 0); - static constexpr size_t max_size_of_float = max(std::is_floating_point_v ? sizeof(A) : 0, std::is_floating_point_v ? sizeof(B) : 0); + static constexpr size_t max_size_of_float = max(is_floating_point_v ? sizeof(A) : 0, is_floating_point_v ? sizeof(B) : 0); using ConstructedType = typename Construct= max_size_of_float) @@ -211,7 +211,7 @@ template struct ToInteger using Type = typename Construct< is_signed_v, false, - std::is_floating_point_v ? 8 : sizeof(A)>::Type; + is_floating_point_v ? 
8 : sizeof(A)>::Type; }; diff --git a/src/DataTypes/Serializations/SerializationNumber.cpp b/src/DataTypes/Serializations/SerializationNumber.cpp index b6c7e4618b8..805253fccee 100644 --- a/src/DataTypes/Serializations/SerializationNumber.cpp +++ b/src/DataTypes/Serializations/SerializationNumber.cpp @@ -176,6 +176,7 @@ template class SerializationNumber; template class SerializationNumber; template class SerializationNumber; template class SerializationNumber; +template class SerializationNumber; template class SerializationNumber; template class SerializationNumber; diff --git a/src/DataTypes/Utils.cpp b/src/DataTypes/Utils.cpp index e58331a8bcb..d1e314e77dc 100644 --- a/src/DataTypes/Utils.cpp +++ b/src/DataTypes/Utils.cpp @@ -54,6 +54,13 @@ bool canBeSafelyCasted(const DataTypePtr & from_type, const DataTypePtr & to_typ return false; } + case TypeIndex::BFloat16: + { + if (to_which_type.isFloat32() || to_which_type.isFloat64() || to_which_type.isString()) + return true; + + return false; + } case TypeIndex::Float32: { if (to_which_type.isFloat64() || to_which_type.isString()) diff --git a/src/DataTypes/getLeastSupertype.cpp b/src/DataTypes/getLeastSupertype.cpp index e5bdb4b267f..0ed075563e2 100644 --- a/src/DataTypes/getLeastSupertype.cpp +++ b/src/DataTypes/getLeastSupertype.cpp @@ -108,6 +108,8 @@ DataTypePtr getNumericType(const TypeIndexSet & types) maximize(max_bits_of_signed_integer, 128); else if (type == TypeIndex::Int256) maximize(max_bits_of_signed_integer, 256); + else if (type == TypeIndex::BFloat16) + maximize(max_mantissa_bits_of_floating, 8); else if (type == TypeIndex::Float32) maximize(max_mantissa_bits_of_floating, 24); else if (type == TypeIndex::Float64) @@ -144,7 +146,9 @@ DataTypePtr getNumericType(const TypeIndexSet & types) if (max_mantissa_bits_of_floating) { size_t min_mantissa_bits = std::max(min_bit_width_of_integer, max_mantissa_bits_of_floating); - if (min_mantissa_bits <= 24) + if (min_mantissa_bits <= 8) + return std::make_shared(); + else if (min_mantissa_bits <= 24) return std::make_shared(); else if (min_mantissa_bits <= 53) return std::make_shared(); diff --git a/src/DataTypes/getMostSubtype.cpp b/src/DataTypes/getMostSubtype.cpp index 33b5735456e..d0ea716f2ff 100644 --- a/src/DataTypes/getMostSubtype.cpp +++ b/src/DataTypes/getMostSubtype.cpp @@ -297,6 +297,8 @@ DataTypePtr getMostSubtype(const DataTypes & types, bool throw_if_result_is_noth minimize(min_bits_of_signed_integer, 128); else if (typeid_cast(type.get())) minimize(min_bits_of_signed_integer, 256); + else if (typeid_cast(type.get())) + minimize(min_mantissa_bits_of_floating, 8); else if (typeid_cast(type.get())) minimize(min_mantissa_bits_of_floating, 24); else if (typeid_cast(type.get())) @@ -313,7 +315,9 @@ DataTypePtr getMostSubtype(const DataTypes & types, bool throw_if_result_is_noth /// If the result must be floating. 
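getLeastSupertype above now counts BFloat16 as 8 significand bits (7 stored mantissa bits plus the implicit leading bit), next to 24 for Float32 and 53 for Float64, and getMostSubtype below applies the same thresholds in the other direction; getLeastSupertype additionally takes the max with the bit width required by any integer arguments before choosing. A stand-alone sketch of the selection rule; pickFloatTypeForMantissaBits is an illustrative name.

#include <cstddef>
#include <string>

/// Illustrative only: choose the narrowest floating-point type whose significand
/// covers the required number of bits, as in getLeastSupertype/getMostSubtype.
static std::string pickFloatTypeForMantissaBits(size_t mantissa_bits)
{
    if (mantissa_bits <= 8)
        return "BFloat16";  /// 7 stored bits + implicit leading bit
    if (mantissa_bits <= 24)
        return "Float32";   /// 23 + 1
    if (mantissa_bits <= 53)
        return "Float64";   /// 52 + 1
    return "";              /// nothing fits; getLeastSupertype has to give up on a float supertype
}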
if (!min_bits_of_signed_integer && !min_bits_of_unsigned_integer) { - if (min_mantissa_bits_of_floating <= 24) + if (min_mantissa_bits_of_floating <= 8) + return std::make_shared(); + else if (min_mantissa_bits_of_floating <= 24) return std::make_shared(); else if (min_mantissa_bits_of_floating <= 53) return std::make_shared(); diff --git a/src/Formats/ProtobufSerializer.cpp b/src/Formats/ProtobufSerializer.cpp index dd37c25719c..872991709af 100644 --- a/src/Formats/ProtobufSerializer.cpp +++ b/src/Formats/ProtobufSerializer.cpp @@ -540,7 +540,7 @@ namespace case FieldTypeId::TYPE_ENUM: { - if (std::is_floating_point_v) + if (is_floating_point_v) incompatibleColumnType(TypeName); write_function = [this](NumberType value) diff --git a/src/Functions/DivisionUtils.h b/src/Functions/DivisionUtils.h index ff07309e248..2508bd2b62b 100644 --- a/src/Functions/DivisionUtils.h +++ b/src/Functions/DivisionUtils.h @@ -47,9 +47,9 @@ inline auto checkedDivision(A a, B b) { throwIfDivisionLeadsToFPE(a, b); - if constexpr (is_big_int_v && std::is_floating_point_v) + if constexpr (is_big_int_v && is_floating_point_v) return static_cast(a) / b; - else if constexpr (is_big_int_v && std::is_floating_point_v) + else if constexpr (is_big_int_v && is_floating_point_v) return a / static_cast(b); else if constexpr (is_big_int_v && is_big_int_v) return static_cast(a / b); @@ -86,17 +86,17 @@ struct DivideIntegralImpl { /// Comparisons are not strict to avoid rounding issues when operand is implicitly casted to float. - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) if (isNaN(a) || a >= std::numeric_limits::max() || a <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) if (isNaN(b) || b >= std::numeric_limits::max() || b <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); auto res = checkedDivision(CastA(a), CastB(b)); - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) if (isNaN(res) || res >= static_cast(std::numeric_limits::max()) || res <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division, because it will produce infinite or too large number"); @@ -122,18 +122,18 @@ struct ModuloImpl template static inline Result apply(A a, B b) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { /// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance. 
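The "similar to fmod" computation above is a - trunc(a / b) * b evaluated in the result type. A small stand-alone check follows; on exactly representable inputs it matches std::fmod, while in general the rounded division can make the last bits differ, which is why the comment says "similar to" rather than "equal to". truncModulo is an illustrative name.

#include <cassert>
#include <cmath>

/// Illustrative only: the trunc-based floating-point modulo used in ModuloImpl (and ModuloOrZeroImpl).
static double truncModulo(double a, double b)
{
    return a - std::trunc(a / b) * b;
}

int main()
{
    assert(truncModulo(7.5, 2.0) == std::fmod(7.5, 2.0));    /// 1.5
    assert(truncModulo(-7.5, 2.0) == std::fmod(-7.5, 2.0));  /// -1.5, sign follows the dividend
}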
return static_cast(a) - trunc(static_cast(a) / static_cast(b)) * static_cast(b); } else { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) if (isNaN(a) || a > std::numeric_limits::max() || a < std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) if (isNaN(b) || b > std::numeric_limits::max() || b < std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); diff --git a/src/Functions/FunctionMathUnary.h b/src/Functions/FunctionMathUnary.h index 9f400932356..8395855a564 100644 --- a/src/Functions/FunctionMathUnary.h +++ b/src/Functions/FunctionMathUnary.h @@ -66,7 +66,7 @@ private: /// Process all data as a whole and use FastOps implementation /// If the argument is integer, convert to Float64 beforehand - if constexpr (!std::is_floating_point_v) + if constexpr (!is_floating_point_v) { PODArray tmp_vec(size); for (size_t i = 0; i < size; ++i) @@ -150,7 +150,7 @@ private: { using Types = std::decay_t; using Type = typename Types::RightType; - using ReturnType = std::conditional_t, Float64, Type>; + using ReturnType = std::conditional_t, Float64, Type>; using ColVecType = ColumnVectorOrDecimal; const auto col_vec = checkAndGetColumn(col.column.get()); diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index eed75788fcd..fe4b14f5053 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -291,7 +291,7 @@ struct ConvertImpl else { /// If From Data is Nan or Inf and we convert to integer type, throw exception - if constexpr (std::is_floating_point_v && !std::is_floating_point_v) + if constexpr (is_floating_point_v && !is_floating_point_v) { if (!isFinite(vec_from[i])) { @@ -1314,7 +1314,7 @@ inline void convertFromTime(DataTypeDateTime::FieldType & x, t template void parseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool precise_float_parsing) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { if (precise_float_parsing) readFloatTextPrecise(x, rb); @@ -1378,7 +1378,7 @@ inline void parseImpl(DataTypeIPv6::FieldType & x, ReadBuffer & rb template bool tryParseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool precise_float_parsing) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { if (precise_float_parsing) return tryReadFloatTextPrecise(x, rb); @@ -2350,9 +2350,9 @@ private: using RightT = typename RightDataType::FieldType; static constexpr bool bad_left = - is_decimal || std::is_floating_point_v || is_big_int_v || is_signed_v; + is_decimal || is_floating_point_v || is_big_int_v || is_signed_v; static constexpr bool bad_right = - is_decimal || std::is_floating_point_v || is_big_int_v || is_signed_v; + is_decimal || is_floating_point_v || is_big_int_v || is_signed_v; /// Disallow int vs UUID conversion (but support int vs UInt128 conversion) if constexpr ((bad_left && std::is_same_v) || @@ -2678,7 +2678,7 @@ struct ToNumberMonotonicity /// Float cases. /// When converting to Float, the conversion is always monotonic. 
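The monotonicity comment above is about order preservation, not exactness: widening an integer to a floating-point type never swaps the order of two values, although distinct integers can collapse onto the same float once they exceed the significand width. A tiny demonstration of why the conversion is monotonic but not injective:

#include <cassert>
#include <cstdint>

int main()
{
    int64_t a = 16777216;  /// 2^24
    int64_t b = 16777217;  /// 2^24 + 1, not representable in Float32
    float fa = static_cast<float>(a);
    float fb = static_cast<float>(b);
    assert(fa <= fb);      /// order is preserved (monotonic)...
    assert(fa == fb);      /// ...but both values collapse to the same Float32
}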
- if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return { .is_monotonic = true, .is_always_monotonic = true }; const auto * low_cardinality = typeid_cast(&type); diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 3d1028c6d35..d775d616eb2 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -461,7 +461,7 @@ template - using FunctionRoundingImpl = std::conditional_t, + using FunctionRoundingImpl = std::conditional_t, FloatRoundingImpl, IntegerRoundingImpl>; diff --git a/src/Functions/factorial.cpp b/src/Functions/factorial.cpp index b814e8198e6..be545e398cd 100644 --- a/src/Functions/factorial.cpp +++ b/src/Functions/factorial.cpp @@ -21,7 +21,7 @@ struct FactorialImpl static inline NO_SANITIZE_UNDEFINED ResultType apply(A a) { - if constexpr (std::is_floating_point_v || is_over_big_int) + if constexpr (is_floating_point_v || is_over_big_int) throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type of argument of function factorial, should not be floating point or big int"); diff --git a/src/Functions/minus.cpp b/src/Functions/minus.cpp index 04877a42b18..109e5894f5e 100644 --- a/src/Functions/minus.cpp +++ b/src/Functions/minus.cpp @@ -17,8 +17,8 @@ struct MinusImpl { if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) - static_cast(static_cast(b)); } diff --git a/src/Functions/moduloOrZero.cpp b/src/Functions/moduloOrZero.cpp index 3551ae74c5f..bfd786940ce 100644 --- a/src/Functions/moduloOrZero.cpp +++ b/src/Functions/moduloOrZero.cpp @@ -17,7 +17,7 @@ struct ModuloOrZeroImpl template static inline Result apply(A a, B b) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { /// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance. return ResultType(a) - trunc(ResultType(a) / ResultType(b)) * ResultType(b); diff --git a/src/Functions/multiply.cpp b/src/Functions/multiply.cpp index 4dc8cd10f31..ef51fe6061e 100644 --- a/src/Functions/multiply.cpp +++ b/src/Functions/multiply.cpp @@ -18,8 +18,8 @@ struct MultiplyImpl { if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) * static_cast(static_cast(b)); } diff --git a/src/Functions/plus.cpp b/src/Functions/plus.cpp index cd9cf6cec5c..ea79fb4702a 100644 --- a/src/Functions/plus.cpp +++ b/src/Functions/plus.cpp @@ -19,8 +19,8 @@ struct PlusImpl /// Next everywhere, static_cast - so that there is no wrong result in expressions of the form Int64 c = UInt32(a) * Int32(-1). 
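The static_cast comment above is about C++'s usual arithmetic conversions: in UInt32 * Int32 the signed operand is converted to unsigned, the multiplication wraps in 32 bits, and only the wrapped value is widened to Int64. Widening each operand first, which is what the static_casts in these arithmetic implementations are for, avoids that. A small demonstration:

#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t a = 3;
    int32_t b = -1;

    /// Arithmetic happens in uint32_t, then the wrapped value is widened:
    int64_t wrong = a * b;                                              /// 4294967293
    /// Widen the operands first, as the comment above prescribes:
    int64_t right = static_cast<int64_t>(a) * static_cast<int64_t>(b);  /// -3

    std::printf("%lld %lld\n", static_cast<long long>(wrong), static_cast<long long>(right));
}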
if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) + static_cast(static_cast(b)); } diff --git a/src/Functions/sign.cpp b/src/Functions/sign.cpp index 6c849760eed..59a307e43bb 100644 --- a/src/Functions/sign.cpp +++ b/src/Functions/sign.cpp @@ -13,7 +13,7 @@ struct SignImpl static inline NO_SANITIZE_UNDEFINED ResultType apply(A a) { - if constexpr (is_decimal || std::is_floating_point_v) + if constexpr (is_decimal || is_floating_point_v) return a < A(0) ? -1 : a == A(0) ? 0 : 1; else if constexpr (is_signed_v) return a < 0 ? -1 : a == 0 ? 0 : 1; diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 85584d63ee8..6068f49f5bf 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -1316,7 +1316,9 @@ inline bool tryReadText(UUID & x, ReadBuffer & buf) { return tryReadUUIDText(x, inline bool tryReadText(IPv4 & x, ReadBuffer & buf) { return tryReadIPv4Text(x, buf); } inline bool tryReadText(IPv6 & x, ReadBuffer & buf) { return tryReadIPv6Text(x, buf); } -inline void readText(is_floating_point auto & x, ReadBuffer & buf) { readFloatText(x, buf); } +template +requires is_floating_point_v +inline void readText(T & x, ReadBuffer & buf) { readFloatText(x, buf); } inline void readText(String & x, ReadBuffer & buf) { readEscapedString(x, buf); } diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index b4f8b476b11..c6a86b05f4d 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -153,6 +153,7 @@ inline void writeBoolText(bool x, WriteBuffer & buf) template +requires is_floating_point_v inline size_t writeFloatTextFastPath(T x, char * buffer) { Int64 result = 0; @@ -169,10 +170,13 @@ inline size_t writeFloatTextFastPath(T x, char * buffer) } else { - if (DecomposedFloat32(x).isIntegerInRepresentableRange()) - result = itoa(Int32(x), buffer) - buffer; + /// This will support 16-bit floats as well. 
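writeFloatTextFastPath above handles 16-bit floats by widening them to float first (every BFloat16 value is exactly representable as Float32, since bfloat16 is a truncated Float32) and reusing the 32-bit path: itoa when the value is an integer in representable range, dragonbox otherwise. A reduced sketch of the same dispatch, with snprintf standing in for the itoa/dragonbox helpers, which are external dependencies; writeBFloat16Text and the Int32-range check are illustrative.

#include <cmath>
#include <cstddef>
#include <cstdio>

using BFloat16 = __bf16;

/// Illustrative only: widen a 16-bit float and format it through a 32-bit path;
/// snprintf stands in for the itoa/dragonbox calls of writeFloatTextFastPath.
static size_t writeBFloat16Text(BFloat16 value, char * buffer, size_t size)
{
    float f32 = value;  /// exact widening

    if (f32 == std::trunc(f32) && std::fabs(f32) < 2147483648.0f)  /// integer that fits in Int32
        return static_cast<size_t>(std::snprintf(buffer, size, "%d", static_cast<int>(f32)));
    return static_cast<size_t>(std::snprintf(buffer, size, "%g", f32));
}

int main()
{
    char buf[64];
    writeBFloat16Text(static_cast<BFloat16>(2.5f), buf, sizeof(buf));
    std::puts(buf);  /// 2.5
}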
+ float f32 = x; + + if (DecomposedFloat32(f32).isIntegerInRepresentableRange()) + result = itoa(Int32(f32), buffer) - buffer; else - result = jkj::dragonbox::to_chars_n(x, buffer) - buffer; + result = jkj::dragonbox::to_chars_n(f32, buffer) - buffer; } if (result <= 0) @@ -181,10 +185,9 @@ inline size_t writeFloatTextFastPath(T x, char * buffer) } template +requires is_floating_point_v inline void writeFloatText(T x, WriteBuffer & buf) { - static_assert(std::is_same_v || std::is_same_v, "Argument for writeFloatText must be float or double"); - using Converter = DoubleConverter; if (likely(buf.available() >= Converter::MAX_REPRESENTATION_LENGTH)) { @@ -530,7 +533,7 @@ void writeJSONNumber(T x, WriteBuffer & ostr, const FormatSettings & settings) bool is_finite = isFinite(x); const bool need_quote = (is_integer && (sizeof(T) >= 8) && settings.json.quote_64bit_integers) - || (settings.json.quote_denormals && !is_finite) || (is_floating_point && (sizeof(T) >= 8) && settings.json.quote_64bit_floats); + || (settings.json.quote_denormals && !is_finite) || (is_floating_point_v && (sizeof(T) >= 8) && settings.json.quote_64bit_floats); if (need_quote) writeChar('"', ostr); @@ -541,7 +544,7 @@ void writeJSONNumber(T x, WriteBuffer & ostr, const FormatSettings & settings) writeCString("null", ostr); else { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { if (std::signbit(x)) { @@ -800,7 +803,6 @@ inline void writeXMLStringForTextElement(std::string_view s, WriteBuffer & buf) } /// @brief Serialize `uuid` into an array of characters in big-endian byte order. -/// @param uuid UUID to serialize. /// @return Array of characters in big-endian byte order. std::array formatUUID(const UUID & uuid); @@ -1065,7 +1067,9 @@ inline void writeText(is_integer auto x, WriteBuffer & buf) writeIntText(x, buf); } -inline void writeText(is_floating_point auto x, WriteBuffer & buf) { writeFloatText(x, buf); } +template +requires is_floating_point_v +inline void writeText(T x, WriteBuffer & buf) { writeFloatText(x, buf); } inline void writeText(is_enum auto x, WriteBuffer & buf) { writeText(magic_enum::enum_name(x), buf); } diff --git a/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp index 4335cde47f9..61caacd8346 100644 --- a/src/Interpreters/RowRefs.cpp +++ b/src/Interpreters/RowRefs.cpp @@ -181,7 +181,7 @@ private: if (!sorted.load(std::memory_order_relaxed)) { - if constexpr (std::is_arithmetic_v && !std::is_floating_point_v) + if constexpr (std::is_arithmetic_v && !is_floating_point_v) { if (likely(entries.size() > 256)) { From becbef9e489b477b2a3fdd0de6ab754941d14351 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 1 Apr 2024 02:08:35 +0000 Subject: [PATCH 002/566] sketch of read-in-order optimization --- .../IMergingAlgorithmWithDelayedChunk.cpp | 2 +- .../IMergingAlgorithmWithSharedChunks.cpp | 2 +- .../Algorithms/MergeTreePartLevelInfo.h | 25 ------------- .../Merges/Algorithms/MergeTreeReadInfo.h | 35 +++++++++++++++++++ .../Algorithms/MergingSortedAlgorithm.cpp | 10 ++++++ .../QueryPlan/ReadFromMergeTree.cpp | 12 +++++-- src/Processors/QueryPlan/ReadFromMergeTree.h | 2 +- .../MergeTree/MergeTreeRangeReader.cpp | 17 +++++++-- src/Storages/MergeTree/MergeTreeRangeReader.h | 2 +- src/Storages/MergeTree/MergeTreeReadTask.cpp | 8 ++++- src/Storages/MergeTree/MergeTreeReadTask.h | 6 ++++ .../MergeTree/MergeTreeSelectProcessor.cpp | 13 +++++-- .../MergeTree/MergeTreeSelectProcessor.h | 6 ++++ .../MergeTree/MergeTreeSequentialSource.cpp | 5 +-- 14 files changed, 106 
insertions(+), 39 deletions(-) delete mode 100644 src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h create mode 100644 src/Processors/Merges/Algorithms/MergeTreeReadInfo.h diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp index cbad6813fbc..13b245717b3 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp @@ -1,5 +1,5 @@ #include -#include +#include namespace DB diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp index c8b69382e89..4fe50feaede 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp @@ -1,5 +1,5 @@ #include -#include +#include namespace DB { diff --git a/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h b/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h deleted file mode 100644 index bcf4e759024..00000000000 --- a/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include - -namespace DB -{ - -/// To carry part level if chunk is produced by a merge tree source -class MergeTreePartLevelInfo : public ChunkInfo -{ -public: - MergeTreePartLevelInfo() = delete; - explicit MergeTreePartLevelInfo(ssize_t part_level) : origin_merge_tree_part_level(part_level) { } - size_t origin_merge_tree_part_level = 0; -}; - -inline size_t getPartLevelFromChunk(const Chunk & chunk) -{ - const auto & info = chunk.getChunkInfo(); - if (const auto * part_level_info = typeid_cast(info.get())) - return part_level_info->origin_merge_tree_part_level; - return 0; -} - -} diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h new file mode 100644 index 00000000000..e79df0fb8c8 --- /dev/null +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -0,0 +1,35 @@ +#pragma once + +#include + +namespace DB +{ + +/// To carry part level and virtual row if chunk is produced by a merge tree source +class MergeTreeReadInfo : public ChunkInfo +{ +public: + MergeTreeReadInfo() = delete; + explicit MergeTreeReadInfo(size_t part_level, bool virtual_row_) : + origin_merge_tree_part_level(part_level), virtual_row(virtual_row_) { } + size_t origin_merge_tree_part_level = 0; + bool virtual_row = false; +}; + +inline size_t getPartLevelFromChunk(const Chunk & chunk) +{ + const auto & info = chunk.getChunkInfo(); + if (const auto * read_info = typeid_cast(info.get())) + return read_info->origin_merge_tree_part_level; + return 0; +} + +inline bool getVirtualRowFromChunk(const Chunk & chunk) +{ + const auto & info = chunk.getChunkInfo(); + if (const auto * read_info = typeid_cast(info.get())) + return read_info->virtual_row; + return 0; +} + +} diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 1debfcec8e0..89f0193b05b 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -239,6 +240,15 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeBatchImpl(TSortingQueue & auto [current_ptr, initial_batch_size] = queue.current(); auto 
current = *current_ptr; + if (getVirtualRowFromChunk(current_inputs[current.impl->order].chunk)) + { + /// If virtual row is detected, there should be only one row as a single chunk, + /// and always skip this chunk to pull the next one. + assert(initial_batch_size == 1); + queue.removeTop(); + return Status(current.impl->order); + } + bool batch_skip_last_row = false; if (current.impl->isLast(initial_batch_size) && current_inputs[current.impl->order].skip_last_row) { diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index f4607cad040..91cd362f1d9 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -501,7 +501,8 @@ Pipe ReadFromMergeTree::readInOrder( Names required_columns, PoolSettings pool_settings, ReadType read_type, - UInt64 limit) + UInt64 limit, + bool need_virtual_row) { /// For reading in order it makes sense to read only /// one range per task to reduce number of read rows. @@ -596,6 +597,8 @@ Pipe ReadFromMergeTree::readInOrder( processor->addPartLevelToChunk(isQueryWithFinal()); + processor->addVirtualRowToChunk(need_virtual_row); + auto source = std::make_shared(std::move(processor)); if (set_rows_approx) source->addTotalRowsApprox(total_rows); @@ -1028,7 +1031,12 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( } for (auto && item : splitted_parts_and_ranges) - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); + { + /// need_virtual_row = true means a MergingSortedTransform should occur. + /// If so, adding a virtual row might speedup in the case of multiple parts. + bool need_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, need_virtual_row)); + } } Block pipe_header; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 5ed742a9bfd..6a08622af11 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -251,7 +251,7 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); - Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit); + Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool need_virtual_row = false); Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, ActionsDAGPtr & result_projection); diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp index 6932762f58b..0456a8e2787 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -946,7 +946,7 @@ String addDummyColumnWithRowCount(Block & block, size_t num_rows) } -MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, 
MarkRanges & ranges) +MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, MarkRanges & ranges, bool add_virtual_row) { if (max_rows == 0) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected at least 1 row to read, got 0."); @@ -961,7 +961,7 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar if (prev_reader) { - read_result = prev_reader->read(max_rows, ranges); + read_result = prev_reader->read(max_rows, ranges, add_virtual_row); size_t num_read_rows; Columns columns = continueReadingChain(read_result, num_read_rows); @@ -1026,8 +1026,15 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar } else { + // if (add_virtual_row) + // { + // generate the virtual row + // } + // else + // { read_result = startReadingChain(max_rows, ranges); read_result.num_rows = read_result.numReadRows(); + // } LOG_TEST(log, "First reader returned: {}, requested columns: {}", read_result.dumpInfo(), dumpNames(merge_tree_reader->getColumns())); @@ -1062,7 +1069,11 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar read_result.addNumBytesRead(total_bytes); } - executePrewhereActionsAndFilterColumns(read_result); + /// If add_virtual_row is enabled, don't turn on prewhere so that virtual row can always pass through. + // if (!add_virtual_row) + // { + executePrewhereActionsAndFilterColumns(read_result); + // } read_result.checkInternalConsistency(); diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h index 688a6b0922b..d8cf33b0340 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -300,7 +300,7 @@ public: LoggerPtr log; }; - ReadResult read(size_t max_rows, MarkRanges & ranges); + ReadResult read(size_t max_rows, MarkRanges & ranges, bool add_virtual_row); const Block & getSampleBlock() const { return result_sample_block; } diff --git a/src/Storages/MergeTree/MergeTreeReadTask.cpp b/src/Storages/MergeTree/MergeTreeReadTask.cpp index 08b30e445e2..498c62080a9 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.cpp +++ b/src/Storages/MergeTree/MergeTreeReadTask.cpp @@ -158,7 +158,13 @@ MergeTreeReadTask::BlockAndProgress MergeTreeReadTask::read(const BlockSizeParam UInt64 recommended_rows = estimateNumRows(params); UInt64 rows_to_read = std::max(static_cast(1), std::min(params.max_block_size_rows, recommended_rows)); - auto read_result = range_readers.main.read(rows_to_read, mark_ranges); + auto read_result = range_readers.main.read(rows_to_read, mark_ranges, add_virtual_row); + + if (add_virtual_row) + { + /// Now we have the virtual row, which is at most once for each part. + add_virtual_row = false; + } /// All rows were filtered. Repeat. 
if (read_result.num_rows == 0) diff --git a/src/Storages/MergeTree/MergeTreeReadTask.h b/src/Storages/MergeTree/MergeTreeReadTask.h index c8bb501c0e8..73927d62959 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.h +++ b/src/Storages/MergeTree/MergeTreeReadTask.h @@ -117,6 +117,7 @@ public: size_t row_count = 0; size_t num_read_rows = 0; size_t num_read_bytes = 0; + bool is_virtual_row = false; }; MergeTreeReadTask( @@ -140,6 +141,8 @@ public: static Readers createReaders(const MergeTreeReadTaskInfoPtr & read_info, const Extras & extras, const MarkRanges & ranges); static RangeReaders createRangeReaders(const Readers & readers, const PrewhereExprInfo & prewhere_actions); + void addVirtualRow() { add_virtual_row = true; } + private: UInt64 estimateNumRows(const BlockSizeParams & params) const; @@ -158,6 +161,9 @@ private: /// Used to satistfy preferred_block_size_bytes limitation MergeTreeBlockSizePredictorPtr size_predictor; + + /// If true, add once, and then set false. + bool add_virtual_row = false; }; using MergeTreeReadTaskPtr = std::unique_ptr; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index fce733d47b7..f61365b0916 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include @@ -134,6 +134,13 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); + if (add_virtual_row) + { + /// Turn on virtual row just once. + task->addVirtualRow(); + add_virtual_row = false; + } + auto res = algorithm->readFromTask(*task, block_size_params); if (res.row_count) @@ -148,7 +155,9 @@ ChunkAndProgress MergeTreeSelectProcessor::read() } return ChunkAndProgress{ - .chunk = Chunk(ordered_columns, res.row_count, add_part_level ? std::make_shared(task->getInfo().data_part->info.level) : nullptr), + .chunk = Chunk(ordered_columns, res.row_count, + add_part_level || res.is_virtual_row ? std::make_shared( + (add_part_level ? task->getInfo().data_part->info.level : 0), res.is_virtual_row) : nullptr), .num_read_rows = res.num_read_rows, .num_read_bytes = res.num_read_bytes, .is_finished = false}; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 01bb3851e04..106190f15c3 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,6 +65,8 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } + void addVirtualRowToChunk(bool add_virtual_row_) { add_virtual_row = add_virtual_row_; } + private: /// This struct allow to return block with no columns but with non-zero number of rows similar to Chunk struct BlockAndProgress @@ -99,6 +101,10 @@ private: /// Should we add part level to produced chunk. Part level is useful for next steps if query has FINAL bool add_part_level = false; + /// Should we add a virtual row as the single first chunk. + /// Virtual row is useful for read-in-order optimization when multiple parts exist. 
+ bool add_virtual_row = false; + LoggerPtr log = getLogger("MergeTreeSelectProcessor"); std::atomic is_cancelled{false}; }; diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 81eb166b300..bffea59d5d6 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -13,7 +13,7 @@ #include #include #include -#include +#include namespace DB { @@ -262,7 +262,8 @@ try ++it; } - return Chunk(std::move(res_columns), rows_read, add_part_level ? std::make_shared(data_part->info.level) : nullptr); + return Chunk(std::move(res_columns), rows_read, + add_part_level ? std::make_shared(data_part->info.level, false) : nullptr); } } else From 72ebd3957251bc0ca9355f80d034b3f7d3083a3e Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 8 Apr 2024 02:27:54 +0000 Subject: [PATCH 003/566] add simple virtual row --- .../QueryPlan/ReadFromMergeTree.cpp | 2 +- .../MergeTree/MergeTreeRangeReader.cpp | 17 ++----- src/Storages/MergeTree/MergeTreeRangeReader.h | 2 +- src/Storages/MergeTree/MergeTreeReadTask.cpp | 2 +- src/Storages/MergeTree/MergeTreeReadTask.h | 3 -- .../MergeTree/MergeTreeSelectProcessor.cpp | 50 ++++++++++++++----- .../MergeTree/MergeTreeSelectProcessor.h | 8 ++- 7 files changed, 51 insertions(+), 33 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 91cd362f1d9..7f7f2673aee 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -597,7 +597,7 @@ Pipe ReadFromMergeTree::readInOrder( processor->addPartLevelToChunk(isQueryWithFinal()); - processor->addVirtualRowToChunk(need_virtual_row); + processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex()); auto source = std::make_shared(std::move(processor)); if (set_rows_approx) diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp index 0456a8e2787..6932762f58b 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -946,7 +946,7 @@ String addDummyColumnWithRowCount(Block & block, size_t num_rows) } -MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, MarkRanges & ranges, bool add_virtual_row) +MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, MarkRanges & ranges) { if (max_rows == 0) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected at least 1 row to read, got 0."); @@ -961,7 +961,7 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar if (prev_reader) { - read_result = prev_reader->read(max_rows, ranges, add_virtual_row); + read_result = prev_reader->read(max_rows, ranges); size_t num_read_rows; Columns columns = continueReadingChain(read_result, num_read_rows); @@ -1026,15 +1026,8 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar } else { - // if (add_virtual_row) - // { - // generate the virtual row - // } - // else - // { read_result = startReadingChain(max_rows, ranges); read_result.num_rows = read_result.numReadRows(); - // } LOG_TEST(log, "First reader returned: {}, requested columns: {}", read_result.dumpInfo(), dumpNames(merge_tree_reader->getColumns())); @@ -1069,11 +1062,7 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar read_result.addNumBytesRead(total_bytes); } - /// If 
add_virtual_row is enabled, don't turn on prewhere so that virtual row can always pass through. - // if (!add_virtual_row) - // { - executePrewhereActionsAndFilterColumns(read_result); - // } + executePrewhereActionsAndFilterColumns(read_result); read_result.checkInternalConsistency(); diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h index d8cf33b0340..688a6b0922b 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -300,7 +300,7 @@ public: LoggerPtr log; }; - ReadResult read(size_t max_rows, MarkRanges & ranges, bool add_virtual_row); + ReadResult read(size_t max_rows, MarkRanges & ranges); const Block & getSampleBlock() const { return result_sample_block; } diff --git a/src/Storages/MergeTree/MergeTreeReadTask.cpp b/src/Storages/MergeTree/MergeTreeReadTask.cpp index 498c62080a9..3c4d121195f 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.cpp +++ b/src/Storages/MergeTree/MergeTreeReadTask.cpp @@ -158,7 +158,7 @@ MergeTreeReadTask::BlockAndProgress MergeTreeReadTask::read(const BlockSizeParam UInt64 recommended_rows = estimateNumRows(params); UInt64 rows_to_read = std::max(static_cast(1), std::min(params.max_block_size_rows, recommended_rows)); - auto read_result = range_readers.main.read(rows_to_read, mark_ranges, add_virtual_row); + auto read_result = range_readers.main.read(rows_to_read, mark_ranges); if (add_virtual_row) { diff --git a/src/Storages/MergeTree/MergeTreeReadTask.h b/src/Storages/MergeTree/MergeTreeReadTask.h index 73927d62959..709fc73f16e 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.h +++ b/src/Storages/MergeTree/MergeTreeReadTask.h @@ -117,7 +117,6 @@ public: size_t row_count = 0; size_t num_read_rows = 0; size_t num_read_bytes = 0; - bool is_virtual_row = false; }; MergeTreeReadTask( @@ -141,8 +140,6 @@ public: static Readers createReaders(const MergeTreeReadTaskInfoPtr & read_info, const Extras & extras, const MarkRanges & ranges); static RangeReaders createRangeReaders(const Readers & readers, const PrewhereExprInfo & prewhere_actions); - void addVirtualRow() { add_virtual_row = true; } - private: UInt64 estimateNumRows(const BlockSizeParams & params) const; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index f61365b0916..d75802c68f3 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -133,38 +133,64 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); - + add_virtual_row = false; if (add_virtual_row) { /// Turn on virtual row just once. - task->addVirtualRow(); add_virtual_row = false; - } - auto res = algorithm->readFromTask(*task, block_size_params); + const auto & primary_key = storage_snapshot->metadata->primary_key; + + MergeTreeReadTask::BlockAndProgress res; + res.row_count = 1; - if (res.row_count) - { /// Reorder the columns according to result_header Columns ordered_columns; ordered_columns.reserve(result_header.columns()); for (size_t i = 0; i < result_header.columns(); ++i) { - auto name = result_header.getByPosition(i).name; - ordered_columns.push_back(res.block.getByName(name).column); + // TODO: composite pk??? 
+ const ColumnWithTypeAndName & type_and_name = result_header.getByPosition(i); + if (type_and_name.name == primary_key.column_names[0] && type_and_name.type == primary_key.data_types[0]) + ordered_columns.push_back(index[0]->cloneResized(1)); // TODO: use the first range pk whose range might contain results + else + ordered_columns.push_back(type_and_name.type->createColumn()->cloneResized(1)); } return ChunkAndProgress{ - .chunk = Chunk(ordered_columns, res.row_count, - add_part_level || res.is_virtual_row ? std::make_shared( - (add_part_level ? task->getInfo().data_part->info.level : 0), res.is_virtual_row) : nullptr), + .chunk = Chunk(ordered_columns, res.row_count, std::make_shared( + (add_part_level ? task->getInfo().data_part->info.level : 0), true)), .num_read_rows = res.num_read_rows, .num_read_bytes = res.num_read_bytes, .is_finished = false}; } else { - return {Chunk(), res.num_read_rows, res.num_read_bytes, false}; + auto res = algorithm->readFromTask(*task, block_size_params); + + if (res.row_count) + { + /// Reorder the columns according to result_header + Columns ordered_columns; + ordered_columns.reserve(result_header.columns()); + for (size_t i = 0; i < result_header.columns(); ++i) + { + auto name = result_header.getByPosition(i).name; + ordered_columns.push_back(res.block.getByName(name).column); + } + + return ChunkAndProgress{ + .chunk = Chunk(ordered_columns, res.row_count, + add_part_level ? std::make_shared( + (add_part_level ? task->getInfo().data_part->info.level : 0), false) : nullptr), + .num_read_rows = res.num_read_rows, + .num_read_bytes = res.num_read_bytes, + .is_finished = false}; + } + else + { + return {Chunk(), res.num_read_rows, res.num_read_bytes, false}; + } } } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 106190f15c3..67a03ca2533 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,7 +65,11 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(bool add_virtual_row_) { add_virtual_row = add_virtual_row_; } + void addVirtualRowToChunk(bool add_virtual_row_, const Columns& index_) + { + add_virtual_row = add_virtual_row_; + index = index_; + } private: /// This struct allow to return block with no columns but with non-zero number of rows similar to Chunk @@ -105,6 +109,8 @@ private: /// Virtual row is useful for read-in-order optimization when multiple parts exist. bool add_virtual_row = false; + Columns index; + LoggerPtr log = getLogger("MergeTreeSelectProcessor"); std::atomic is_cancelled{false}; }; From 57a2a20900176da28b73f027fc298f7cb7f91781 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 10 Apr 2024 04:02:15 +0000 Subject: [PATCH 004/566] support composite pk --- src/Storages/MergeTree/MergeTreeSelectProcessor.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index d75802c68f3..868e757e135 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -133,7 +133,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); - add_virtual_row = false; + if (add_virtual_row) { /// Turn on virtual row just once. 
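The hunks above and below implement the core trick of this patch series: before any real data is read from a part, the select processor emits a single-row "virtual" chunk whose primary-key columns carry the values stored in the part's PK index at the first mark of the first selected range, so the downstream merging transform can decide which part to pull from first without decompressing whole granules. A rough self-contained sketch of that construction follows; the types are simplified stand-ins for the real Column/Chunk/PK-index interfaces, not the actual ClickHouse API, and integer columns are assumed for brevity.

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

/// Simplified stand-ins for the real column, chunk and PK-index types.
struct ColumnSketch { std::string name; std::vector<int64_t> values; };
struct ChunkSketch { std::vector<ColumnSketch> columns; size_t rows = 0; bool virtual_row = false; };

/// Build a one-row virtual chunk: columns that form a prefix of the primary key
/// take the value stored in the part's PK index at mark_range_begin,
/// every other column is filled with a default value.
ChunkSketch makeVirtualRow(
    const std::vector<std::string> & header_names,
    const std::vector<ColumnSketch> & pk_index,
    size_t mark_range_begin)
{
    ChunkSketch chunk;
    chunk.rows = 1;
    chunk.virtual_row = true;
    size_t pk_pos = 0;
    for (const auto & name : header_names)
    {
        int64_t value = 0; /// default filler for non-PK columns
        if (pk_pos < pk_index.size() && name == pk_index[pk_pos].name)
        {
            value = pk_index[pk_pos].values[mark_range_begin];
            ++pk_pos;
        }
        chunk.columns.push_back({name, {value}});
    }
    return chunk;
}

In the actual patches the chunk is additionally tagged with MergeTreeReadInfo(part_level, /*virtual_row=*/ true), so getVirtualRowFromChunk() lets MergingSortedAlgorithm recognise the sentinel row and skip it after comparison.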
@@ -147,12 +147,14 @@ ChunkAndProgress MergeTreeSelectProcessor::read() /// Reorder the columns according to result_header Columns ordered_columns; ordered_columns.reserve(result_header.columns()); - for (size_t i = 0; i < result_header.columns(); ++i) + for (size_t i = 0, j = 0; i < result_header.columns(); ++i) { - // TODO: composite pk??? const ColumnWithTypeAndName & type_and_name = result_header.getByPosition(i); - if (type_and_name.name == primary_key.column_names[0] && type_and_name.type == primary_key.data_types[0]) - ordered_columns.push_back(index[0]->cloneResized(1)); // TODO: use the first range pk whose range might contain results + if (j < index.size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) + { + ordered_columns.push_back(index[j]->cloneResized(1)); // TODO: use the first range pk whose range might contain results + ++j; + } else ordered_columns.push_back(type_and_name.type->createColumn()->cloneResized(1)); } From bd4385f969c5139870dcfc0ce9d72e6029ab9a59 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 15 Apr 2024 23:28:16 +0000 Subject: [PATCH 005/566] add test --- .../Merges/Algorithms/MergeTreeReadInfo.h | 2 +- ...03031_read_in_order_optimization.reference | 5 ++ .../03031_read_in_order_optimization.sql | 48 +++++++++++++++++++ 3 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization.reference create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization.sql diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index e79df0fb8c8..ca4bccb235f 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -29,7 +29,7 @@ inline bool getVirtualRowFromChunk(const Chunk & chunk) const auto & info = chunk.getChunkInfo(); if (const auto * read_info = typeid_cast(info.get())) return read_info->virtual_row; - return 0; + return false; } } diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization.reference new file mode 100644 index 00000000000..304f7f7a049 --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.reference @@ -0,0 +1,5 @@ +0 +1 +2 +3 +24578 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql new file mode 100644 index 00000000000..eecbfe64f6d --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -0,0 +1,48 @@ + +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + `x` UInt64, + `y` UInt64, + `z` UInt64, + `k` UInt64 +) +ENGINE = MergeTree +ORDER BY (x, y, z) +SETTINGS index_granularity = 8192; + +INSERT INTO t SELECT + number, + number, + number, + number +FROM numbers(8192 * 3); + +INSERT INTO t SELECT + number + (8192 * 3), + number + (8192 * 3), + number + (8192 * 3), + number + (8192 * 3) +FROM numbers(8192 * 3); + +SELECT x +FROM t +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 0, +max_threads = 1, +optimize_read_in_order = 1; + +SYSTEM FLUSH LOGS; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND query like '%SELECT x%' +AND query not like '%system.query_log%' +ORDER BY query_start_time DESC, read_rows DESC +LIMIT 1; + +DROP TABLE t; \ No newline at end of file From 
cc3fd0e73693e877967b1f5572d8d6779088fa06 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 23 Apr 2024 02:43:49 +0000 Subject: [PATCH 006/566] minor change --- src/Storages/MergeTree/MergeTreeSelectProcessor.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 868e757e135..1f97fec2013 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -150,13 +150,17 @@ ChunkAndProgress MergeTreeSelectProcessor::read() for (size_t i = 0, j = 0; i < result_header.columns(); ++i) { const ColumnWithTypeAndName & type_and_name = result_header.getByPosition(i); + ColumnPtr current_column = type_and_name.type->createColumn(); + if (j < index.size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) { - ordered_columns.push_back(index[j]->cloneResized(1)); // TODO: use the first range pk whose range might contain results + auto column = current_column->cloneEmpty(); + column->insert((*index[j])[0]); // TODO: use the first range pk whose range might contain results + ordered_columns.push_back(std::move(column)); ++j; } else - ordered_columns.push_back(type_and_name.type->createColumn()->cloneResized(1)); + ordered_columns.push_back(current_column->cloneResized(1)); } return ChunkAndProgress{ From 7f6d6400230eb90e27a99226c89ac0c5acb0d709 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 24 Apr 2024 01:14:04 +0000 Subject: [PATCH 007/566] use a better range begin in virtual row --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 3 ++- src/Storages/MergeTree/MergeTreeSelectProcessor.cpp | 2 +- src/Storages/MergeTree/MergeTreeSelectProcessor.h | 7 +++++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 7f7f2673aee..f873bcb6104 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -597,7 +597,8 @@ Pipe ReadFromMergeTree::readInOrder( processor->addPartLevelToChunk(isQueryWithFinal()); - processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex()); + processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex(), + part_with_ranges.ranges.front().begin); auto source = std::make_shared(std::move(processor)); if (set_rows_approx) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 1f97fec2013..a3fcfad3bb5 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -155,7 +155,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (j < index.size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) { auto column = current_column->cloneEmpty(); - column->insert((*index[j])[0]); // TODO: use the first range pk whose range might contain results + column->insert((*index[j])[mark_range_begin]); ordered_columns.push_back(std::move(column)); ++j; } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 67a03ca2533..352f771f9ce 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,10 +65,11 @@ public: void 
addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(bool add_virtual_row_, const Columns& index_) + void addVirtualRowToChunk(bool add_virtual_row_, const Columns& index_, size_t mark_range_begin_) { add_virtual_row = add_virtual_row_; index = index_; + mark_range_begin = mark_range_begin_; } private: @@ -108,8 +109,10 @@ private: /// Should we add a virtual row as the single first chunk. /// Virtual row is useful for read-in-order optimization when multiple parts exist. bool add_virtual_row = false; - + /// PK index used in virtual row. Columns index; + /// The first range that might contain the candidate, used in virtual row. + size_t mark_range_begin; LoggerPtr log = getLogger("MergeTreeSelectProcessor"); std::atomic is_cancelled{false}; From ba049d85b3126b766575f958b61fc2f84bb3a11b Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 26 Apr 2024 02:45:50 +0000 Subject: [PATCH 008/566] fix test --- tests/queries/0_stateless/03031_read_in_order_optimization.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql index eecbfe64f6d..f114a838ff3 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -10,7 +10,8 @@ CREATE TABLE t ) ENGINE = MergeTree ORDER BY (x, y, z) -SETTINGS index_granularity = 8192; +SETTINGS index_granularity = 8192, +index_granularity_bytes = 10485760; INSERT INTO t SELECT number, From 86c7488647750f65d7a75dd4774f84fcf44f763b Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 4 May 2024 02:09:17 +0000 Subject: [PATCH 009/566] only read one chunk in mergetramsform when meet virtual row --- src/Processors/Merges/IMergingTransform.cpp | 7 +++++-- .../MergeTree/MergeTreeSelectProcessor.h | 2 +- ...03031_read_in_order_optimization.reference | 2 +- .../03031_read_in_order_optimization.sql | 20 +++++++++++++++++++ 4 files changed, 27 insertions(+), 4 deletions(-) diff --git a/src/Processors/Merges/IMergingTransform.cpp b/src/Processors/Merges/IMergingTransform.cpp index fbb47969b2f..50b3e2ca634 100644 --- a/src/Processors/Merges/IMergingTransform.cpp +++ b/src/Processors/Merges/IMergingTransform.cpp @@ -1,3 +1,4 @@ +#include #include namespace DB @@ -101,8 +102,10 @@ IProcessor::Status IMergingTransformBase::prepareInitializeInputs() /// setNotNeeded after reading first chunk, because in optimismtic case /// (e.g. with optimized 'ORDER BY primary_key LIMIT n' and small 'n') /// we won't have to read any chunks anymore; - auto chunk = input.pull(limit_hint != 0); - if ((limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end) + /// If virtual row exists, test it first, so don't read more chunks. 
+ auto chunk = input.pull(true); + if ((limit_hint == 0 && !getVirtualRowFromChunk(chunk)) + || (limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end) input.setNeeded(); if (!chunk.hasRows()) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 352f771f9ce..255b4c65ff9 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,7 +65,7 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(bool add_virtual_row_, const Columns& index_, size_t mark_range_begin_) + void addVirtualRowToChunk(bool add_virtual_row_, const Columns & index_, size_t mark_range_begin_) { add_virtual_row = add_virtual_row_; index = index_; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization.reference index 304f7f7a049..70d79aecf43 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.reference @@ -2,4 +2,4 @@ 1 2 3 -24578 +16386 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql index f114a838ff3..999d2e265d0 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -46,4 +46,24 @@ AND query not like '%system.query_log%' ORDER BY query_start_time DESC, read_rows DESC LIMIT 1; +-- SELECT x +-- FROM t +-- ORDER BY x ASC +-- LIMIT 4 +-- SETTINGS max_block_size = 8192, +-- read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +-- max_threads = 1, +-- optimize_read_in_order = 1; + +-- SYSTEM FLUSH LOGS; + +-- -- without virtual row 16.38k, but with virtual row 24.58k, becasue read again (why?) in the non-target part after reading its virtual row and before sending the virtual row to the priority queue +-- SELECT read_rows +-- FROM system.query_log +-- WHERE current_database = currentDatabase() +-- AND query like '%SELECT x%' +-- AND query not like '%system.query_log%' +-- ORDER BY query_start_time DESC, read_rows DESC +-- LIMIT 1; + DROP TABLE t; \ No newline at end of file From 04a757eb71a0cdff42f804043025dd6e900e9283 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 4 May 2024 17:37:56 +0000 Subject: [PATCH 010/566] fix --- src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp | 2 +- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 7592f37ba22..7da73349c4a 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -234,7 +234,7 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeBatchImpl(TSortingQueue & { /// If virtual row is detected, there should be only one row as a single chunk, /// and always skip this chunk to pull the next one. 
- assert(initial_batch_size == 1); + chassert(initial_batch_size == 1); queue.removeTop(); return Status(current.impl->order); } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index cdf301f8044..d7e7f9ae758 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -598,8 +598,9 @@ Pipe ReadFromMergeTree::readInOrder( processor->addPartLevelToChunk(isQueryWithFinal()); - processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex(), - part_with_ranges.ranges.front().begin); + auto primary_key_index = part_with_ranges.data_part->getIndex(); + chassert(primary_key_index); + processor->addVirtualRowToChunk(need_virtual_row, *primary_key_index, part_with_ranges.ranges.front().begin); auto source = std::make_shared(std::move(processor)); if (set_rows_approx) From 1c2c3aed249ea77f0d667e1a9173ca39a5a88858 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 6 May 2024 13:25:19 +0000 Subject: [PATCH 011/566] support non-preliminary merge case --- src/Processors/Merges/IMergingTransform.cpp | 9 +++-- .../QueryPlan/ReadFromMergeTree.cpp | 8 ++--- .../MergeTree/MergeTreeSelectProcessor.cpp | 4 +-- .../MergeTree/MergeTreeSelectProcessor.h | 4 +-- ...03031_read_in_order_optimization.reference | 5 +++ .../03031_read_in_order_optimization.sql | 33 +++++++++---------- 6 files changed, 34 insertions(+), 29 deletions(-) diff --git a/src/Processors/Merges/IMergingTransform.cpp b/src/Processors/Merges/IMergingTransform.cpp index 50b3e2ca634..3daeca254ed 100644 --- a/src/Processors/Merges/IMergingTransform.cpp +++ b/src/Processors/Merges/IMergingTransform.cpp @@ -102,10 +102,13 @@ IProcessor::Status IMergingTransformBase::prepareInitializeInputs() /// setNotNeeded after reading first chunk, because in optimismtic case /// (e.g. with optimized 'ORDER BY primary_key LIMIT n' and small 'n') /// we won't have to read any chunks anymore; - /// If virtual row exists, test it first, so don't read more chunks. + /// If virtual row exists, let it pass through, so don't read more chunks. auto chunk = input.pull(true); - if ((limit_hint == 0 && !getVirtualRowFromChunk(chunk)) - || (limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end) + bool virtual_row = getVirtualRowFromChunk(chunk); + if (limit_hint == 0 && !virtual_row) + input.setNeeded(); + + if (!virtual_row && ((limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end)) input.setNeeded(); if (!chunk.hasRows()) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index d7e7f9ae758..4386d435732 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -597,10 +597,8 @@ Pipe ReadFromMergeTree::readInOrder( actions_settings, block_size, reader_settings); processor->addPartLevelToChunk(isQueryWithFinal()); - - auto primary_key_index = part_with_ranges.data_part->getIndex(); - chassert(primary_key_index); - processor->addVirtualRowToChunk(need_virtual_row, *primary_key_index, part_with_ranges.ranges.front().begin); + processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex(), + part_with_ranges.ranges.front().begin); auto source = std::make_shared(std::move(processor)); if (set_rows_approx) @@ -1037,7 +1035,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( { /// need_virtual_row = true means a MergingSortedTransform should occur. 
/// If so, adding a virtual row might speedup in the case of multiple parts. - bool need_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + bool need_virtual_row = item.size() > 1; pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, need_virtual_row)); } } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index a3fcfad3bb5..4feef5115bf 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -152,10 +152,10 @@ ChunkAndProgress MergeTreeSelectProcessor::read() const ColumnWithTypeAndName & type_and_name = result_header.getByPosition(i); ColumnPtr current_column = type_and_name.type->createColumn(); - if (j < index.size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) + if (j < index->size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) { auto column = current_column->cloneEmpty(); - column->insert((*index[j])[mark_range_begin]); + column->insert((*(*index)[j])[mark_range_begin]); ordered_columns.push_back(std::move(column)); ++j; } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 3dab11b556c..7a562c1a115 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,7 +65,7 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(bool add_virtual_row_, const Columns & index_, size_t mark_range_begin_) + void addVirtualRowToChunk(bool add_virtual_row_, const IMergeTreeDataPart::Index & index_, size_t mark_range_begin_) { add_virtual_row = add_virtual_row_; index = index_; @@ -101,7 +101,7 @@ private: /// Virtual row is useful for read-in-order optimization when multiple parts exist. bool add_virtual_row = false; /// PK index used in virtual row. - Columns index; + IMergeTreeDataPart::Index index; /// The first range that might contain the candidate, used in virtual row. 
size_t mark_range_begin; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization.reference index 70d79aecf43..62e8669fbe0 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.reference @@ -3,3 +3,8 @@ 2 3 16386 +0 +1 +2 +3 +16386 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql index 999d2e265d0..57f9838392f 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -46,24 +46,23 @@ AND query not like '%system.query_log%' ORDER BY query_start_time DESC, read_rows DESC LIMIT 1; --- SELECT x --- FROM t --- ORDER BY x ASC --- LIMIT 4 --- SETTINGS max_block_size = 8192, --- read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge --- max_threads = 1, --- optimize_read_in_order = 1; +SELECT x +FROM t +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +max_threads = 1, +optimize_read_in_order = 1; --- SYSTEM FLUSH LOGS; +SYSTEM FLUSH LOGS; --- -- without virtual row 16.38k, but with virtual row 24.58k, becasue read again (why?) in the non-target part after reading its virtual row and before sending the virtual row to the priority queue --- SELECT read_rows --- FROM system.query_log --- WHERE current_database = currentDatabase() --- AND query like '%SELECT x%' --- AND query not like '%system.query_log%' --- ORDER BY query_start_time DESC, read_rows DESC --- LIMIT 1; +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND query like '%SELECT x%' +AND query not like '%system.query_log%' +ORDER BY query_start_time DESC, read_rows DESC +LIMIT 1; DROP TABLE t; \ No newline at end of file From 0537b8c833b69638c3868497f935f9bb7cf46e0a Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 8 May 2024 00:17:37 +0000 Subject: [PATCH 012/566] restrict to preliminary merge and add more tests --- .../QueryPlan/ReadFromMergeTree.cpp | 2 +- ...03031_read_in_order_optimization.reference | 7 ++- .../03031_read_in_order_optimization.sql | 48 +++++++++++++++---- 3 files changed, 45 insertions(+), 12 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 4386d435732..9a0469f49a8 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1035,7 +1035,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( { /// need_virtual_row = true means a MergingSortedTransform should occur. /// If so, adding a virtual row might speedup in the case of multiple parts. 
- bool need_virtual_row = item.size() > 1; + bool need_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, need_virtual_row)); } } diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization.reference index 62e8669fbe0..c73f79d8dce 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.reference @@ -3,8 +3,13 @@ 2 3 16386 +16385 +16386 +16387 +16388 +24578 0 1 2 3 -16386 +16384 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql index 57f9838392f..597845564e4 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -24,28 +24,55 @@ INSERT INTO t SELECT number + (8192 * 3), number + (8192 * 3), number + (8192 * 3), - number + (8192 * 3) + number FROM numbers(8192 * 3); +-- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), +-- both chunks come from the same part. SELECT x FROM t ORDER BY x ASC LIMIT 4 SETTINGS max_block_size = 8192, -read_in_order_two_level_merge_threshold = 0, +read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, -optimize_read_in_order = 1; +optimize_read_in_order = 1, +log_comment = 'no filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND query like '%SELECT x%' -AND query not like '%system.query_log%' -ORDER BY query_start_time DESC, read_rows DESC +AND log_comment = 'no filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +limit 1; + +-- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), +-- all chunks come from the same part. 
+SELECT k +FROM t +WHERE k > 8192 * 2 +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 0, --force preliminary merge +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'with filter'; + +SYSTEM FLUSH LOGS; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'with filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC LIMIT 1; +-- Should not impact cases without preliminary merge (might read again when chunk row is less than limit) SELECT x FROM t ORDER BY x ASC @@ -53,16 +80,17 @@ LIMIT 4 SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge max_threads = 1, -optimize_read_in_order = 1; +optimize_read_in_order = 1, +log_comment = 'no impact'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND query like '%SELECT x%' -AND query not like '%system.query_log%' -ORDER BY query_start_time DESC, read_rows DESC +AND log_comment = 'no impact' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC LIMIT 1; DROP TABLE t; \ No newline at end of file From 8f8ba55ac3cc254d4a890ed6f45cb5a4ef411143 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 14 May 2024 19:43:47 +0000 Subject: [PATCH 013/566] add check flag --- .../Merges/Algorithms/MergeTreeReadInfo.h | 1 + .../QueryPlan/ReadFromMergeTree.cpp | 5 ++- src/Processors/QueryPlan/SortingStep.cpp | 31 ++++++++++++++ src/QueryPipeline/QueryPipelineBuilder.h | 2 + .../MergeTree/MergeTreeSelectProcessor.cpp | 4 +- .../MergeTree/MergeTreeSelectProcessor.h | 7 ++-- src/Storages/MergeTree/MergeTreeSource.h | 2 + ...03031_read_in_order_optimization.reference | 7 +++- .../03031_read_in_order_optimization.sql | 40 +++++++++++++++---- 9 files changed, 84 insertions(+), 15 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index ca4bccb235f..52ca92b471a 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -13,6 +13,7 @@ public: explicit MergeTreeReadInfo(size_t part_level, bool virtual_row_) : origin_merge_tree_part_level(part_level), virtual_row(virtual_row_) { } size_t origin_merge_tree_part_level = 0; + /// If virtual_row is true, the chunk must contain the virtual row only. 
bool virtual_row = false; }; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 9a0469f49a8..2f1db9539a6 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -597,8 +597,9 @@ Pipe ReadFromMergeTree::readInOrder( actions_settings, block_size, reader_settings); processor->addPartLevelToChunk(isQueryWithFinal()); - processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex(), - part_with_ranges.ranges.front().begin); + processor->addVirtualRowToChunk(part_with_ranges.data_part->getIndex(), part_with_ranges.ranges.front().begin); + if (need_virtual_row) + processor->enableVirtualRow(); auto source = std::make_shared(std::move(processor)); if (set_rows_approx) diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index d0491cb4b82..d728e8fb154 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -13,6 +13,9 @@ #include #include +#include +#include +#include namespace CurrentMetrics { @@ -243,6 +246,34 @@ void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescr /// If there are several streams, then we merge them into one if (pipeline.getNumStreams() > 1) { + /// We check every step of this pipeline, to make sure virtual row can work correctly. + /// Currently ExpressionTransform is supported, should add other processors if possible. + const auto& pipe = pipeline.getPipe(); + bool enable_virtual_row = true; + std::vector> merge_tree_sources; + for (const auto & processor : pipe.getProcessors()) + { + if (auto merge_tree_source = std::dynamic_pointer_cast(processor)) + { + merge_tree_sources.push_back(merge_tree_source); + } + else if (!std::dynamic_pointer_cast(processor)) + { + enable_virtual_row = false; + break; + } + } + + /// If everything is okay, we enable virtual row in MergeTreeSelectProcessor + if (enable_virtual_row && merge_tree_sources.size() >= 2) + { + for (const auto & merge_tree_source : merge_tree_sources) + { + const auto& merge_tree_select_processor = merge_tree_source->getProcessor(); + merge_tree_select_processor->enableVirtualRow(); + } + } + auto transform = std::make_shared( pipeline.getHeader(), pipeline.getNumStreams(), diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index f0b2ead687e..50a77360d46 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -197,6 +197,8 @@ public: void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } + const Pipe& getPipe() const { return pipe; } + /// Convert query pipeline to pipe. 
static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); static QueryPipeline getPipeline(QueryPipelineBuilder builder); diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 4feef5115bf..0f4b68ddde9 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -134,10 +134,10 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); - if (add_virtual_row) + if (enable_virtual_row) { /// Turn on virtual row just once. - add_virtual_row = false; + enable_virtual_row = false; const auto & primary_key = storage_snapshot->metadata->primary_key; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 7a562c1a115..57da1039ba9 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,13 +65,14 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(bool add_virtual_row_, const IMergeTreeDataPart::Index & index_, size_t mark_range_begin_) + void addVirtualRowToChunk(const IMergeTreeDataPart::Index & index_, size_t mark_range_begin_) { - add_virtual_row = add_virtual_row_; index = index_; mark_range_begin = mark_range_begin_; } + void enableVirtualRow() { enable_virtual_row = true; } + private: /// Sets up range readers corresponding to data readers void initializeRangeReaders(); @@ -99,7 +100,7 @@ private: /// Should we add a virtual row as the single first chunk. /// Virtual row is useful for read-in-order optimization when multiple parts exist. - bool add_virtual_row = false; + bool enable_virtual_row = false; /// PK index used in virtual row. IMergeTreeDataPart::Index index; /// The first range that might contain the candidate, used in virtual row. diff --git a/src/Storages/MergeTree/MergeTreeSource.h b/src/Storages/MergeTree/MergeTreeSource.h index 655f0ee6ebe..486b8be2fef 100644 --- a/src/Storages/MergeTree/MergeTreeSource.h +++ b/src/Storages/MergeTree/MergeTreeSource.h @@ -19,6 +19,8 @@ public: Status prepare() override; + const MergeTreeSelectProcessorPtr& getProcessor() const { return processor; } + #if defined(OS_LINUX) int schedule() override; #endif diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization.reference index c73f79d8dce..c7cce7e60e9 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.reference @@ -12,4 +12,9 @@ 1 2 3 -16384 +16386 +16385 +16386 +16387 +16388 +24578 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql index 597845564e4..332ee7f58dc 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -27,6 +27,8 @@ INSERT INTO t SELECT number FROM numbers(8192 * 3); +SYSTEM STOP MERGES t; + -- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), -- both chunks come from the same part. 
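-- With index_granularity = 8192 that works out to 2 + 8192 + 8192 = 16386 read rows, the value checked in the reference file.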
SELECT x @@ -37,14 +39,14 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'no filter'; +log_comment = 'preliminary merge, no filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'no filter' +AND log_comment = 'preliminary merge, no filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC limit 1; @@ -60,19 +62,20 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'with filter'; +log_comment = 'preliminary merge with filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'with filter' +AND log_comment = 'preliminary merge with filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1; --- Should not impact cases without preliminary merge (might read again when chunk row is less than limit) +-- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), +-- both chunks come from the same part. SELECT x FROM t ORDER BY x ASC @@ -81,14 +84,37 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'no impact'; +log_comment = 'no preliminary merge, no filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'no impact' +AND log_comment = 'no preliminary merge, no filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +LIMIT 1; + +-- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), +-- all chunks come from the same part. 
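-- With index_granularity = 8192 that works out to 2 + 8192 * 2 + 8192 = 24578 read rows, the value checked in the reference file.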
+SELECT k +FROM t +WHERE k > 8192 * 2 +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'no preliminary merge, with filter'; + +SYSTEM FLUSH LOGS; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'no preliminary merge, with filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1; From 3f6cdeb8802c04f39d01e4b048fe0381ff200242 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 15 May 2024 18:26:28 +0000 Subject: [PATCH 014/566] add more check --- .../Algorithms/MergingSortedAlgorithm.cpp | 8 ++ src/Processors/QueryPlan/SortingStep.cpp | 73 ++++++++++++------- src/Processors/QueryPlan/SortingStep.h | 2 + .../MergeTree/MergeTreeSelectProcessor.cpp | 2 +- .../MergeTree/MergeTreeSelectProcessor.h | 2 + .../02346_fulltext_index_search.sql | 8 +- ...r_optimization_with_virtual_row.reference} | 10 +++ ...n_order_optimization_with_virtual_row.sql} | 20 ++++- 8 files changed, 92 insertions(+), 33 deletions(-) rename tests/queries/0_stateless/{03031_read_in_order_optimization.reference => 03031_read_in_order_optimization_with_virtual_row.reference} (59%) rename tests/queries/0_stateless/{03031_read_in_order_optimization.sql => 03031_read_in_order_optimization_with_virtual_row.sql} (83%) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 7da73349c4a..eb5805087c4 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -8,6 +8,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + MergingSortedAlgorithm::MergingSortedAlgorithm( Block header_, size_t num_inputs, @@ -134,6 +139,9 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeImpl(TSortingHeap & queue auto current = queue.current(); + if (getVirtualRowFromChunk(current_inputs[current.impl->order].chunk)) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Virtual row is not implemented for Non-batch mode."); + if (current.impl->isLast() && current_inputs[current.impl->order].skip_last_row) { /// Get the next block from the corresponding source, if there is one. diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index d728e8fb154..97157b06f19 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -241,38 +241,57 @@ void SortingStep::finishSorting( }); } +void SortingStep::enableVirtualRow(const QueryPipelineBuilder & pipeline) const +{ + /// We check every step of this pipeline, to make sure virtual row can work correctly. + /// Currently ExpressionTransform is supported, should add other processors if possible. 
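+    /// If any other kind of processor sits between the MergeTreeSource and the merge,
+    /// we conservatively keep the virtual row disabled.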
+ const auto& pipe = pipeline.getPipe(); + bool enable_virtual_row = true; + std::vector> merge_tree_sources; + for (const auto & processor : pipe.getProcessors()) + { + if (auto merge_tree_source = std::dynamic_pointer_cast(processor)) + { + merge_tree_sources.push_back(merge_tree_source); + } + else if (!std::dynamic_pointer_cast(processor)) + { + enable_virtual_row = false; + break; + } + } + + /// If everything is okay, we enable virtual row in MergeTreeSelectProcessor + if (enable_virtual_row && merge_tree_sources.size() >= 2) + { + /// We have to check further in the case of fixed prefix, for example, + /// primary key ab, query SELECT a, b FROM t WHERE a = 1 ORDER BY b, + /// merge sort would sort based on b, leading to wrong result in comparison. + auto extractNameAfterDot = [](const String & name) + { + size_t pos = name.find_last_of('.'); + return (pos != String::npos) ? name.substr(pos + 1) : name; + }; + + const ColumnWithTypeAndName & type_and_name = pipeline.getHeader().getByPosition(0); + String column_name = extractNameAfterDot(type_and_name.name); + for (const auto & merge_tree_source : merge_tree_sources) + { + const auto& merge_tree_select_processor = merge_tree_source->getProcessor(); + + const auto & primary_key = merge_tree_select_processor->getPrimaryKey(); + if (primary_key.column_names[0] == column_name && primary_key.data_types[0] == type_and_name.type) + merge_tree_select_processor->enableVirtualRow(); + } + } +} + void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescription & result_sort_desc, const UInt64 limit_) { /// If there are several streams, then we merge them into one if (pipeline.getNumStreams() > 1) { - /// We check every step of this pipeline, to make sure virtual row can work correctly. - /// Currently ExpressionTransform is supported, should add other processors if possible. - const auto& pipe = pipeline.getPipe(); - bool enable_virtual_row = true; - std::vector> merge_tree_sources; - for (const auto & processor : pipe.getProcessors()) - { - if (auto merge_tree_source = std::dynamic_pointer_cast(processor)) - { - merge_tree_sources.push_back(merge_tree_source); - } - else if (!std::dynamic_pointer_cast(processor)) - { - enable_virtual_row = false; - break; - } - } - - /// If everything is okay, we enable virtual row in MergeTreeSelectProcessor - if (enable_virtual_row && merge_tree_sources.size() >= 2) - { - for (const auto & merge_tree_source : merge_tree_sources) - { - const auto& merge_tree_select_processor = merge_tree_source->getProcessor(); - merge_tree_select_processor->enableVirtualRow(); - } - } + enableVirtualRow(pipeline); auto transform = std::make_shared( pipeline.getHeader(), diff --git a/src/Processors/QueryPlan/SortingStep.h b/src/Processors/QueryPlan/SortingStep.h index 52f48f66a32..5f3820c346b 100644 --- a/src/Processors/QueryPlan/SortingStep.h +++ b/src/Processors/QueryPlan/SortingStep.h @@ -116,6 +116,8 @@ private: UInt64 limit_, bool skip_partial_sort = false); + void enableVirtualRow(const QueryPipelineBuilder & pipeline) const; + Type type; SortDescription prefix_description; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 0f4b68ddde9..67b58b53a0d 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -139,7 +139,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() /// Turn on virtual row just once. 
enable_virtual_row = false; - const auto & primary_key = storage_snapshot->metadata->primary_key; + const auto & primary_key = getPrimaryKey(); MergeTreeReadTask::BlockAndProgress res; res.row_count = 1; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 57da1039ba9..14481be24d3 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -73,6 +73,8 @@ public: void enableVirtualRow() { enable_virtual_row = true; } + const KeyDescription & getPrimaryKey() const { return storage_snapshot->metadata->primary_key; } + private: /// Sets up range readers corresponding to data readers void initializeRangeReaders(); diff --git a/tests/queries/0_stateless/02346_fulltext_index_search.sql b/tests/queries/0_stateless/02346_fulltext_index_search.sql index 3c172bfdaf7..fb6da10a115 100644 --- a/tests/queries/0_stateless/02346_fulltext_index_search.sql +++ b/tests/queries/0_stateless/02346_fulltext_index_search.sql @@ -195,14 +195,14 @@ INSERT INTO tab VALUES (201, 'rick c01'), (202, 'mick c02'), (203, 'nick c03'); SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; -- search full_text index -SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k; +SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k SETTINGS optimize_read_in_order = 1; --- check the query only read 3 granules (6 rows total; each granule has 2 rows) +-- check the query only read 3 granules (6 rows total; each granule has 2 rows; there are 2 extra virtual rows) SYSTEM FLUSH LOGS; -SELECT read_rows==6 from system.query_log +SELECT read_rows==8 from system.query_log WHERE query_kind ='Select' AND current_database = currentDatabase() - AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k;') + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k SETTINGS optimize_read_in_order = 1;') AND type='QueryFinish' AND result_rows==3 LIMIT 1; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference similarity index 59% rename from tests/queries/0_stateless/03031_read_in_order_optimization.reference rename to tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index c7cce7e60e9..12c4056ac27 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -3,18 +3,28 @@ 2 3 16386 +======== 16385 16386 16387 16388 24578 +======== 0 1 2 3 16386 +======== 16385 16386 16387 16388 24578 +======== +1 2 +1 2 +1 3 +1 3 +1 4 +1 4 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql similarity index 83% rename from tests/queries/0_stateless/03031_read_in_order_optimization.sql rename to tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 332ee7f58dc..ddcc1498af9 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -51,6 +51,7 @@ AND type = 'QueryFinish' ORDER BY query_start_time DESC limit 1; +SELECT '========'; -- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), -- all chunks 
come from the same part. SELECT k @@ -74,6 +75,7 @@ AND type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1; +SELECT '========'; -- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), -- both chunks come from the same part. SELECT x @@ -96,6 +98,7 @@ AND type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1; +SELECT '========'; -- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), -- all chunks come from the same part. SELECT k @@ -119,4 +122,19 @@ AND type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1; -DROP TABLE t; \ No newline at end of file +DROP TABLE t; + +SELECT '========'; +-- from 02149_read_in_order_fixed_prefix +DROP TABLE IF EXISTS t_read_in_order; + +CREATE TABLE t_read_in_order(a UInt32, b UInt32) +ENGINE = MergeTree ORDER BY (a, b) +SETTINGS index_granularity = 3; + +SYSTEM STOP MERGES t_read_in_order; + +INSERT INTO t_read_in_order VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); +INSERT INTO t_read_in_order VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); + +SELECT a, b FROM t_read_in_order WHERE a = 1 ORDER BY b SETTINGS max_threads = 1; From 4a0a4c68b2e66c0d4abfef417376d3225965b459 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 18 May 2024 03:33:42 +0000 Subject: [PATCH 015/566] restrict the case of func pk --- src/Processors/QueryPlan/SortingStep.cpp | 24 +++++-- ...er_optimization_with_virtual_row.reference | 4 ++ ...in_order_optimization_with_virtual_row.sql | 64 +++++++++++++------ 3 files changed, 66 insertions(+), 26 deletions(-) diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 84f90fa782f..addbdd020bb 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -262,12 +262,9 @@ void SortingStep::enableVirtualRow(const QueryPipelineBuilder & pipeline) const } } - /// If everything is okay, we enable virtual row in MergeTreeSelectProcessor + /// If everything is okay, enable virtual row in MergeTreeSelectProcessor. if (enable_virtual_row && merge_tree_sources.size() >= 2) { - /// We have to check further in the case of fixed prefix, for example, - /// primary key ab, query SELECT a, b FROM t WHERE a = 1 ORDER BY b, - /// merge sort would sort based on b, leading to wrong result in comparison. auto extractNameAfterDot = [](const String & name) { size_t pos = name.find_last_of('.'); @@ -278,10 +275,25 @@ void SortingStep::enableVirtualRow(const QueryPipelineBuilder & pipeline) const String column_name = extractNameAfterDot(type_and_name.name); for (const auto & merge_tree_source : merge_tree_sources) { - const auto& merge_tree_select_processor = merge_tree_source->getProcessor(); + const auto & merge_tree_select_processor = merge_tree_source->getProcessor(); + /// Check pk is not func based, as we only check type and name in filling in primary key of virtual row. 
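+            /// (For example, a sorting key such as ORDER BY (A, -B) is expression-based and is not handled here.)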
const auto & primary_key = merge_tree_select_processor->getPrimaryKey(); - if (primary_key.column_names[0] == column_name && primary_key.data_types[0] == type_and_name.type) + const auto & actions = primary_key.expression->getActions(); + bool is_okay = true; + for (const auto & action : actions) + { + if (action.node->type != ActionsDAG::ActionType::INPUT) + { + is_okay = false; + break; + } + } + + /// We have to check further in the case of fixed prefix, for example, + /// primary key ab, query SELECT a, b FROM t WHERE a = 1 ORDER BY b, + /// merge sort would sort based on b, leading to wrong result in comparison. + if (is_okay && primary_key.column_names[0] == column_name && primary_key.data_types[0] == type_and_name.type) merge_tree_select_processor->enableVirtualRow(); } } diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index 12c4056ac27..b4b1554a7d4 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -28,3 +28,7 @@ 1 3 1 4 1 4 +======== +1 3 +1 2 +1 1 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index ddcc1498af9..198bf1eb307 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -13,22 +13,22 @@ ORDER BY (x, y, z) SETTINGS index_granularity = 8192, index_granularity_bytes = 10485760; -INSERT INTO t SELECT - number, - number, - number, - number -FROM numbers(8192 * 3); - -INSERT INTO t SELECT - number + (8192 * 3), - number + (8192 * 3), - number + (8192 * 3), - number -FROM numbers(8192 * 3); - SYSTEM STOP MERGES t; +INSERT INTO t SELECT + number, + number, + number, + number +FROM numbers(8192 * 3); + +INSERT INTO t SELECT + number + (8192 * 3), + number + (8192 * 3), + number + (8192 * 3), + number +FROM numbers(8192 * 3); + -- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), -- both chunks come from the same part. 
SELECT x @@ -126,15 +126,39 @@ DROP TABLE t; SELECT '========'; -- from 02149_read_in_order_fixed_prefix -DROP TABLE IF EXISTS t_read_in_order; +DROP TABLE IF EXISTS fixed_prefix; -CREATE TABLE t_read_in_order(a UInt32, b UInt32) +CREATE TABLE fixed_prefix(a UInt32, b UInt32) ENGINE = MergeTree ORDER BY (a, b) SETTINGS index_granularity = 3; -SYSTEM STOP MERGES t_read_in_order; +SYSTEM STOP MERGES fixed_prefix; -INSERT INTO t_read_in_order VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); -INSERT INTO t_read_in_order VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); +INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); +INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); -SELECT a, b FROM t_read_in_order WHERE a = 1 ORDER BY b SETTINGS max_threads = 1; +SELECT a, b FROM fixed_prefix WHERE a = 1 ORDER BY b SETTINGS max_threads = 1; + +DROP TABLE fixed_prefix; + +SELECT '========'; +-- currently don't support virtual row in this case +DROP TABLE IF EXISTS function_pk; + +CREATE TABLE function_pk +( + `A` Int64, + `B` Int64 +) +ENGINE = MergeTree ORDER BY (A, -B) +SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES function_pk; + +INSERT INTO function_pk values(1,1); +INSERT INTO function_pk values(1,3); +INSERT INTO function_pk values(1,2); + +SELECT * FROM function_pk ORDER BY (A,-B) ASC limit 3 SETTINGS max_threads = 1; + +DROP TABLE function_pk; From bd05771faac142853dde6b1461d34e6d3d47e89e Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 21 May 2024 04:43:26 +0000 Subject: [PATCH 016/566] temporarily disable a test --- .../03031_read_in_order_optimization_with_virtual_row.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 198bf1eb307..aff9faf3968 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -159,6 +159,8 @@ INSERT INTO function_pk values(1,1); INSERT INTO function_pk values(1,3); INSERT INTO function_pk values(1,2); +-- TODO: handle preliminary merge for this case, temporarily disable it +SET optimize_read_in_order = 0; SELECT * FROM function_pk ORDER BY (A,-B) ASC limit 3 SETTINGS max_threads = 1; DROP TABLE function_pk; From 2750f8ca1d9b7336fbfec6fc0e73a8fbf17eadee Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 2 Jun 2024 02:27:48 +0200 Subject: [PATCH 017/566] Whitespace --- src/Storages/StorageSet.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp index 205a90423bf..a8c8e81e23d 100644 --- a/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ -130,7 +130,6 @@ StorageSetOrJoinBase::StorageSetOrJoinBase( storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); - if (relative_path_.empty()) throw Exception(ErrorCodes::INCORRECT_FILE_NAME, "Join and Set storages require data path"); From 6e08f415c49afeac27ce08f97cde365dbf5940a2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 2 Jun 2024 04:26:14 +0200 Subject: [PATCH 018/566] Preparation --- base/base/DecomposedFloat.h | 9 ++++++++ base/base/EnumReflection.h | 2 +- base/base/extended_types.h | 14 ++++-------- base/base/wide_integer_impl.h | 2 +- .../AggregateFunctionGroupArray.cpp | 4 ++-- .../AggregateFunctionGroupArrayMoving.cpp | 2 +- 
.../AggregateFunctionIntervalLengthSum.cpp | 4 ++-- .../AggregateFunctionSparkbar.cpp | 6 ++--- src/AggregateFunctions/AggregateFunctionSum.h | 6 ++--- src/AggregateFunctions/QuantileTDigest.h | 2 +- src/AggregateFunctions/ReservoirSampler.h | 2 +- .../ReservoirSamplerDeterministic.h | 2 +- src/Columns/ColumnVector.cpp | 16 +++++++------- src/Common/FieldVisitorConvertToNumber.h | 4 ++-- src/Common/HashTable/HashTable.h | 2 +- src/Common/NaNUtils.h | 14 ++++++------ src/Common/findExtreme.cpp | 2 +- src/Common/transformEndianness.h | 2 +- src/Core/AccurateComparison.h | 18 +++++++-------- src/Core/DecimalFunctions.h | 2 +- src/DataTypes/DataTypesDecimal.cpp | 5 +++-- src/DataTypes/NumberTraits.h | 22 +++++++++---------- src/Formats/ProtobufSerializer.cpp | 2 +- src/Functions/DivisionUtils.h | 16 +++++++------- src/Functions/FunctionMathUnary.h | 4 ++-- src/Functions/FunctionsConversion.cpp | 12 +++++----- src/Functions/FunctionsJSON.h | 4 ++-- src/Functions/FunctionsRound.h | 2 +- src/Functions/FunctionsVisitParam.h | 2 +- src/Functions/abs.cpp | 2 +- src/Functions/array/arrayAggregation.cpp | 2 +- src/Functions/factorial.cpp | 2 +- src/Functions/if.cpp | 16 +++++++------- src/Functions/minus.cpp | 4 ++-- src/Functions/moduloOrZero.cpp | 2 +- src/Functions/multiply.cpp | 4 ++-- src/Functions/plus.cpp | 4 ++-- src/Functions/sign.cpp | 2 +- src/IO/ReadHelpers.h | 2 +- src/IO/WriteHelpers.h | 10 ++++----- src/Interpreters/RowRefs.cpp | 2 +- 41 files changed, 120 insertions(+), 116 deletions(-) diff --git a/base/base/DecomposedFloat.h b/base/base/DecomposedFloat.h index 0997c39db16..b5bc3f08357 100644 --- a/base/base/DecomposedFloat.h +++ b/base/base/DecomposedFloat.h @@ -96,6 +96,15 @@ struct DecomposedFloat && ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0)); } + bool isFinite() const + { + return exponent() != ((1ull << Traits::exponent_bits) - 1); + } + + bool isNaN() const + { + return !isFinite() && (mantissa() != 0); + } /// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic. /// This function is generic, big integers (128, 256 bit) are supported as well. 
diff --git a/base/base/EnumReflection.h b/base/base/EnumReflection.h index 4a9de4d17a3..963c7e3f1b9 100644 --- a/base/base/EnumReflection.h +++ b/base/base/EnumReflection.h @@ -4,7 +4,7 @@ #include -template concept is_enum = std::is_enum_v; +template concept is_enum = std::is_enum_v; namespace detail { diff --git a/base/base/extended_types.h b/base/base/extended_types.h index de654152649..7ddf7de7e22 100644 --- a/base/base/extended_types.h +++ b/base/base/extended_types.h @@ -43,7 +43,7 @@ template <> struct is_unsigned { static constexpr bool value = true; }; template inline constexpr bool is_unsigned_v = is_unsigned::value; -template concept is_integer = +template concept is_integer = std::is_integral_v || std::is_same_v || std::is_same_v @@ -65,16 +65,10 @@ template <> struct is_arithmetic { static constexpr bool value = true; template inline constexpr bool is_arithmetic_v = is_arithmetic::value; -template -struct is_floating_point // NOLINT(readability-identifier-naming) -{ - static constexpr bool value = std::is_floating_point_v; -}; +template concept is_floating_point = + std::is_floating_point_v + || std::is_same_v; -template <> struct is_floating_point { static constexpr bool value = true; }; - -template -inline constexpr bool is_floating_point_v = is_floating_point::value; #define FOR_EACH_ARITHMETIC_TYPE(M) \ M(DataTypeDate) \ diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h index c950fd27fa3..d0bbd7df9d4 100644 --- a/base/base/wide_integer_impl.h +++ b/base/base/wide_integer_impl.h @@ -154,7 +154,7 @@ struct common_type, Arithmetic> static_assert(wide::ArithmeticConcept()); using type = std::conditional_t< - is_floating_point_v, + std::is_floating_point_v || std::is_same_v, Arithmetic, std::conditional_t< sizeof(Arithmetic) * 8 < Bits, diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp index 0b478fe3c04..3a0bbb001c3 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp @@ -73,7 +73,7 @@ template struct GroupArraySamplerData { /// For easy serialization. - static_assert(std::has_unique_object_representations_v || is_floating_point_v); + static_assert(std::has_unique_object_representations_v || is_floating_point); // Switch to ordinary Allocator after 4096 bytes to avoid fragmentation and trash in Arena using Allocator = MixedAlignedArenaAllocator; @@ -115,7 +115,7 @@ template struct GroupArrayNumericData { /// For easy serialization. - static_assert(std::has_unique_object_representations_v || is_floating_point_v); + static_assert(std::has_unique_object_representations_v || is_floating_point); // Switch to ordinary Allocator after 4096 bytes to avoid fragmentation and trash in Arena using Allocator = MixedAlignedArenaAllocator; diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp index ee6a82686c5..a9a09d7abd5 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp @@ -38,7 +38,7 @@ template struct MovingData { /// For easy serialization. 
- static_assert(std::has_unique_object_representations_v || is_floating_point_v); + static_assert(std::has_unique_object_representations_v || is_floating_point); using Accumulator = T; diff --git a/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp b/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp index 06156643aa0..e5404add820 100644 --- a/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp +++ b/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp @@ -187,7 +187,7 @@ public: static DataTypePtr createResultType() { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) return std::make_shared(); return std::make_shared(); } @@ -227,7 +227,7 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) assert_cast(to).getData().push_back(getIntervalLengthSum(this->data(place))); else assert_cast(to).getData().push_back(getIntervalLengthSum(this->data(place))); diff --git a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp index 5b6fc3b315c..33412d50b21 100644 --- a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp +++ b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp @@ -50,7 +50,7 @@ struct AggregateFunctionSparkbarData auto [it, inserted] = points.insert({x, y}); if (!inserted) { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { it->getMapped() += y; return it->getMapped(); @@ -197,7 +197,7 @@ private: Y res; bool has_overfllow = false; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) res = histogram[index] + point.getMapped(); else has_overfllow = common::addOverflow(histogram[index], point.getMapped(), res); @@ -246,7 +246,7 @@ private: } constexpr auto levels_num = static_cast(BAR_LEVELS - 1); - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { y = y / (y_max / levels_num) + 1; } diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index c663c632280..d0d600be70b 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -69,7 +69,7 @@ struct AggregateFunctionSumData size_t count = end - start; const auto * end_ptr = ptr + count; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { /// Compiler cannot unroll this loop, do it manually. 
/// (at least for floats, most likely due to the lack of -fassociative-math) @@ -193,7 +193,7 @@ struct AggregateFunctionSumData Impl::add(sum, local_sum); return; } - else if constexpr (is_floating_point_v) + else if constexpr (is_floating_point) { /// For floating point we use a similar trick as above, except that now we reinterpret the floating point number as an unsigned /// integer of the same size and use a mask instead (0 to discard, 0xFF..FF to keep) @@ -306,7 +306,7 @@ struct AggregateFunctionSumData template struct AggregateFunctionSumKahanData { - static_assert(is_floating_point_v, + static_assert(is_floating_point, "It doesn't make sense to use Kahan Summation algorithm for non floating point types"); T sum{}; diff --git a/src/AggregateFunctions/QuantileTDigest.h b/src/AggregateFunctions/QuantileTDigest.h index 408e500e941..a693c57e6d8 100644 --- a/src/AggregateFunctions/QuantileTDigest.h +++ b/src/AggregateFunctions/QuantileTDigest.h @@ -379,7 +379,7 @@ public: ResultType getImpl(Float64 level) { if (centroids.empty()) - return is_floating_point_v ? std::numeric_limits::quiet_NaN() : 0; + return is_floating_point ? std::numeric_limits::quiet_NaN() : 0; compress(); diff --git a/src/AggregateFunctions/ReservoirSampler.h b/src/AggregateFunctions/ReservoirSampler.h index 182a49af2ca..c21e76614c1 100644 --- a/src/AggregateFunctions/ReservoirSampler.h +++ b/src/AggregateFunctions/ReservoirSampler.h @@ -278,6 +278,6 @@ private: if (OnEmpty == ReservoirSamplerOnEmpty::THROW) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSampler"); else - return NanLikeValueConstructor>::getValue(); + return NanLikeValueConstructor>::getValue(); } }; diff --git a/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h index c9afcb21549..7fe5d23f4e4 100644 --- a/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ b/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -272,7 +272,7 @@ private: if (OnEmpty == ReservoirSamplerDeterministicOnEmpty::THROW) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSamplerDeterministic"); else - return NanLikeValueConstructor>::getValue(); + return NanLikeValueConstructor>::getValue(); } }; diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp index 19849b8a1c6..2b137231faa 100644 --- a/src/Columns/ColumnVector.cpp +++ b/src/Columns/ColumnVector.cpp @@ -118,7 +118,7 @@ struct ColumnVector::less_stable if (unlikely(parent.data[lhs] == parent.data[rhs])) return lhs < rhs; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { if (unlikely(std::isnan(parent.data[lhs]) && std::isnan(parent.data[rhs]))) { @@ -150,7 +150,7 @@ struct ColumnVector::greater_stable if (unlikely(parent.data[lhs] == parent.data[rhs])) return lhs < rhs; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { if (unlikely(std::isnan(parent.data[lhs]) && std::isnan(parent.data[rhs]))) { @@ -224,9 +224,9 @@ void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction iota(res.data(), data_size, IColumn::Permutation::value_type(0)); - if constexpr (has_find_extreme_implementation && !std::is_floating_point_v) + if constexpr (has_find_extreme_implementation && !is_floating_point) { - /// Disabled for:floating point + /// Disabled for floating point: /// * floating point: We don't deal with nan_direction_hint /// * stability::Stable: We might return any value, not the first if ((limit == 1) && (stability 
== IColumn::PermutationSortStability::Unstable)) @@ -256,7 +256,7 @@ void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction bool sort_is_stable = stability == IColumn::PermutationSortStability::Stable; /// TODO: LSD RadixSort is currently not stable if direction is descending, or value is floating point - bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point_v) || !sort_is_stable; + bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point) || !sort_is_stable; /// Thresholds on size. Lower threshold is arbitrary. Upper threshold is chosen by the type for histogram counters. if (data_size >= 256 && data_size <= std::numeric_limits::max() && use_radix_sort) @@ -283,7 +283,7 @@ void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction /// Radix sort treats all NaNs to be greater than all numbers. /// If the user needs the opposite, we must move them accordingly. - if (is_floating_point_v && nan_direction_hint < 0) + if (is_floating_point && nan_direction_hint < 0) { size_t nans_to_move = 0; @@ -330,7 +330,7 @@ void ColumnVector::updatePermutation(IColumn::PermutationSortDirection direct if constexpr (is_arithmetic_v && !is_big_int_v) { /// TODO: LSD RadixSort is currently not stable if direction is descending, or value is floating point - bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point_v) || !sort_is_stable; + bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point) || !sort_is_stable; size_t size = end - begin; /// Thresholds on size. Lower threshold is arbitrary. Upper threshold is chosen by the type for histogram counters. @@ -353,7 +353,7 @@ void ColumnVector::updatePermutation(IColumn::PermutationSortDirection direct /// Radix sort treats all NaNs to be greater than all numbers. /// If the user needs the opposite, we must move them accordingly. - if (is_floating_point_v && nan_direction_hint < 0) + if (is_floating_point && nan_direction_hint < 0) { size_t nans_to_move = 0; diff --git a/src/Common/FieldVisitorConvertToNumber.h b/src/Common/FieldVisitorConvertToNumber.h index 646caadce35..ebd084df54d 100644 --- a/src/Common/FieldVisitorConvertToNumber.h +++ b/src/Common/FieldVisitorConvertToNumber.h @@ -58,7 +58,7 @@ public: T operator() (const Float64 & x) const { - if constexpr (!is_floating_point_v) + if constexpr (!is_floating_point) { if (!isFinite(x)) { @@ -88,7 +88,7 @@ public: template T operator() (const DecimalField & x) const { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) return x.getValue().template convertTo() / x.getScaleMultiplier().template convertTo(); else return (x.getValue() / x.getScaleMultiplier()).template convertTo(); diff --git a/src/Common/HashTable/HashTable.h b/src/Common/HashTable/HashTable.h index fd8832a56a3..8237c81461f 100644 --- a/src/Common/HashTable/HashTable.h +++ b/src/Common/HashTable/HashTable.h @@ -91,7 +91,7 @@ inline bool bitEquals(T && a, T && b) { using RealT = std::decay_t; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) /// Note that memcmp with constant size is compiler builtin. return 0 == memcmp(&a, &b, sizeof(RealT)); /// NOLINT else diff --git a/src/Common/NaNUtils.h b/src/Common/NaNUtils.h index 0e885541599..3e4af902104 100644 --- a/src/Common/NaNUtils.h +++ b/src/Common/NaNUtils.h @@ -3,24 +3,24 @@ #include #include #include +#include template inline bool isNaN(T x) { /// To be sure, that this function is zero-cost for non-floating point types. 
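+    /// (Implemented via DecomposedFloat so that the same bit-level check works for every supported floating-point type.)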
- if constexpr (is_floating_point_v) - return std::isnan(x); + if constexpr (is_floating_point) + return DecomposedFloat(x).isNaN(); else return false; } - template inline bool isFinite(T x) { - if constexpr (is_floating_point_v) - return std::isfinite(x); + if constexpr (is_floating_point) + return DecomposedFloat(x).isFinite(); else return true; } @@ -28,7 +28,7 @@ inline bool isFinite(T x) template bool canConvertTo(Float64 x) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) return true; if (!isFinite(x)) return false; @@ -41,7 +41,7 @@ bool canConvertTo(Float64 x) template T NaNOrZero() { - if constexpr (is_floating_point_v) + if constexpr (std::is_floating_point_v) return std::numeric_limits::quiet_NaN(); else return {}; diff --git a/src/Common/findExtreme.cpp b/src/Common/findExtreme.cpp index ce3bbb86d7c..a29750b848a 100644 --- a/src/Common/findExtreme.cpp +++ b/src/Common/findExtreme.cpp @@ -47,7 +47,7 @@ MULTITARGET_FUNCTION_AVX2_SSE42( /// Unroll the loop manually for floating point, since the compiler doesn't do it without fastmath /// as it might change the return value - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { constexpr size_t unroll_block = 512 / sizeof(T); /// Chosen via benchmarks with AVX2 so YMMV size_t unrolled_end = i + (((count - i) / unroll_block) * unroll_block); diff --git a/src/Common/transformEndianness.h b/src/Common/transformEndianness.h index 2a0c45efe38..e6e04ec75af 100644 --- a/src/Common/transformEndianness.h +++ b/src/Common/transformEndianness.h @@ -38,7 +38,7 @@ inline void transformEndianness(T & x) } template -requires is_floating_point_v +requires is_floating_point inline void transformEndianness(T & value) { if constexpr (ToEndian != FromEndian) diff --git a/src/Core/AccurateComparison.h b/src/Core/AccurateComparison.h index c1e93b8055a..87ff14e40e7 100644 --- a/src/Core/AccurateComparison.h +++ b/src/Core/AccurateComparison.h @@ -25,7 +25,7 @@ bool lessOp(A a, B b) return a < b; /// float vs float - if constexpr (is_floating_point_v && is_floating_point_v) + if constexpr (is_floating_point && is_floating_point) return a < b; /// anything vs NaN @@ -49,7 +49,7 @@ bool lessOp(A a, B b) } /// int vs float - if constexpr (is_integer && is_floating_point_v) + if constexpr (is_integer && is_floating_point) { if constexpr (sizeof(A) <= 4) return static_cast(a) < static_cast(b); @@ -57,7 +57,7 @@ bool lessOp(A a, B b) return DecomposedFloat(b).greater(a); } - if constexpr (is_floating_point_v && is_integer) + if constexpr (is_floating_point && is_integer) { if constexpr (sizeof(B) <= 4) return static_cast(a) < static_cast(b); @@ -65,8 +65,8 @@ bool lessOp(A a, B b) return DecomposedFloat(a).less(b); } - static_assert(is_integer || is_floating_point_v); - static_assert(is_integer || is_floating_point_v); + static_assert(is_integer || is_floating_point); + static_assert(is_integer || is_floating_point); UNREACHABLE(); } @@ -101,7 +101,7 @@ bool equalsOp(A a, B b) return a == b; /// float vs float - if constexpr (is_floating_point_v && is_floating_point_v) + if constexpr (is_floating_point && is_floating_point) return a == b; /// anything vs NaN @@ -125,7 +125,7 @@ bool equalsOp(A a, B b) } /// int vs float - if constexpr (is_integer && is_floating_point_v) + if constexpr (is_integer && is_floating_point) { if constexpr (sizeof(A) <= 4) return static_cast(a) == static_cast(b); @@ -133,7 +133,7 @@ bool equalsOp(A a, B b) return DecomposedFloat(b).equals(a); } - if constexpr (is_floating_point_v 
&& is_integer) + if constexpr (is_floating_point && is_integer) { if constexpr (sizeof(B) <= 4) return static_cast(a) == static_cast(b); @@ -163,7 +163,7 @@ inline bool NO_SANITIZE_UNDEFINED convertNumeric(From value, To & result) return true; } - if constexpr (is_floating_point_v && is_floating_point_v) + if constexpr (is_floating_point && is_floating_point) { /// Note that NaNs doesn't compare equal to anything, but they are still in range of any Float type. if (isNaN(value)) diff --git a/src/Core/DecimalFunctions.h b/src/Core/DecimalFunctions.h index c5bc4ad70f6..435cef61145 100644 --- a/src/Core/DecimalFunctions.h +++ b/src/Core/DecimalFunctions.h @@ -310,7 +310,7 @@ ReturnType convertToImpl(const DecimalType & decimal, UInt32 scale, To & result) using DecimalNativeType = typename DecimalType::NativeType; static constexpr bool throw_exception = std::is_void_v; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { result = static_cast(decimal.value) / static_cast(scaleMultiplier(scale)); } diff --git a/src/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp index 77a7a3e7237..d87eff97675 100644 --- a/src/DataTypes/DataTypesDecimal.cpp +++ b/src/DataTypes/DataTypesDecimal.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -269,9 +270,9 @@ ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & value, static constexpr bool throw_exception = std::is_same_v; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { - if (!std::isfinite(value)) + if (!isFinite(value)) { if constexpr (throw_exception) throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "{} convert overflow. Cannot convert infinity or NaN to decimal", ToDataType::family_name); diff --git a/src/DataTypes/NumberTraits.h b/src/DataTypes/NumberTraits.h index ad1e9eaa67b..ee0d9812097 100644 --- a/src/DataTypes/NumberTraits.h +++ b/src/DataTypes/NumberTraits.h @@ -74,7 +74,7 @@ template struct ResultOfAdditionMultiplication { using Type = typename Construct< is_signed_v || is_signed_v, - is_floating_point_v || is_floating_point_v, + is_floating_point || is_floating_point, nextSize(max(sizeof(A), sizeof(B)))>::Type; }; @@ -82,7 +82,7 @@ template struct ResultOfSubtraction { using Type = typename Construct< true, - is_floating_point_v || is_floating_point_v, + is_floating_point || is_floating_point, nextSize(max(sizeof(A), sizeof(B)))>::Type; }; @@ -113,7 +113,7 @@ template struct ResultOfModulo /// Example: toInt32(-199) % toUInt8(200) will return -199 that does not fit in Int8, only in Int16. static constexpr size_t size_of_result = result_is_signed ? nextSize(sizeof(B)) : sizeof(B); using Type0 = typename Construct::Type; - using Type = std::conditional_t || is_floating_point_v, Float64, Type0>; + using Type = std::conditional_t || is_floating_point, Float64, Type0>; }; template struct ResultOfPositiveModulo @@ -121,21 +121,21 @@ template struct ResultOfPositiveModulo /// function positive_modulo always return non-negative number. 
static constexpr size_t size_of_result = sizeof(B); using Type0 = typename Construct::Type; - using Type = std::conditional_t || is_floating_point_v, Float64, Type0>; + using Type = std::conditional_t || is_floating_point, Float64, Type0>; }; template struct ResultOfModuloLegacy { using Type0 = typename Construct || is_signed_v, false, sizeof(B)>::Type; - using Type = std::conditional_t || is_floating_point_v, Float64, Type0>; + using Type = std::conditional_t || is_floating_point, Float64, Type0>; }; template struct ResultOfNegate { using Type = typename Construct< true, - is_floating_point_v, + is_floating_point, is_signed_v ? sizeof(A) : nextSize(sizeof(A))>::Type; }; @@ -143,7 +143,7 @@ template struct ResultOfAbs { using Type = typename Construct< false, - is_floating_point_v, + is_floating_point, sizeof(A)>::Type; }; @@ -154,7 +154,7 @@ template struct ResultOfBit using Type = typename Construct< is_signed_v || is_signed_v, false, - is_floating_point_v || is_floating_point_v ? 8 : max(sizeof(A), sizeof(B))>::Type; + is_floating_point || is_floating_point ? 8 : max(sizeof(A), sizeof(B))>::Type; }; template struct ResultOfBitNot @@ -180,7 +180,7 @@ template struct ResultOfBitNot template struct ResultOfIf { - static constexpr bool has_float = is_floating_point_v || is_floating_point_v; + static constexpr bool has_float = is_floating_point || is_floating_point; static constexpr bool has_integer = is_integer || is_integer; static constexpr bool has_signed = is_signed_v || is_signed_v; static constexpr bool has_unsigned = !is_signed_v || !is_signed_v; @@ -189,7 +189,7 @@ struct ResultOfIf static constexpr size_t max_size_of_unsigned_integer = max(is_signed_v ? 0 : sizeof(A), is_signed_v ? 0 : sizeof(B)); static constexpr size_t max_size_of_signed_integer = max(is_signed_v ? sizeof(A) : 0, is_signed_v ? sizeof(B) : 0); static constexpr size_t max_size_of_integer = max(is_integer ? sizeof(A) : 0, is_integer ? sizeof(B) : 0); - static constexpr size_t max_size_of_float = max(is_floating_point_v ? sizeof(A) : 0, is_floating_point_v ? sizeof(B) : 0); + static constexpr size_t max_size_of_float = max(is_floating_point ? sizeof(A) : 0, is_floating_point ? sizeof(B) : 0); using ConstructedType = typename Construct= max_size_of_float) @@ -211,7 +211,7 @@ template struct ToInteger using Type = typename Construct< is_signed_v, false, - is_floating_point_v ? 8 : sizeof(A)>::Type; + is_floating_point ? 
8 : sizeof(A)>::Type; }; diff --git a/src/Formats/ProtobufSerializer.cpp b/src/Formats/ProtobufSerializer.cpp index 7f03bdeb45d..86b11f45b72 100644 --- a/src/Formats/ProtobufSerializer.cpp +++ b/src/Formats/ProtobufSerializer.cpp @@ -541,7 +541,7 @@ namespace case FieldTypeId::TYPE_ENUM: { - if (is_floating_point_v) + if (is_floating_point) incompatibleColumnType(TypeName); write_function = [this](NumberType value) diff --git a/src/Functions/DivisionUtils.h b/src/Functions/DivisionUtils.h index 1a241c7171a..e8f5da342f8 100644 --- a/src/Functions/DivisionUtils.h +++ b/src/Functions/DivisionUtils.h @@ -47,9 +47,9 @@ inline auto checkedDivision(A a, B b) { throwIfDivisionLeadsToFPE(a, b); - if constexpr (is_big_int_v && is_floating_point_v) + if constexpr (is_big_int_v && is_floating_point) return static_cast(a) / b; - else if constexpr (is_big_int_v && is_floating_point_v) + else if constexpr (is_big_int_v && is_floating_point) return a / static_cast(b); else if constexpr (is_big_int_v && is_big_int_v) return static_cast(a / b); @@ -86,17 +86,17 @@ struct DivideIntegralImpl { /// Comparisons are not strict to avoid rounding issues when operand is implicitly casted to float. - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) if (isNaN(a) || a >= std::numeric_limits::max() || a <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) if (isNaN(b) || b >= std::numeric_limits::max() || b <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); auto res = checkedDivision(CastA(a), CastB(b)); - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) if (isNaN(res) || res >= static_cast(std::numeric_limits::max()) || res <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division, because it will produce infinite or too large number"); @@ -122,18 +122,18 @@ struct ModuloImpl template static Result apply(A a, B b) { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { /// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance. 
return static_cast(a) - trunc(static_cast(a) / static_cast(b)) * static_cast(b); } else { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) if (isNaN(a) || a > std::numeric_limits::max() || a < std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) if (isNaN(b) || b > std::numeric_limits::max() || b < std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); diff --git a/src/Functions/FunctionMathUnary.h b/src/Functions/FunctionMathUnary.h index 8395855a564..2cbd9b2e03c 100644 --- a/src/Functions/FunctionMathUnary.h +++ b/src/Functions/FunctionMathUnary.h @@ -66,7 +66,7 @@ private: /// Process all data as a whole and use FastOps implementation /// If the argument is integer, convert to Float64 beforehand - if constexpr (!is_floating_point_v) + if constexpr (!is_floating_point) { PODArray tmp_vec(size); for (size_t i = 0; i < size; ++i) @@ -150,7 +150,7 @@ private: { using Types = std::decay_t; using Type = typename Types::RightType; - using ReturnType = std::conditional_t, Float64, Type>; + using ReturnType = std::conditional_t, Float64, Type>; using ColVecType = ColumnVectorOrDecimal; const auto col_vec = checkAndGetColumn(col.column.get()); diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 44d0b750af9..8512ea5726f 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -638,7 +638,7 @@ inline void convertFromTime(DataTypeDateTime::FieldType & x, t template void parseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool precise_float_parsing) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { if (precise_float_parsing) readFloatTextPrecise(x, rb); @@ -702,7 +702,7 @@ inline void parseImpl(DataTypeIPv6::FieldType & x, ReadBuffer & rb template bool tryParseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool precise_float_parsing) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { if (precise_float_parsing) return tryReadFloatTextPrecise(x, rb); @@ -1767,7 +1767,7 @@ struct ConvertImpl else { /// If From Data is Nan or Inf and we convert to integer type, throw exception - if constexpr (std::is_floating_point_v && !std::is_floating_point_v) + if constexpr (is_floating_point && !is_floating_point) { if (!isFinite(vec_from[i])) { @@ -2253,9 +2253,9 @@ private: using RightT = typename RightDataType::FieldType; static constexpr bool bad_left = - is_decimal || std::is_floating_point_v || is_big_int_v || is_signed_v; + is_decimal || is_floating_point || is_big_int_v || is_signed_v; static constexpr bool bad_right = - is_decimal || std::is_floating_point_v || is_big_int_v || is_signed_v; + is_decimal || is_floating_point || is_big_int_v || is_signed_v; /// Disallow int vs UUID conversion (but support int vs UInt128 conversion) if constexpr ((bad_left && std::is_same_v) || @@ -2578,7 +2578,7 @@ struct ToNumberMonotonicity /// Float cases. /// When converting to Float, the conversion is always monotonic. 
- if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) return { .is_monotonic = true, .is_always_monotonic = true }; const auto * low_cardinality = typeid_cast(&type); diff --git a/src/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h index 8a2ad457d34..65c1a6fb2d2 100644 --- a/src/Functions/FunctionsJSON.h +++ b/src/Functions/FunctionsJSON.h @@ -741,7 +741,7 @@ public: switch (element.type()) { case ElementType::DOUBLE: - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { /// We permit inaccurate conversion of double to float. /// Example: double 0.1 from JSON is not representable in float. @@ -769,7 +769,7 @@ public: case ElementType::STRING: { auto rb = ReadBufferFromMemory{element.getString()}; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { if (!tryReadFloatText(value, rb) || !rb.eof()) return false; diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index ab62deed45d..46fbe70458d 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -453,7 +453,7 @@ template - using FunctionRoundingImpl = std::conditional_t, + using FunctionRoundingImpl = std::conditional_t, FloatRoundingImpl, IntegerRoundingImpl>; diff --git a/src/Functions/FunctionsVisitParam.h b/src/Functions/FunctionsVisitParam.h index 5e13fbbad5c..fd59ea3a9c1 100644 --- a/src/Functions/FunctionsVisitParam.h +++ b/src/Functions/FunctionsVisitParam.h @@ -57,7 +57,7 @@ struct ExtractNumericType ResultType x = 0; if (!in.eof()) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) tryReadFloatText(x, in); else tryReadIntText(x, in); diff --git a/src/Functions/abs.cpp b/src/Functions/abs.cpp index 9ac2363f765..3a618686b30 100644 --- a/src/Functions/abs.cpp +++ b/src/Functions/abs.cpp @@ -22,7 +22,7 @@ struct AbsImpl return a < 0 ? 
static_cast(~a) + 1 : static_cast(a); else if constexpr (is_integer && is_unsigned_v) return static_cast(a); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) return static_cast(std::abs(a)); } diff --git a/src/Functions/array/arrayAggregation.cpp b/src/Functions/array/arrayAggregation.cpp index 03aa5fb9086..9c17e1095c5 100644 --- a/src/Functions/array/arrayAggregation.cpp +++ b/src/Functions/array/arrayAggregation.cpp @@ -85,7 +85,7 @@ struct ArrayAggregateResultImpl std::conditional_t, Decimal128, std::conditional_t, Decimal256, std::conditional_t, Decimal128, - std::conditional_t, Float64, + std::conditional_t, Float64, std::conditional_t, Int64, UInt64>>>>>>>>>>>; }; diff --git a/src/Functions/factorial.cpp b/src/Functions/factorial.cpp index 3b46d9e867f..32bdc84b954 100644 --- a/src/Functions/factorial.cpp +++ b/src/Functions/factorial.cpp @@ -21,7 +21,7 @@ struct FactorialImpl static NO_SANITIZE_UNDEFINED ResultType apply(A a) { - if constexpr (is_floating_point_v || is_over_big_int) + if constexpr (is_floating_point || is_over_big_int) throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type of argument of function factorial, should not be floating point or big int"); diff --git a/src/Functions/if.cpp b/src/Functions/if.cpp index 7a6d37d810d..dded3d46652 100644 --- a/src/Functions/if.cpp +++ b/src/Functions/if.cpp @@ -87,7 +87,7 @@ inline void fillVectorVector(const ArrayCond & cond, const ArrayA & a, const Arr { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[a_index]) + (!cond[i]) * static_cast(b[b_index]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[a_index], b[b_index], res[i]) } @@ -105,7 +105,7 @@ inline void fillVectorVector(const ArrayCond & cond, const ArrayA & a, const Arr { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[a_index]) + (!cond[i]) * static_cast(b[i]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[a_index], b[i], res[i]) } @@ -122,7 +122,7 @@ inline void fillVectorVector(const ArrayCond & cond, const ArrayA & a, const Arr { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[i]) + (!cond[i]) * static_cast(b[b_index]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[i], b[b_index], res[i]) } @@ -138,7 +138,7 @@ inline void fillVectorVector(const ArrayCond & cond, const ArrayA & a, const Arr { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[i]) + (!cond[i]) * static_cast(b[i]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[i], b[i], res[i]) } @@ -162,7 +162,7 @@ inline void fillVectorConstant(const ArrayCond & cond, const ArrayA & a, B b, Ar { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[a_index]) + (!cond[i]) * static_cast(b); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[a_index], b, res[i]) } @@ -178,7 +178,7 @@ inline void fillVectorConstant(const ArrayCond & cond, const ArrayA & a, B b, Ar { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[i]) + (!cond[i]) * static_cast(b); - else if constexpr 
(std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[i], b, res[i]) } @@ -200,7 +200,7 @@ inline void fillConstantVector(const ArrayCond & cond, A a, const ArrayB & b, Ar { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a) + (!cond[i]) * static_cast(b[b_index]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a, b[b_index], res[i]) } @@ -216,7 +216,7 @@ inline void fillConstantVector(const ArrayCond & cond, A a, const ArrayB & b, Ar { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a) + (!cond[i]) * static_cast(b[i]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a, b[i], res[i]) } diff --git a/src/Functions/minus.cpp b/src/Functions/minus.cpp index 4d86442ad7e..cf318db805b 100644 --- a/src/Functions/minus.cpp +++ b/src/Functions/minus.cpp @@ -17,8 +17,8 @@ struct MinusImpl { if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) - static_cast(static_cast(b)); } diff --git a/src/Functions/moduloOrZero.cpp b/src/Functions/moduloOrZero.cpp index d233e4e4ce2..5a4d1539345 100644 --- a/src/Functions/moduloOrZero.cpp +++ b/src/Functions/moduloOrZero.cpp @@ -17,7 +17,7 @@ struct ModuloOrZeroImpl template static Result apply(A a, B b) { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { /// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance. return ResultType(a) - trunc(ResultType(a) / ResultType(b)) * ResultType(b); diff --git a/src/Functions/multiply.cpp b/src/Functions/multiply.cpp index 559143a43b4..740ab81d0d9 100644 --- a/src/Functions/multiply.cpp +++ b/src/Functions/multiply.cpp @@ -18,8 +18,8 @@ struct MultiplyImpl { if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) * static_cast(static_cast(b)); } diff --git a/src/Functions/plus.cpp b/src/Functions/plus.cpp index 00136e50c5b..26921713f78 100644 --- a/src/Functions/plus.cpp +++ b/src/Functions/plus.cpp @@ -19,8 +19,8 @@ struct PlusImpl /// Next everywhere, static_cast - so that there is no wrong result in expressions of the form Int64 c = UInt32(a) * Int32(-1). if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) + static_cast(static_cast(b)); } diff --git a/src/Functions/sign.cpp b/src/Functions/sign.cpp index 16f0efd2201..a6396a58c0c 100644 --- a/src/Functions/sign.cpp +++ b/src/Functions/sign.cpp @@ -13,7 +13,7 @@ struct SignImpl static NO_SANITIZE_UNDEFINED ResultType apply(A a) { - if constexpr (is_decimal || is_floating_point_v) + if constexpr (is_decimal || is_floating_point) return a < A(0) ? -1 : a == A(0) ? 0 : 1; else if constexpr (is_signed_v) return a < 0 ? -1 : a == 0 ? 
0 : 1; diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 6dda5a9b089..f1fcbb07af5 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -1382,7 +1382,7 @@ inline bool tryReadText(IPv4 & x, ReadBuffer & buf) { return tryReadIPv4Text(x, inline bool tryReadText(IPv6 & x, ReadBuffer & buf) { return tryReadIPv6Text(x, buf); } template -requires is_floating_point_v +requires is_floating_point inline void readText(T & x, ReadBuffer & buf) { readFloatText(x, buf); } inline void readText(String & x, ReadBuffer & buf) { readEscapedString(x, buf); } diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index cdeabfcf352..a4eefeaffe2 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -150,7 +150,7 @@ inline void writeBoolText(bool x, WriteBuffer & buf) template -requires is_floating_point_v +requires is_floating_point inline size_t writeFloatTextFastPath(T x, char * buffer) { Int64 result = 0; @@ -182,7 +182,7 @@ inline size_t writeFloatTextFastPath(T x, char * buffer) } template -requires is_floating_point_v +requires is_floating_point inline void writeFloatText(T x, WriteBuffer & buf) { using Converter = DoubleConverter; @@ -530,7 +530,7 @@ void writeJSONNumber(T x, WriteBuffer & ostr, const FormatSettings & settings) bool is_finite = isFinite(x); const bool need_quote = (is_integer && (sizeof(T) >= 8) && settings.json.quote_64bit_integers) - || (settings.json.quote_denormals && !is_finite) || (is_floating_point_v && (sizeof(T) >= 8) && settings.json.quote_64bit_floats); + || (settings.json.quote_denormals && !is_finite) || (is_floating_point && (sizeof(T) >= 8) && settings.json.quote_64bit_floats); if (need_quote) writeChar('"', ostr); @@ -541,7 +541,7 @@ void writeJSONNumber(T x, WriteBuffer & ostr, const FormatSettings & settings) writeCString("null", ostr); else { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { if (std::signbit(x)) { @@ -1065,7 +1065,7 @@ inline void writeText(is_integer auto x, WriteBuffer & buf) } template -requires is_floating_point_v +requires is_floating_point inline void writeText(T x, WriteBuffer & buf) { writeFloatText(x, buf); } inline void writeText(is_enum auto x, WriteBuffer & buf) { writeText(magic_enum::enum_name(x), buf); } diff --git a/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp index 9785ba46dab..c5ffbb96d6f 100644 --- a/src/Interpreters/RowRefs.cpp +++ b/src/Interpreters/RowRefs.cpp @@ -183,7 +183,7 @@ private: if (sorted.load(std::memory_order_relaxed)) return; - if constexpr (std::is_arithmetic_v && !std::is_floating_point_v) + if constexpr (std::is_arithmetic_v && !std::is_floating_point) { if (likely(entries.size() > 256)) { From bf2a8f6a7f6eb8073b60468058f8259cf4a4f341 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 2 Jun 2024 20:43:02 +0200 Subject: [PATCH 019/566] Preparation --- base/base/BFloat16.h | 17 +- src/AggregateFunctions/AggregateFunctionSum.h | 13 +- .../AggregateFunctionUniq.h | 2 +- src/Core/DecimalFunctions.h | 10 +- src/Core/iostream_debug_helpers.cpp | 149 ------------------ src/Core/iostream_debug_helpers.h | 49 ------ src/DataTypes/DataTypesDecimal.cpp | 15 +- src/Dictionaries/RangeHashedDictionary.h | 3 +- src/Functions/FunctionsRound.h | 3 +- src/Functions/array/mapPopulateSeries.cpp | 32 ++-- src/Functions/exp.cpp | 9 +- src/Functions/log.cpp | 9 +- src/Functions/minus.cpp | 4 +- src/Functions/sigmoid.cpp | 10 +- src/Functions/tanh.cpp | 9 +- src/IO/WriteHelpers.h | 14 +- src/Interpreters/RowRefs.cpp | 2 +- 
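Note on the BFloat16 helpers reworked in this patch (base/base/BFloat16.h, shown below): they rely on bfloat16 being the upper half of an IEEE-754 binary32 value, i.e. 1 sign bit, 8 exponent bits and 7 mantissa bits. A minimal standalone sketch of the same conversions, assuming a compiler with __bf16 support and C++20 std::bit_cast; function names here are illustrative, the patch itself uses the project's bit_cast and the names BFloat16ToFloat32 / Float32ToBFloat16:

#include <bit>
#include <cstdint>

using BFloat16 = __bf16;

/// Widening is exact: the 16 stored bits become the high half of a float32 bit pattern.
inline float bfloat16ToFloat32(BFloat16 x)
{
    return std::bit_cast<float>(static_cast<uint32_t>(std::bit_cast<uint16_t>(x)) << 16);
}

/// Narrowing simply keeps the high 16 bits and drops the low mantissa bits (truncation,
/// no rounding), which matches what the patch does.
inline BFloat16 float32ToBFloat16(float x)
{
    return std::bit_cast<BFloat16>(static_cast<uint16_t>(std::bit_cast<uint32_t>(x) >> 16));
}

/// The patch's isfinite() checks that the 8 exponent bits are not all ones:
/// (bits & 0b0111111110000000) != 0b0111111110000000.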
src/Parsers/iostream_debug_helpers.cpp | 35 ---- src/Parsers/iostream_debug_helpers.h | 17 -- 19 files changed, 110 insertions(+), 292 deletions(-) delete mode 100644 src/Core/iostream_debug_helpers.cpp delete mode 100644 src/Core/iostream_debug_helpers.h delete mode 100644 src/Parsers/iostream_debug_helpers.cpp delete mode 100644 src/Parsers/iostream_debug_helpers.h diff --git a/base/base/BFloat16.h b/base/base/BFloat16.h index 17c3ebe9ef3..99eab5c67cb 100644 --- a/base/base/BFloat16.h +++ b/base/base/BFloat16.h @@ -1,9 +1,22 @@ #pragma once +#include + + using BFloat16 = __bf16; namespace std { - inline constexpr bool isfinite(BFloat16) { return true; } - inline constexpr bool signbit(BFloat16) { return false; } + inline constexpr bool isfinite(BFloat16 x) { return (bit_cast(x) & 0b0111111110000000) != 0b0111111110000000; } + inline constexpr bool signbit(BFloat16 x) { return bit_cast(x) & 0b1000000000000000; } +} + +inline Float32 BFloat16ToFloat32(BFloat16 x) +{ + return bit_cast(static_cast(bit_cast(x)) << 16); +} + +inline BFloat16 Float32ToBFloat16(Float32 x) +{ + return bit_cast(std::bit_cast(x) >> 16); } diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index d0d600be70b..f6c51241a5c 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -193,12 +193,11 @@ struct AggregateFunctionSumData Impl::add(sum, local_sum); return; } - else if constexpr (is_floating_point) + else if constexpr (is_floating_point && (sizeof(Value) == 4 || sizeof(Value) == 8)) { - /// For floating point we use a similar trick as above, except that now we reinterpret the floating point number as an unsigned + /// For floating point we use a similar trick as above, except that now we reinterpret the floating point number as an unsigned /// integer of the same size and use a mask instead (0 to discard, 0xFF..FF to keep) - static_assert(sizeof(Value) == 4 || sizeof(Value) == 8); - using equivalent_integer = typename std::conditional_t; + using EquivalentInteger = typename std::conditional_t; constexpr size_t unroll_count = 128 / sizeof(T); T partial_sums[unroll_count]{}; @@ -209,11 +208,11 @@ struct AggregateFunctionSumData { for (size_t i = 0; i < unroll_count; ++i) { - equivalent_integer value; - std::memcpy(&value, &ptr[i], sizeof(Value)); + EquivalentInteger value; + memcpy(&value, &ptr[i], sizeof(Value)); value &= (!condition_map[i] != add_if_zero) - 1; Value d; - std::memcpy(&d, &value, sizeof(Value)); + memcpy(&d, &value, sizeof(Value)); Impl::add(partial_sums[i], d); } ptr += unroll_count; diff --git a/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h index cef23f766c7..cd2d3c1eb18 100644 --- a/src/AggregateFunctions/AggregateFunctionUniq.h +++ b/src/AggregateFunctions/AggregateFunctionUniq.h @@ -257,7 +257,7 @@ template struct AggregateFunctionUniqTraits { static UInt64 hash(T x) { - if constexpr (std::is_same_v || std::is_same_v) + if constexpr (is_floating_point) { return bit_cast(x); } diff --git a/src/Core/DecimalFunctions.h b/src/Core/DecimalFunctions.h index 435cef61145..abd660a8a7f 100644 --- a/src/Core/DecimalFunctions.h +++ b/src/Core/DecimalFunctions.h @@ -17,6 +17,7 @@ class DataTypeNumber; namespace ErrorCodes { + extern const int NOT_IMPLEMENTED; extern const int DECIMAL_OVERFLOW; extern const int ARGUMENT_OUT_OF_BOUND; } @@ -310,7 +311,14 @@ ReturnType convertToImpl(const DecimalType & decimal, UInt32 scale, To & result) using 
DecimalNativeType = typename DecimalType::NativeType; static constexpr bool throw_exception = std::is_void_v; - if constexpr (is_floating_point) + if constexpr (std::is_same_v) + { + if constexpr (throw_exception) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from Decimal to BFloat16 is not implemented"); + else + return ReturnType(false); + } + else if constexpr (is_floating_point) { result = static_cast(decimal.value) / static_cast(scaleMultiplier(scale)); } diff --git a/src/Core/iostream_debug_helpers.cpp b/src/Core/iostream_debug_helpers.cpp deleted file mode 100644 index 38e61ac4fca..00000000000 --- a/src/Core/iostream_debug_helpers.cpp +++ /dev/null @@ -1,149 +0,0 @@ -#include "iostream_debug_helpers.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -template <> -std::ostream & operator<< (std::ostream & stream, const Field & what) -{ - stream << applyVisitor(FieldVisitorDump(), what); - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const NameAndTypePair & what) -{ - stream << "NameAndTypePair(name = " << what.name << ", type = " << what.type << ")"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const IDataType & what) -{ - stream << "IDataType(name = " << what.getName() << ", default = " << what.getDefault() << ")"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const IStorage & what) -{ - auto table_id = what.getStorageID(); - stream << "IStorage(name = " << what.getName() << ", tableName = " << table_id.table_name << ") {" - << what.getInMemoryMetadataPtr()->getColumns().getAllPhysical().toString() << "}"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const TableLockHolder &) -{ - stream << "TableStructureReadLock()"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const IFunctionOverloadResolver & what) -{ - stream << "IFunction(name = " << what.getName() << ", variadic = " << what.isVariadic() << ", args = " << what.getNumberOfArguments() - << ")"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const Block & what) -{ - stream << "Block(" - << "num_columns = " << what.columns() << "){" << what.dumpStructure() << "}"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const ColumnWithTypeAndName & what) -{ - stream << "ColumnWithTypeAndName(name = " << what.name << ", type = " << *what.type << ", column = "; - return dumpValue(stream, what.column) << ")"; -} - -std::ostream & operator<<(std::ostream & stream, const IColumn & what) -{ - stream << "IColumn(" << what.dumpStructure() << ")"; - stream << "{"; - for (size_t i = 0; i < what.size(); ++i) - { - if (i) - stream << ", "; - stream << applyVisitor(FieldVisitorDump(), what[i]); - } - stream << "}"; - - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const Packet & what) -{ - stream << "Packet(" - << "type = " << what.type; - // types description: Core/Protocol.h - if (what.exception) - stream << "exception = " << what.exception.get(); - // TODO: profile_info - stream << ") {" << what.block << "}"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const ExpressionActions & what) -{ - stream << "ExpressionActions(" << what.dumpActions() << ")"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const TreeRewriterResult & what) -{ - stream << 
"SyntaxAnalyzerResult{"; - stream << "storage=" << what.storage << "; "; - if (!what.source_columns.empty()) - { - stream << "source_columns="; - dumpValue(stream, what.source_columns); - stream << "; "; - } - if (!what.aliases.empty()) - { - stream << "aliases="; - dumpValue(stream, what.aliases); - stream << "; "; - } - if (!what.array_join_result_to_source.empty()) - { - stream << "array_join_result_to_source="; - dumpValue(stream, what.array_join_result_to_source); - stream << "; "; - } - if (!what.array_join_alias_to_name.empty()) - { - stream << "array_join_alias_to_name="; - dumpValue(stream, what.array_join_alias_to_name); - stream << "; "; - } - if (!what.array_join_name_to_alias.empty()) - { - stream << "array_join_name_to_alias="; - dumpValue(stream, what.array_join_name_to_alias); - stream << "; "; - } - stream << "rewrite_subqueries=" << what.rewrite_subqueries << "; "; - stream << "}"; - - return stream; -} - -} diff --git a/src/Core/iostream_debug_helpers.h b/src/Core/iostream_debug_helpers.h deleted file mode 100644 index e40bf74583e..00000000000 --- a/src/Core/iostream_debug_helpers.h +++ /dev/null @@ -1,49 +0,0 @@ -#pragma once -#include - -namespace DB -{ - -// Use template to disable implicit casting for certain overloaded types such as Field, which leads -// to overload resolution ambiguity. -class Field; -template -requires std::is_same_v -std::ostream & operator<<(std::ostream & stream, const T & what); - -struct NameAndTypePair; -std::ostream & operator<<(std::ostream & stream, const NameAndTypePair & what); - -class IDataType; -std::ostream & operator<<(std::ostream & stream, const IDataType & what); - -class IStorage; -std::ostream & operator<<(std::ostream & stream, const IStorage & what); - -class IFunctionOverloadResolver; -std::ostream & operator<<(std::ostream & stream, const IFunctionOverloadResolver & what); - -class IFunctionBase; -std::ostream & operator<<(std::ostream & stream, const IFunctionBase & what); - -class Block; -std::ostream & operator<<(std::ostream & stream, const Block & what); - -struct ColumnWithTypeAndName; -std::ostream & operator<<(std::ostream & stream, const ColumnWithTypeAndName & what); - -class IColumn; -std::ostream & operator<<(std::ostream & stream, const IColumn & what); - -struct Packet; -std::ostream & operator<<(std::ostream & stream, const Packet & what); - -class ExpressionActions; -std::ostream & operator<<(std::ostream & stream, const ExpressionActions & what); - -struct TreeRewriterResult; -std::ostream & operator<<(std::ostream & stream, const TreeRewriterResult & what); -} - -/// some operator<< should be declared before operator<<(... 
std::shared_ptr<>) -#include diff --git a/src/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp index d87eff97675..e0304e46b05 100644 --- a/src/DataTypes/DataTypesDecimal.cpp +++ b/src/DataTypes/DataTypesDecimal.cpp @@ -20,6 +20,7 @@ namespace ErrorCodes extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int DECIMAL_OVERFLOW; + extern const int NOT_IMPLEMENTED; } @@ -262,15 +263,19 @@ FOR_EACH_ARITHMETIC_TYPE(INVOKE); template requires (is_arithmetic_v && IsDataTypeDecimal) -ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & value, UInt32 scale, typename ToDataType::FieldType & result) +ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & /*value*/, UInt32 /*scale*/, typename ToDataType::FieldType & /*result*/) { - using FromFieldType = typename FromDataType::FieldType; +/* using FromFieldType = typename FromDataType::FieldType; using ToFieldType = typename ToDataType::FieldType; using ToNativeType = typename ToFieldType::NativeType; static constexpr bool throw_exception = std::is_same_v; - if constexpr (is_floating_point) + if constexpr (std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from BFloat16 to Decimal is not implemented"); + } + else if constexpr (is_floating_point) { if (!isFinite(value)) { @@ -302,7 +307,9 @@ ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & value, return ReturnType(convertDecimalsImpl, ToDataType, ReturnType>(static_cast(value), 0, scale, result)); else return ReturnType(convertDecimalsImpl, ToDataType, ReturnType>(static_cast(value), 0, scale, result)); - } + }*/ + + return ReturnType(); } #define DISPATCH(FROM_DATA_TYPE, TO_DATA_TYPE) \ diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index bf004dbe32b..4950e7c8ee6 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ b/src/Dictionaries/RangeHashedDictionary.h @@ -298,7 +298,8 @@ namespace impl using Types = std::decay_t; using DataType = typename Types::LeftType; - if constexpr (IsDataTypeDecimalOrNumber || IsDataTypeDateOrDateTime || IsDataTypeEnum) + if constexpr ((IsDataTypeDecimalOrNumber || IsDataTypeDateOrDateTime || IsDataTypeEnum) + && !std::is_same_v) { using ColumnType = typename DataType::ColumnType; func(TypePair()); diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 46fbe70458d..7eea0d74975 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -579,7 +579,8 @@ public: using Types = std::decay_t; using DataType = typename Types::LeftType; - if constexpr (IsDataTypeNumber || IsDataTypeDecimal) + if constexpr ((IsDataTypeNumber || IsDataTypeDecimal) + && !std::is_same_v) { using FieldType = typename DataType::FieldType; res = Dispatcher::apply(column.column.get(), scale_arg); diff --git a/src/Functions/array/mapPopulateSeries.cpp b/src/Functions/array/mapPopulateSeries.cpp index 0db71ab2cf8..759696147c3 100644 --- a/src/Functions/array/mapPopulateSeries.cpp +++ b/src/Functions/array/mapPopulateSeries.cpp @@ -453,23 +453,29 @@ private: using ValueType = typename Types::RightType; static constexpr bool key_and_value_are_numbers = IsDataTypeNumber && IsDataTypeNumber; - static constexpr bool key_is_float = std::is_same_v || std::is_same_v; - if constexpr (key_and_value_are_numbers && !key_is_float) + if constexpr (key_and_value_are_numbers) { - using KeyFieldType = typename KeyType::FieldType; - using ValueFieldType = 
typename ValueType::FieldType; + if constexpr (is_floating_point) + { + return false; + } + else + { + using KeyFieldType = typename KeyType::FieldType; + using ValueFieldType = typename ValueType::FieldType; - executeImplTyped( - input.key_column, - input.value_column, - input.offsets_column, - input.max_key_column, - std::move(result_columns.result_key_column), - std::move(result_columns.result_value_column), - std::move(result_columns.result_offset_column)); + executeImplTyped( + input.key_column, + input.value_column, + input.offsets_column, + input.max_key_column, + std::move(result_columns.result_key_column), + std::move(result_columns.result_value_column), + std::move(result_columns.result_offset_column)); - return true; + return true; + } } return false; diff --git a/src/Functions/exp.cpp b/src/Functions/exp.cpp index d352cda7460..9b8207afe30 100644 --- a/src/Functions/exp.cpp +++ b/src/Functions/exp.cpp @@ -21,7 +21,14 @@ namespace template static void execute(const T * src, size_t size, T * dst) { - NFastOps::Exp(src, size, dst); + if constexpr (std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function `{}` is not implemented for BFloat16", name); + } + else + { + NFastOps::Exp(src, size, dst); + } } }; } diff --git a/src/Functions/log.cpp b/src/Functions/log.cpp index 9096b8c6f22..d5e10c90c83 100644 --- a/src/Functions/log.cpp +++ b/src/Functions/log.cpp @@ -20,7 +20,14 @@ struct LogName { static constexpr auto name = "log"; }; template static void execute(const T * src, size_t size, T * dst) { - NFastOps::Log(src, size, dst); + if constexpr (std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function `{}` is not implemented for BFloat16", name); + } + else + { + NFastOps::Log(src, size, dst); + } } }; diff --git a/src/Functions/minus.cpp b/src/Functions/minus.cpp index cf318db805b..a372e8d5d78 100644 --- a/src/Functions/minus.cpp +++ b/src/Functions/minus.cpp @@ -17,8 +17,8 @@ struct MinusImpl { if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) - static_cast(static_cast(b)); } diff --git a/src/Functions/sigmoid.cpp b/src/Functions/sigmoid.cpp index d121bdc7389..1179329845d 100644 --- a/src/Functions/sigmoid.cpp +++ b/src/Functions/sigmoid.cpp @@ -21,7 +21,14 @@ namespace template static void execute(const T * src, size_t size, T * dst) { - NFastOps::Sigmoid<>(src, size, dst); + if constexpr (std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function `{}` is not implemented for BFloat16", name); + } + else + { + NFastOps::Sigmoid<>(src, size, dst); + } } }; } @@ -47,4 +54,3 @@ REGISTER_FUNCTION(Sigmoid) } } - diff --git a/src/Functions/tanh.cpp b/src/Functions/tanh.cpp index bdefa5263d7..49788b31970 100644 --- a/src/Functions/tanh.cpp +++ b/src/Functions/tanh.cpp @@ -19,7 +19,14 @@ struct TanhName { static constexpr auto name = "tanh"; }; template static void execute(const T * src, size_t size, T * dst) { - NFastOps::Tanh<>(src, size, dst); + if constexpr (std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function `{}` is not implemented for BFloat16", name); + } + else + { + NFastOps::Tanh<>(src, size, dst); + } } }; diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index a4eefeaffe2..d2e2868b245 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -155,7 +155,7 @@ inline size_t 
writeFloatTextFastPath(T x, char * buffer) { Int64 result = 0; - if constexpr (std::is_same_v) + if constexpr (std::is_same_v) { /// The library Ryu has low performance on integers. /// This workaround improves performance 6..10 times. @@ -165,10 +165,16 @@ inline size_t writeFloatTextFastPath(T x, char * buffer) else result = jkj::dragonbox::to_chars_n(x, buffer) - buffer; } - else + else if constexpr (std::is_same_v) { - /// This will support 16-bit floats as well. - float f32 = x; + if (DecomposedFloat32(x).isIntegerInRepresentableRange()) + result = itoa(Int32(x), buffer) - buffer; + else + result = jkj::dragonbox::to_chars_n(x, buffer) - buffer; + } + else if constexpr (std::is_same_v) + { + Float32 f32 = BFloat16ToFloat32(x); if (DecomposedFloat32(f32).isIntegerInRepresentableRange()) result = itoa(Int32(f32), buffer) - buffer; diff --git a/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp index c5ffbb96d6f..a0fad8840e6 100644 --- a/src/Interpreters/RowRefs.cpp +++ b/src/Interpreters/RowRefs.cpp @@ -183,7 +183,7 @@ private: if (sorted.load(std::memory_order_relaxed)) return; - if constexpr (std::is_arithmetic_v && !std::is_floating_point) + if constexpr (std::is_arithmetic_v && !is_floating_point) { if (likely(entries.size() > 256)) { diff --git a/src/Parsers/iostream_debug_helpers.cpp b/src/Parsers/iostream_debug_helpers.cpp deleted file mode 100644 index b74d337b22d..00000000000 --- a/src/Parsers/iostream_debug_helpers.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include "iostream_debug_helpers.h" -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -std::ostream & operator<<(std::ostream & stream, const Token & what) -{ - stream << "Token (type="<< static_cast(what.type) <<"){"<< std::string{what.begin, what.end} << "}"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const Expected & what) -{ - stream << "Expected {variants="; - dumpValue(stream, what.variants) - << "; max_parsed_pos=" << what.max_parsed_pos << "}"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const IAST & what) -{ - WriteBufferFromOStream buf(stream, 4096); - buf << "IAST{"; - what.dumpTree(buf); - buf << "}"; - return stream; -} - -} diff --git a/src/Parsers/iostream_debug_helpers.h b/src/Parsers/iostream_debug_helpers.h deleted file mode 100644 index 39f52ebcbc2..00000000000 --- a/src/Parsers/iostream_debug_helpers.h +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once -#include - -namespace DB -{ -struct Token; -std::ostream & operator<<(std::ostream & stream, const Token & what); - -struct Expected; -std::ostream & operator<<(std::ostream & stream, const Expected & what); - -class IAST; -std::ostream & operator<<(std::ostream & stream, const IAST & what); - -} - -#include From f8b3987d5292ed1e2acfc7cab2b7bfcd80f1aee1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Tue, 25 Jun 2024 03:26:17 +0300 Subject: [PATCH 020/566] Delete attaching prefix for deduplicated parts --- .../MergeTree/ReplicatedMergeTreeSink.cpp | 9 ++- .../__init__.py | 0 .../test.py | 61 +++++++++++++++++++ 3 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_deduplicated_attached_part_rename/__init__.py create mode 100644 tests/integration/test_deduplicated_attached_part_rename/test.py diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 4b4f4c33e7d..4190e3cce5e 100644 --- 
a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -561,8 +561,15 @@ bool ReplicatedMergeTreeSinkImpl::writeExistingPart(MergeTreeData::Mutabl String block_id = deduplicate ? fmt::format("{}_{}", part->info.partition_id, part->checksums.getTotalChecksumHex()) : ""; bool deduplicated = commitPart(zookeeper, part, block_id, replicas_num).second; + int error = 0; /// Set a special error code if the block is duplicate - int error = (deduplicate && deduplicated) ? ErrorCodes::INSERT_WAS_DEDUPLICATED : 0; + /// And remove attaching_ prefix + if (deduplicate && deduplicated) + { + error = ErrorCodes::INSERT_WAS_DEDUPLICATED; + fs::path new_relative_path = fs::path("detached") / part->getNewName(part->info); + part->renameTo(new_relative_path, false); + } PartLog::addNewPart(storage.getContext(), PartLog::PartLogEntry(part, watch.elapsed(), profile_events_scope.getSnapshot()), ExecutionStatus(error)); return deduplicated; } diff --git a/tests/integration/test_deduplicated_attached_part_rename/__init__.py b/tests/integration/test_deduplicated_attached_part_rename/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_deduplicated_attached_part_rename/test.py b/tests/integration/test_deduplicated_attached_part_rename/test.py new file mode 100644 index 00000000000..362b2bad37a --- /dev/null +++ b/tests/integration/test_deduplicated_attached_part_rename/test.py @@ -0,0 +1,61 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +ch1 = cluster.add_instance( + "ch1", + with_zookeeper=True, + macros={"replica": "node1"}, + stay_alive=True, +) + +database_name = "dedup_attach" + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def q(query): + return ch1.query(database=database_name, sql=query) + + +def test_deduplicated_attached_part_renamed_after_attach(started_cluster): + ch1.query(f"CREATE DATABASE {database_name}") + + q("CREATE TABLE dedup (id UInt32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/dedup_attach/dedup/s1', 'r1') ORDER BY id;") + q("INSERT INTO dedup VALUES (1),(2),(3);") + + table_data_path = q("SELECT data_paths FROM system.tables WHERE database=currentDatabase() AND table='dedup'").strip("'[]\n") + + ch1.exec_in_container( + [ + "bash", + "-c", + f"cp -r {table_data_path}/all_0_0_0 {table_data_path}/detached/all_0_0_0", + ] + ) + # Part is attached as all_1_1_0 + q("ALTER TABLE dedup ATTACH PART 'all_0_0_0'") + + assert 2 == int(q(f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'").strip()) + + ch1.exec_in_container( + [ + "bash", + "-c", + f"cp -r {table_data_path}/all_1_1_0 {table_data_path}/detached/all_1_1_0", + ] + ) + # Part is deduplicated and not attached + q("ALTER TABLE dedup ATTACH PART 'all_1_1_0'") + + assert 2 == int(q(f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'").strip()) + assert 1 == int(q(f"SELECT count() FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'").strip()) + # Check that it is not 'attaching_all_1_1_0' + assert "all_1_1_0" == q(f"SELECT name FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'").strip() From 6601ded4a1332548ae4cfe35c7ba8f276214d153 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= 
Date: Wed, 10 Jul 2024 23:02:11 +0300 Subject: [PATCH 021/566] Fix black --- .../test.py | 34 +++++++++++++++---- 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/tests/integration/test_deduplicated_attached_part_rename/test.py b/tests/integration/test_deduplicated_attached_part_rename/test.py index 362b2bad37a..2b7ab0934d1 100644 --- a/tests/integration/test_deduplicated_attached_part_rename/test.py +++ b/tests/integration/test_deduplicated_attached_part_rename/test.py @@ -11,6 +11,7 @@ ch1 = cluster.add_instance( database_name = "dedup_attach" + @pytest.fixture(scope="module") def started_cluster(): try: @@ -28,10 +29,14 @@ def q(query): def test_deduplicated_attached_part_renamed_after_attach(started_cluster): ch1.query(f"CREATE DATABASE {database_name}") - q("CREATE TABLE dedup (id UInt32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/dedup_attach/dedup/s1', 'r1') ORDER BY id;") + q( + "CREATE TABLE dedup (id UInt32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/dedup_attach/dedup/s1', 'r1') ORDER BY id;" + ) q("INSERT INTO dedup VALUES (1),(2),(3);") - table_data_path = q("SELECT data_paths FROM system.tables WHERE database=currentDatabase() AND table='dedup'").strip("'[]\n") + table_data_path = q( + "SELECT data_paths FROM system.tables WHERE database=currentDatabase() AND table='dedup'" + ).strip("'[]\n") ch1.exec_in_container( [ @@ -43,7 +48,11 @@ def test_deduplicated_attached_part_renamed_after_attach(started_cluster): # Part is attached as all_1_1_0 q("ALTER TABLE dedup ATTACH PART 'all_0_0_0'") - assert 2 == int(q(f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'").strip()) + assert 2 == int( + q( + f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'" + ).strip() + ) ch1.exec_in_container( [ @@ -55,7 +64,20 @@ def test_deduplicated_attached_part_renamed_after_attach(started_cluster): # Part is deduplicated and not attached q("ALTER TABLE dedup ATTACH PART 'all_1_1_0'") - assert 2 == int(q(f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'").strip()) - assert 1 == int(q(f"SELECT count() FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'").strip()) + assert 2 == int( + q( + f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'" + ).strip() + ) + assert 1 == int( + q( + f"SELECT count() FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'" + ).strip() + ) # Check that it is not 'attaching_all_1_1_0' - assert "all_1_1_0" == q(f"SELECT name FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'").strip() + assert ( + "all_1_1_0" + == q( + f"SELECT name FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'" + ).strip() + ) From 83b79fce832ef4bd3fa230ae8a66d23935724985 Mon Sep 17 00:00:00 2001 From: qhsong Date: Wed, 8 May 2024 16:18:13 +0800 Subject: [PATCH 022/566] Add repeatable uniq ID for processor and step --- src/Common/ThreadStatus.cpp | 10 +++++++ src/Common/ThreadStatus.h | 9 +++++++ src/Interpreters/Context.h | 2 ++ src/Interpreters/ProcessorsProfileLog.cpp | 4 +++ src/Interpreters/ProcessorsProfileLog.h | 4 ++- src/Interpreters/executeQuery.cpp | 2 ++ .../Executors/ExecutionThreadContext.cpp | 2 +- src/Processors/IProcessor.h | 26 ++++++++++++++++--- src/Processors/QueryPlan/IQueryPlanStep.h | 12 ++++++++- src/Processors/QueryPlan/QueryPlan.cpp | 1 + src/QueryPipeline/QueryPipelineBuilder.cpp | 4 +-- 
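The "repeatable uniq ID" introduced by this patch comes from per-query counters kept on ThreadStatus: every IQueryPlanStep and IProcessor takes the next index at construction time and exposes it as "<Name>_<index>" via getUniqID(). A simplified, self-contained model of that scheme (not the actual ThreadStatus wiring; class and member names here are illustrative):

#include <atomic>
#include <cstddef>
#include <memory>
#include <string>
#include <fmt/format.h>

/// One instance per query. Steps are built single-threaded, but processors can be
/// created from several threads (e.g. in expand()), hence the atomic counter.
struct QueryLocalCounters
{
    std::shared_ptr<size_t> step_count = std::make_shared<size_t>(0);
    std::shared_ptr<std::atomic<size_t>> processor_count = std::make_shared<std::atomic<size_t>>(0);
};

class ProcessorLike
{
public:
    ProcessorLike(QueryLocalCounters & counters, std::string name_)
        : name(std::move(name_)), index(++*counters.processor_count) {}

    /// Same plan -> same creation order -> same "Name_N" on every run, which is what
    /// makes EXPLAIN and processors_profile_log output comparable across runs.
    std::string getUniqID() const { return fmt::format("{}_{}", name, index); }

private:
    std::string name;
    size_t index;
};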
src/QueryPipeline/printPipeline.cpp | 4 +-- src/QueryPipeline/printPipeline.h | 2 +- .../01786_explain_merge_tree.reference | 2 ++ 14 files changed, 73 insertions(+), 11 deletions(-) diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index ad96018a17e..74b5475da77 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -221,6 +221,16 @@ bool ThreadStatus::isQueryCanceled() const return false; } +size_t ThreadStatus::incrStepIndex() +{ + return ++(*local_data.step_count); +} + +size_t ThreadStatus::incrProcessorIndex() +{ + return ++(*local_data.processor_count); +} + ThreadStatus::~ThreadStatus() { flushUntrackedMemory(); diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 0c02ab8fdb0..97b45c01e54 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -10,6 +10,7 @@ #include +#include #include #include #include @@ -90,6 +91,11 @@ public: String query_for_logs; UInt64 normalized_query_hash = 0; + //QueryPlan can not build parallel, but processor may build parallel in expand() function. + //so we use atomic_size_t for processor_count + std::shared_ptr step_count = std::make_shared(0); + std::shared_ptr processor_count = std::make_shared(0); + QueryIsCanceledPredicate query_is_canceled_predicate = {}; }; @@ -309,6 +315,9 @@ public: void initGlobalProfiler(UInt64 global_profiler_real_time_period, UInt64 global_profiler_cpu_time_period); + size_t incrStepIndex(); + size_t incrProcessorIndex(); + private: void applyGlobalSettings(); void applyQuerySettings(); diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index d1ff5b4c2b2..692d71a3384 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -1336,7 +1336,9 @@ private: std::shared_ptr getClustersImpl(std::lock_guard & lock) const; /// Throttling + public: + ThrottlerPtr getReplicatedFetchesThrottler() const; ThrottlerPtr getReplicatedSendsThrottler() const; diff --git a/src/Interpreters/ProcessorsProfileLog.cpp b/src/Interpreters/ProcessorsProfileLog.cpp index 7dec2a3163a..8e1cf278c63 100644 --- a/src/Interpreters/ProcessorsProfileLog.cpp +++ b/src/Interpreters/ProcessorsProfileLog.cpp @@ -42,6 +42,8 @@ ColumnsDescription ProcessorProfileLogElement::getColumnsDescription() {"input_bytes", std::make_shared(), "The number of bytes consumed by processor."}, {"output_rows", std::make_shared(), "The number of rows generated by processor."}, {"output_bytes", std::make_shared(), "The number of bytes generated by processor."}, + {"processor_uniq_id", std::make_shared(), "The uniq processor id in pipeline."}, + {"step_uniq_id", std::make_shared(), "The uniq step id in plan."}, }; } @@ -75,6 +77,8 @@ void ProcessorProfileLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(input_bytes); columns[i++]->insert(output_rows); columns[i++]->insert(output_bytes); + columns[i++]->insert(processor_uniq_id); + columns[i++]->insert(step_uniq_id); } diff --git a/src/Interpreters/ProcessorsProfileLog.h b/src/Interpreters/ProcessorsProfileLog.h index 8319d373f39..1b2abaa8ede 100644 --- a/src/Interpreters/ProcessorsProfileLog.h +++ b/src/Interpreters/ProcessorsProfileLog.h @@ -17,12 +17,14 @@ struct ProcessorProfileLogElement UInt64 id{}; std::vector parent_ids; - UInt64 plan_step{}; + UInt64 plan_step; UInt64 plan_group{}; String initial_query_id; String query_id; String processor_name; + String processor_uniq_id; + String step_uniq_id; /// Milliseconds spend in IProcessor::work() UInt32 elapsed_us{}; diff --git 
a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 33a4cf2a74c..59573e912e4 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -460,6 +460,8 @@ void logQueryFinish( processor_elem.plan_step = reinterpret_cast(processor->getQueryPlanStep()); processor_elem.plan_group = processor->getQueryPlanStepGroup(); + processor_elem.processor_uniq_id = processor->getUniqID(); + processor_elem.step_uniq_id = processor->getStepUniqID(); processor_elem.processor_name = processor->getName(); diff --git a/src/Processors/Executors/ExecutionThreadContext.cpp b/src/Processors/Executors/ExecutionThreadContext.cpp index 05669725f9a..06b4b53c817 100644 --- a/src/Processors/Executors/ExecutionThreadContext.cpp +++ b/src/Processors/Executors/ExecutionThreadContext.cpp @@ -79,7 +79,7 @@ bool ExecutionThreadContext::executeTask() if (trace_processors) { - span = std::make_unique(node->processor->getName()); + span = std::make_unique(node->processor->getUniqID()); span->addAttribute("thread_number", thread_number); } std::optional execution_time_watch; diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 56b4509fe00..b99ebeb5fa5 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -3,7 +3,9 @@ #include #include #include - +#include +#include +#include class EventCounter; @@ -121,7 +123,10 @@ protected: OutputPorts outputs; public: - IProcessor() = default; + IProcessor() + { + setProcessorIndex(); + } IProcessor(InputPorts inputs_, OutputPorts outputs_) : inputs(std::move(inputs_)), outputs(std::move(outputs_)) @@ -130,9 +135,16 @@ public: port.processor = this; for (auto & port : outputs) port.processor = this; + setProcessorIndex(); + } + + void setProcessorIndex() + { + processor_index = CurrentThread::get().incrProcessorIndex(); } virtual String getName() const = 0; + String getUniqID() const { return fmt::format("{}_{}", getName(), processor_index); } enum class Status { @@ -300,11 +312,16 @@ public: /// Step of QueryPlan from which processor was created. 
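    /// Besides the raw step pointer, setQueryPlanStep() below also caches the step's
    /// "Name_N" identifier in step_uniq_id, so getStepUniqID() can be reported
    /// (e.g. into processors_profile_log above) without touching the step object again.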
void setQueryPlanStep(IQueryPlanStep * step, size_t group = 0) { - query_plan_step = step; + if (step != nullptr) + { + query_plan_step = step; + step_uniq_id = step->getUniqID(); + } query_plan_step_group = group; } IQueryPlanStep * getQueryPlanStep() const { return query_plan_step; } + const String &getStepUniqID() const { return step_uniq_id; } size_t getQueryPlanStepGroup() const { return query_plan_step_group; } uint64_t getElapsedUs() const { return elapsed_us; } @@ -392,7 +409,10 @@ private: size_t stream_number = NO_STREAM; IQueryPlanStep * query_plan_step = nullptr; + String step_uniq_id; size_t query_plan_step_group = 0; + + size_t processor_index = 0; }; diff --git a/src/Processors/QueryPlan/IQueryPlanStep.h b/src/Processors/QueryPlan/IQueryPlanStep.h index ac5ea259d2e..ec5ac9ad4dc 100644 --- a/src/Processors/QueryPlan/IQueryPlanStep.h +++ b/src/Processors/QueryPlan/IQueryPlanStep.h @@ -2,6 +2,9 @@ #include #include #include +#include +#include +#include namespace DB { @@ -71,6 +74,10 @@ using QueryPlanRawPtrs = std::list; class IQueryPlanStep { public: + IQueryPlanStep() + { + step_index = CurrentThread::get().incrStepIndex(); + } virtual ~IQueryPlanStep() = default; virtual String getName() const = 0; @@ -138,7 +145,7 @@ public: } virtual bool canUpdateInputStream() const { return false; } - + String getUniqID() const { return fmt::format("{}_{}", getName(), step_index); } protected: virtual void updateOutputStream() { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented"); } @@ -153,6 +160,9 @@ protected: Processors processors; static void describePipeline(const Processors & processors, FormatSettings & settings); + +private: + size_t step_index = 0; }; using QueryPlanStepPtr = std::unique_ptr; diff --git a/src/Processors/QueryPlan/QueryPlan.cpp b/src/Processors/QueryPlan/QueryPlan.cpp index 0fae7e8df4d..f651870453b 100644 --- a/src/Processors/QueryPlan/QueryPlan.cpp +++ b/src/Processors/QueryPlan/QueryPlan.cpp @@ -206,6 +206,7 @@ QueryPipelineBuilderPtr QueryPlan::buildQueryPipeline( static void explainStep(const IQueryPlanStep & step, JSONBuilder::JSONMap & map, const QueryPlan::ExplainPlanOptions & options) { map.add("Node Type", step.getName()); + map.add("Node Id", step.getUniqID()); if (options.description) { diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 67a8fe5dcab..4b6f15905ce 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -400,10 +400,10 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe left->pipe.collected_processors = collected_processors; - /// Collect the NEW processors for the right pipeline. - QueryPipelineProcessorsCollector collector(*right); /// Remember the last step of the right pipeline. IQueryPlanStep * step = right->pipe.processors->back()->getQueryPlanStep(); + /// Collect the NEW processors for the right pipeline. + QueryPipelineProcessorsCollector collector(*right, step); /// In case joined subquery has totals, and we don't, add default chunk to totals. 
bool default_totals = false; diff --git a/src/QueryPipeline/printPipeline.cpp b/src/QueryPipeline/printPipeline.cpp index 40c88502ed0..1726d776921 100644 --- a/src/QueryPipeline/printPipeline.cpp +++ b/src/QueryPipeline/printPipeline.cpp @@ -113,7 +113,7 @@ void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool if (item.first != nullptr) { out << " subgraph cluster_" << next_step << " {\n"; - out << " label =\"" << item.first->getName() << "\";\n"; + out << " label =\"" << item.first->getUniqID() << "\";\n"; out << " style=filled;\n"; out << " color=lightgrey;\n"; out << " node [style=filled,color=white];\n"; @@ -125,7 +125,7 @@ void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool for (const auto & node : item.second) { const auto & processor = node->agents.front(); - out << " n" << node->id << " [label=\"" << processor->getName(); + out << " n" << node->id << " [label=\"" << processor->getUniqID(); if (node->agents.size() > 1) out << " × " << node->agents.size(); diff --git a/src/QueryPipeline/printPipeline.h b/src/QueryPipeline/printPipeline.h index 2bdbd8f7a07..e6799251851 100644 --- a/src/QueryPipeline/printPipeline.h +++ b/src/QueryPipeline/printPipeline.h @@ -30,7 +30,7 @@ void printPipeline(const Processors & processors, const Statuses & statuses, Wri for (const auto & processor : processors) { const auto & description = processor->getDescription(); - out << " n" << get_proc_id(*processor) << "[label=\"" << processor->getName() << (description.empty() ? "" : ":") << description; + out << " n" << get_proc_id(*processor) << "[label=\"" << processor->getUniqID() << (description.empty() ? "" : ":") << description; if (statuses_iter != statuses.end()) { diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 3a015d32539..36ebbe1a1da 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -29,6 +29,7 @@ Granules: 2/3 ----------------- "Node Type": "ReadFromMergeTree", + "Node Id": "ReadFromMergeTree_0", "Description": "default.test_index", "Indexes": [ { @@ -126,6 +127,7 @@ Granules: 3/6 ----------------- "Node Type": "ReadFromMergeTree", + "Node Id": "ReadFromMergeTree_0", "Description": "default.test_index", "Indexes": [ { From 59bd7447fcb1db44bc77d93339b36dae684d5daf Mon Sep 17 00:00:00 2001 From: qhsong Date: Tue, 30 Jul 2024 10:23:51 +0800 Subject: [PATCH 023/566] Fix testcase --- src/Common/ThreadStatus.h | 2 +- src/Interpreters/Context.h | 2 -- src/Processors/IProcessor.h | 5 ++++- src/Processors/QueryPlan/IQueryPlanStep.h | 5 ++++- .../0_stateless/01786_explain_merge_tree.reference | 4 ++-- .../0_stateless/01823_explain_json.reference | 13 +++++++++++-- .../03213_distributed_analyzer.reference | 2 +- 7 files changed, 23 insertions(+), 10 deletions(-) diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 97b45c01e54..fd384ad1603 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -93,7 +93,7 @@ public: //QueryPlan can not build parallel, but processor may build parallel in expand() function. 
//so we use atomic_size_t for processor_count - std::shared_ptr step_count = std::make_shared(0); + std::shared_ptr step_count = std::make_shared(0); std::shared_ptr processor_count = std::make_shared(0); QueryIsCanceledPredicate query_is_canceled_predicate = {}; diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 1f331f0d094..cb553d07513 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -1404,9 +1404,7 @@ private: std::shared_ptr getClustersImpl(std::lock_guard & lock) const; /// Throttling - public: - ThrottlerPtr getReplicatedFetchesThrottler() const; ThrottlerPtr getReplicatedSendsThrottler() const; diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 3933a79ab55..d426d5ef9ba 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -140,7 +140,10 @@ public: void setProcessorIndex() { - processor_index = CurrentThread::get().incrProcessorIndex(); + if (CurrentThread::isInitialized()) [[likely]] + { + processor_index = CurrentThread::get().incrProcessorIndex(); + } } virtual String getName() const = 0; diff --git a/src/Processors/QueryPlan/IQueryPlanStep.h b/src/Processors/QueryPlan/IQueryPlanStep.h index acd8857b9df..500e0812983 100644 --- a/src/Processors/QueryPlan/IQueryPlanStep.h +++ b/src/Processors/QueryPlan/IQueryPlanStep.h @@ -76,7 +76,10 @@ class IQueryPlanStep public: IQueryPlanStep() { - step_index = CurrentThread::get().incrStepIndex(); + if (CurrentThread::isInitialized()) [[likely]] + { + step_index = CurrentThread::get().incrStepIndex(); + } } virtual ~IQueryPlanStep() = default; diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 36ebbe1a1da..75736669905 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -29,7 +29,7 @@ Granules: 2/3 ----------------- "Node Type": "ReadFromMergeTree", - "Node Id": "ReadFromMergeTree_0", + "Node Id": "ReadFromMergeTree_1", "Description": "default.test_index", "Indexes": [ { @@ -127,7 +127,7 @@ Granules: 3/6 ----------------- "Node Type": "ReadFromMergeTree", - "Node Id": "ReadFromMergeTree_0", + "Node Id": "ReadFromMergeTree_1", "Description": "default.test_index", "Indexes": [ { diff --git a/tests/queries/0_stateless/01823_explain_json.reference b/tests/queries/0_stateless/01823_explain_json.reference index 23fb34c2192..1aa5aa134e9 100644 --- a/tests/queries/0_stateless/01823_explain_json.reference +++ b/tests/queries/0_stateless/01823_explain_json.reference @@ -2,20 +2,25 @@ { "Plan": { "Node Type": "Union", + "Node Id": "Union_11", "Plans": [ { "Node Type": "Expression", + "Node Id": "Expression_14", "Plans": [ { - "Node Type": "ReadFromStorage" + "Node Type": "ReadFromStorage", + "Node Id": "ReadFromStorage_1" } ] }, { "Node Type": "Expression", + "Node Id": "Expression_17", "Plans": [ { - "Node Type": "ReadFromStorage" + "Node Type": "ReadFromStorage", + "Node Id": "ReadFromStorage_5" } ] } @@ -35,6 +40,7 @@ } -------- "Node Type": "Aggregating", + "Node Id": "Aggregating_4", "Header": [ { "Name": "__table1.number", @@ -73,13 +79,16 @@ ], -------- "Node Type": "ArrayJoin", + "Node Id": "ArrayJoin_5", "Left": false, "Columns": ["__table1.x", "__table1.y"], -------- "Node Type": "Distinct", + "Node Id": "Distinct_5", "Columns": ["intDiv(__table1.number, 2_UInt8)", "intDiv(__table1.number, 3_UInt8)"], -- "Node Type": "Distinct", + "Node Id": "Distinct_4", "Columns": 
["intDiv(__table1.number, 2_UInt8)", "intDiv(__table1.number, 3_UInt8)"], -------- "Sort Description": [ diff --git a/tests/queries/0_stateless/03213_distributed_analyzer.reference b/tests/queries/0_stateless/03213_distributed_analyzer.reference index 9d63c0a7a5e..2456192ca9d 100644 --- a/tests/queries/0_stateless/03213_distributed_analyzer.reference +++ b/tests/queries/0_stateless/03213_distributed_analyzer.reference @@ -1 +1 @@ -['digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote"];',' }','}','digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote"];',' }','}'] +['digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote_4"];',' }','}','digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote_4"];',' }','}'] From 351ba3ef102979714d546e7575a9f9f54325498a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 10:07:39 +0200 Subject: [PATCH 024/566] Revert "Revert "Use `Atomic` database by default in `clickhouse-local`"" --- programs/local/LocalServer.cpp | 21 +++++---- src/Databases/DatabaseAtomic.cpp | 24 ++++++++-- src/Databases/DatabaseAtomic.h | 3 ++ src/Databases/DatabaseLazy.cpp | 3 +- src/Databases/DatabaseLazy.h | 2 +- src/Databases/DatabaseOnDisk.cpp | 28 ++++++++--- src/Databases/DatabaseOnDisk.h | 7 ++- src/Databases/DatabaseOrdinary.cpp | 4 +- src/Databases/DatabasesOverlay.cpp | 47 +++++++++++++++++++ src/Databases/DatabasesOverlay.h | 9 ++++ src/Databases/IDatabase.h | 1 + .../MySQL/DatabaseMaterializedMySQL.cpp | 1 + src/Interpreters/StorageID.h | 1 - .../0_stateless/01191_rename_dictionary.sql | 1 + ...ickhouse_local_interactive_table.reference | 4 +- ...2141_clickhouse_local_interactive_table.sh | 4 +- .../03199_atomic_clickhouse_local.reference | 6 +++ .../03199_atomic_clickhouse_local.sh | 24 ++++++++++ 18 files changed, 161 insertions(+), 29 deletions(-) create mode 100644 tests/queries/0_stateless/03199_atomic_clickhouse_local.reference create mode 100755 tests/queries/0_stateless/03199_atomic_clickhouse_local.sh diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 6b0b8fc5b50..0d731ed0e14 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -50,7 +51,6 @@ #include #include #include -#include #include #include #include @@ -216,12 +216,12 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str return system_database; } -static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_) +static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context) { - auto databaseCombiner = std::make_shared(name_, context_); - databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); - databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); - return databaseCombiner; + auto overlay = std::make_shared(name_, context); + overlay->registerNextDatabase(std::make_shared(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context)); + overlay->registerNextDatabase(std::make_shared(name_, "", context)); + return overlay; } /// If path is specified and not empty, will try to setup server environment and load existing metadata @@ -367,7 +367,7 @@ std::string LocalServer::getInitialCreateTableQuery() else table_structure = "(" + table_structure + ")"; - return fmt::format("CREATE TABLE {} {} ENGINE = 
File({}, {});", + return fmt::format("CREATE TEMPORARY TABLE {} {} ENGINE = File({}, {});", table_name, table_structure, data_format, table_file); } @@ -761,7 +761,12 @@ void LocalServer::processConfig() DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase(); std::string default_database = server_settings.default_database; - DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context)); + { + DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context); + if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil) + DatabaseCatalog::instance().addUUIDMapping(uuid); + DatabaseCatalog::instance().attachDatabase(default_database, database); + } global_context->setCurrentDatabase(default_database); if (getClientConfiguration().has("path")) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index d86e29ca915..83b82976e4f 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -53,9 +53,6 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, c , db_uuid(uuid) { assert(db_uuid != UUIDHelpers::Nil); - fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); - fs::create_directories(path_to_table_symlinks); - tryCreateMetadataSymlink(); } DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, ContextPtr context_) @@ -63,6 +60,16 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, C { } +void DatabaseAtomic::createDirectories() +{ + if (database_atomic_directories_created.test_and_set()) + return; + DatabaseOnDisk::createDirectories(); + fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); + fs::create_directories(path_to_table_symlinks); + tryCreateMetadataSymlink(); +} + String DatabaseAtomic::getTableDataPath(const String & table_name) const { std::lock_guard lock(mutex); @@ -99,6 +106,7 @@ void DatabaseAtomic::drop(ContextPtr) void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name, const StoragePtr & table, const String & relative_table_path) { assert(relative_table_path != data_path && !relative_table_path.empty()); + createDirectories(); DetachedTables not_in_use; std::lock_guard lock(mutex); not_in_use = cleanupDetachedTables(); @@ -200,11 +208,15 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_ if (exchange && !supportsAtomicRename()) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported"); + createDirectories(); waitDatabaseStarted(); auto & other_db = dynamic_cast(to_database); bool inside_database = this == &other_db; + if (!inside_database) + other_db.createDirectories(); + String old_metadata_path = getObjectMetadataPath(table_name); String new_metadata_path = to_database.getObjectMetadataPath(to_table_name); @@ -325,6 +337,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora const String & table_metadata_tmp_path, const String & table_metadata_path, ContextPtr query_context) { + createDirectories(); DetachedTables not_in_use; auto table_data_path = getTableDataPath(query); try @@ -461,6 +474,9 @@ void DatabaseAtomic::beforeLoadingMetadata(ContextMutablePtr /*context*/, Loadin if (mode < LoadingStrictnessLevel::FORCE_RESTORE) return; + if (!fs::exists(path_to_table_symlinks)) + return; + /// Recreate symlinks to table data dirs in case of force restore, because some of them may be 
broken for (const auto & table_path : fs::directory_iterator(path_to_table_symlinks)) { @@ -588,6 +604,7 @@ void DatabaseAtomic::renameDatabase(ContextPtr query_context, const String & new { /// CREATE, ATTACH, DROP, DETACH and RENAME DATABASE must hold DDLGuard + createDirectories(); waitDatabaseStarted(); bool check_ref_deps = query_context->getSettingsRef().check_referential_table_dependencies; @@ -679,4 +696,5 @@ void registerDatabaseAtomic(DatabaseFactory & factory) }; factory.registerDatabase("Atomic", create_fn); } + } diff --git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h index 4a4ccfa2573..ca24494f600 100644 --- a/src/Databases/DatabaseAtomic.h +++ b/src/Databases/DatabaseAtomic.h @@ -76,6 +76,9 @@ protected: using DetachedTables = std::unordered_map; [[nodiscard]] DetachedTables cleanupDetachedTables() TSA_REQUIRES(mutex); + std::atomic_flag database_atomic_directories_created = ATOMIC_FLAG_INIT; + void createDirectories(); + void tryCreateMetadataSymlink(); virtual bool allowMoveTableToOtherDatabaseEngine(IDatabase & /*to_database*/) const { return false; } diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp index 3fb6d30fcb8..e43adfc5d37 100644 --- a/src/Databases/DatabaseLazy.cpp +++ b/src/Databases/DatabaseLazy.cpp @@ -47,12 +47,13 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_, : DatabaseOnDisk(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseLazy (" + name_ + ")", context_) , expiration_time(expiration_time_) { + createDirectories(); } void DatabaseLazy::loadStoredObjects(ContextMutablePtr local_context, LoadingStrictnessLevel /*mode*/) { - iterateMetadataFiles(local_context, [this, &local_context](const String & file_name) + iterateMetadataFiles([this, &local_context](const String & file_name) { const std::string table_name = unescapeForFileName(file_name.substr(0, file_name.size() - 4)); diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h index 41cfb751141..aeac130594f 100644 --- a/src/Databases/DatabaseLazy.h +++ b/src/Databases/DatabaseLazy.h @@ -12,7 +12,7 @@ class DatabaseLazyIterator; class Context; /** Lazy engine of databases. - * Works like DatabaseOrdinary, but stores in memory only the cache. + * Works like DatabaseOrdinary, but stores only recently accessed tables in memory. * Can be used only with *Log engines. 
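  * Tables that have not been accessed for `expiration_time` seconds are closed and
  * re-opened from their metadata files on the next access.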
*/ class DatabaseLazy final : public DatabaseOnDisk diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 734f354d9a5..82a81b0b32d 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -172,7 +172,14 @@ DatabaseOnDisk::DatabaseOnDisk( , metadata_path(metadata_path_) , data_path(data_path_) { - fs::create_directories(local_context->getPath() + data_path); +} + + +void DatabaseOnDisk::createDirectories() +{ + if (directories_created.test_and_set()) + return; + fs::create_directories(std::filesystem::path(getContext()->getPath()) / data_path); fs::create_directories(metadata_path); } @@ -190,6 +197,8 @@ void DatabaseOnDisk::createTable( const StoragePtr & table, const ASTPtr & query) { + createDirectories(); + const auto & settings = local_context->getSettingsRef(); const auto & create = query->as(); assert(table_name == create.getTable()); @@ -257,7 +266,6 @@ void DatabaseOnDisk::createTable( } commitCreateTable(create, table, table_metadata_tmp_path, table_metadata_path, local_context); - removeDetachedPermanentlyFlag(local_context, table_name, table_metadata_path, false); } @@ -285,6 +293,8 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora { try { + createDirectories(); + /// Add a table to the map of known tables. attachTable(query_context, query.getTable(), table, getTableDataPath(query)); @@ -420,6 +430,7 @@ void DatabaseOnDisk::renameTable( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Moving tables between databases of different engines is not supported"); } + createDirectories(); waitDatabaseStarted(); auto table_data_relative_path = getTableDataPath(table_name); @@ -568,14 +579,14 @@ void DatabaseOnDisk::drop(ContextPtr local_context) assert(TSA_SUPPRESS_WARNING_FOR_READ(tables).empty()); if (local_context->getSettingsRef().force_remove_data_recursively_on_drop) { - (void)fs::remove_all(local_context->getPath() + getDataPath()); + (void)fs::remove_all(std::filesystem::path(getContext()->getPath()) / data_path); (void)fs::remove_all(getMetadataPath()); } else { try { - (void)fs::remove(local_context->getPath() + getDataPath()); + (void)fs::remove(std::filesystem::path(getContext()->getPath()) / data_path); (void)fs::remove(getMetadataPath()); } catch (const fs::filesystem_error & e) @@ -613,15 +624,18 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n } } -void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const +void DatabaseOnDisk::iterateMetadataFiles(const IteratingFunction & process_metadata_file) const { + if (!fs::exists(metadata_path)) + return; + auto process_tmp_drop_metadata_file = [&](const String & file_name) { assert(getUUID() == UUIDHelpers::Nil); static const char * tmp_drop_ext = ".sql.tmp_drop"; const std::string object_name = file_name.substr(0, file_name.size() - strlen(tmp_drop_ext)); - if (fs::exists(local_context->getPath() + getDataPath() + '/' + object_name)) + if (fs::exists(std::filesystem::path(getContext()->getPath()) / data_path / object_name)) { fs::rename(getMetadataPath() + file_name, getMetadataPath() + object_name + ".sql"); LOG_WARNING(log, "Object {} was not dropped previously and will be restored", backQuote(object_name)); @@ -638,7 +652,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat std::vector> metadata_files; fs::directory_iterator dir_end; - for (fs::directory_iterator dir_it(getMetadataPath()); dir_it != 
dir_end; ++dir_it) + for (fs::directory_iterator dir_it(metadata_path); dir_it != dir_end; ++dir_it) { String file_name = dir_it->path().filename(); /// For '.svn', '.gitignore' directory and similar. diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 12656068643..0c0ecf76a26 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -64,7 +64,7 @@ public: time_t getObjectMetadataModificationTime(const String & object_name) const override; String getDataPath() const override { return data_path; } - String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; } + String getTableDataPath(const String & table_name) const override { return std::filesystem::path(data_path) / escapeForFileName(table_name) / ""; } String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); } String getMetadataPath() const override { return metadata_path; } @@ -83,7 +83,7 @@ protected: using IteratingFunction = std::function; - void iterateMetadataFiles(ContextPtr context, const IteratingFunction & process_metadata_file) const; + void iterateMetadataFiles(const IteratingFunction & process_metadata_file) const; ASTPtr getCreateTableQueryImpl( const String & table_name, @@ -99,6 +99,9 @@ protected: virtual void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach); virtual void setDetachedTableNotInUseForce(const UUID & /*uuid*/) {} + std::atomic_flag directories_created = ATOMIC_FLAG_INIT; + void createDirectories(); + const String metadata_path; const String data_path; }; diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 8808261654f..dd8a3f42ea8 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -55,7 +55,7 @@ static constexpr size_t METADATA_FILE_BUFFER_SIZE = 32768; static constexpr const char * const CONVERT_TO_REPLICATED_FLAG_NAME = "convert_to_replicated"; DatabaseOrdinary::DatabaseOrdinary(const String & name_, const String & metadata_path_, ContextPtr context_) - : DatabaseOrdinary(name_, metadata_path_, "data/" + escapeForFileName(name_) + "/", "DatabaseOrdinary (" + name_ + ")", context_) + : DatabaseOrdinary(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseOrdinary (" + name_ + ")", context_) { } @@ -265,7 +265,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables } }; - iterateMetadataFiles(local_context, process_metadata); + iterateMetadataFiles(process_metadata); size_t objects_in_database = metadata.parsed_tables.size() - prev_tables_count; size_t dictionaries_in_database = metadata.total_dictionaries - prev_total_dictionaries; diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 801356b3dd7..495733e15fd 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -14,6 +14,8 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int CANNOT_GET_CREATE_TABLE_QUERY; + extern const int BAD_ARGUMENTS; + extern const int UNKNOWN_TABLE; } DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_) @@ -124,6 +126,39 @@ StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & tab getEngineName()); } +void DatabasesOverlay::renameTable( + ContextPtr current_context, + const String & name, + 
IDatabase & to_database, + const String & to_name, + bool exchange, + bool dictionary) +{ + for (auto & db : databases) + { + if (db->isTableExist(name, current_context)) + { + if (DatabasesOverlay * to_overlay_database = typeid_cast(&to_database)) + { + /// Renaming from Overlay database inside itself or into another Overlay database. + /// Just use the first database in the overlay as a destination. + if (to_overlay_database->databases.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The destination Overlay database {} does not have any members", to_database.getDatabaseName()); + + db->renameTable(current_context, name, *to_overlay_database->databases[0], to_name, exchange, dictionary); + } + else + { + /// Renaming into a different type of database. E.g. from Overlay on top of Atomic database into just Atomic database. + db->renameTable(current_context, name, to_database, to_name, exchange, dictionary); + } + + return; + } + } + throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuote(getDatabaseName()), backQuote(name)); +} + ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const { ASTPtr result = nullptr; @@ -178,6 +213,18 @@ String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const return result; } +UUID DatabasesOverlay::getUUID() const +{ + UUID result = UUIDHelpers::Nil; + for (const auto & db : databases) + { + result = db->getUUID(); + if (result != UUIDHelpers::Nil) + break; + } + return result; +} + UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const { UUID result = UUIDHelpers::Nil; diff --git a/src/Databases/DatabasesOverlay.h b/src/Databases/DatabasesOverlay.h index b0c7e7e4032..40c653e5cb5 100644 --- a/src/Databases/DatabasesOverlay.h +++ b/src/Databases/DatabasesOverlay.h @@ -35,12 +35,21 @@ public: StoragePtr detachTable(ContextPtr context, const String & table_name) override; + void renameTable( + ContextPtr current_context, + const String & name, + IDatabase & to_database, + const String & to_name, + bool exchange, + bool dictionary) override; + ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override; ASTPtr getCreateDatabaseQuery() const override; String getTableDataPath(const String & table_name) const override; String getTableDataPath(const ASTCreateQuery & query) const override; + UUID getUUID() const override; UUID tryGetTableUUID(const String & table_name) const override; void drop(ContextPtr context) override; diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index f94326d220e..02418abb2b0 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -416,6 +416,7 @@ public: std::lock_guard lock{mutex}; return database_name; } + /// Get UUID of database. 
virtual UUID getUUID() const { return UUIDHelpers::Nil; } diff --git a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp index 2f5477a6b9d..8b3850c4e0c 100644 --- a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp @@ -46,6 +46,7 @@ DatabaseMaterializedMySQL::DatabaseMaterializedMySQL( , settings(std::move(settings_)) , materialize_thread(context_, database_name_, mysql_database_name_, std::move(pool_), std::move(client_), binlog_client_, settings.get()) { + createDirectories(); } void DatabaseMaterializedMySQL::rethrowExceptionIfNeeded() const diff --git a/src/Interpreters/StorageID.h b/src/Interpreters/StorageID.h index f9afbc7b98d..ad55d16e284 100644 --- a/src/Interpreters/StorageID.h +++ b/src/Interpreters/StorageID.h @@ -27,7 +27,6 @@ class ASTQueryWithTableAndOutput; class ASTTableIdentifier; class Context; -// TODO(ilezhankin): refactor and merge |ASTTableIdentifier| struct StorageID { String database_name; diff --git a/tests/queries/0_stateless/01191_rename_dictionary.sql b/tests/queries/0_stateless/01191_rename_dictionary.sql index c5012dabc81..be95e5a7d4b 100644 --- a/tests/queries/0_stateless/01191_rename_dictionary.sql +++ b/tests/queries/0_stateless/01191_rename_dictionary.sql @@ -27,6 +27,7 @@ RENAME DICTIONARY test_01191.t TO test_01191.dict1; -- {serverError INCORRECT_QU DROP DICTIONARY test_01191.t; -- {serverError INCORRECT_QUERY} DROP TABLE test_01191.t; +DROP DATABASE IF EXISTS dummy_db; CREATE DATABASE dummy_db ENGINE=Atomic; RENAME DICTIONARY test_01191.dict TO dummy_db.dict1; RENAME DICTIONARY dummy_db.dict1 TO test_01191.dict; diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference index 0bb8966cbe4..0e74c0a083e 100644 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference @@ -1,2 +1,2 @@ -CREATE TABLE default.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') -CREATE TABLE foo.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') +CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') +CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh index 934d87616ac..3a95e59416a 100755 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh @@ -4,5 +4,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' -$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' +$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' +$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' diff --git a/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference b/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference new file mode 100644 index 00000000000..1975397394b --- /dev/null +++ b/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference @@ -0,0 +1,6 @@ +123 +Hello +['Hello','world'] +Hello +Hello +['Hello','world'] diff --git a/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh b/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh new file mode 100755 index 00000000000..edaa83b8f95 --- /dev/null +++ b/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +${CLICKHOUSE_LOCAL} -n " +CREATE TABLE test (x UInt8) ORDER BY x; +INSERT INTO test VALUES (123); +SELECT * FROM test; +CREATE OR REPLACE TABLE test (s String) ORDER BY s; +INSERT INTO test VALUES ('Hello'); +SELECT * FROM test; +RENAME TABLE test TO test2; +CREATE OR REPLACE TABLE test (s Array(String)) ORDER BY s; +INSERT INTO test VALUES (['Hello', 'world']); +SELECT * FROM test; +SELECT * FROM test2; +EXCHANGE TABLES test AND test2; +SELECT * FROM test; +SELECT * FROM test2; +DROP TABLE test; +DROP TABLE test2; +" From 848285eabc5accf96084f847c86be5e583ab80a0 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 12 Aug 2024 15:40:03 +0000 Subject: [PATCH 025/566] Fix OrderByLimitByDuplicateEliminationPass with IGNORE NULLS --- src/Analyzer/FunctionNode.cpp | 7 +++ src/Analyzer/Passes/FuseFunctionsPass.cpp | 5 +- ...ore_nulls_query_tree_elimination.reference | 3 ++ ...22_ignore_nulls_query_tree_elimination.sql | 51 +++++++++++++++++++ 4 files changed, 63 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.reference create mode 100644 tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.sql diff --git a/src/Analyzer/FunctionNode.cpp b/src/Analyzer/FunctionNode.cpp index e98b04fe9a9..f402309c7be 100644 --- a/src/Analyzer/FunctionNode.cpp +++ b/src/Analyzer/FunctionNode.cpp @@ -88,6 +88,7 @@ void FunctionNode::resolveAsFunction(FunctionBasePtr function_value) function_name = function_value->getName(); function = std::move(function_value); kind = FunctionKind::ORDINARY; + nulls_action = NullsAction::EMPTY; } void FunctionNode::resolveAsAggregateFunction(AggregateFunctionPtr aggregate_function_value) @@ -95,6 +96,12 @@ void FunctionNode::resolveAsAggregateFunction(AggregateFunctionPtr aggregate_fun function_name = aggregate_function_value->getName(); function = std::move(aggregate_function_value); kind = FunctionKind::AGGREGATE; + /** When the function is resolved, we do not need the nulls action anymore. 
+ * The only thing that the nulls action does is map from one function to another. + * Thus, the nulls action is encoded in the function name and does not make sense anymore. + * Keeping the nulls action may lead to incorrect comparison of functions, e.g., count() and count() IGNORE NULLS are the same function. + */ + nulls_action = NullsAction::EMPTY; } void FunctionNode::resolveAsWindowFunction(AggregateFunctionPtr window_function_value) diff --git a/src/Analyzer/Passes/FuseFunctionsPass.cpp b/src/Analyzer/Passes/FuseFunctionsPass.cpp index 0175e304a2b..1009e7981ea 100644 --- a/src/Analyzer/Passes/FuseFunctionsPass.cpp +++ b/src/Analyzer/Passes/FuseFunctionsPass.cpp @@ -81,10 +81,9 @@ QueryTreeNodePtr createResolvedFunction(const ContextPtr & context, const String } FunctionNodePtr createResolvedAggregateFunction( - const String & name, const QueryTreeNodePtr & argument, const Array & parameters = {}, NullsAction action = NullsAction::EMPTY) + const String & name, const QueryTreeNodePtr & argument, const Array & parameters = {}) { auto function_node = std::make_shared(name); - function_node->setNullsAction(action); if (!parameters.empty()) { @@ -96,7 +95,7 @@ FunctionNodePtr createResolvedAggregateFunction( function_node->getArguments().getNodes() = { argument }; AggregateFunctionProperties properties; - auto aggregate_function = AggregateFunctionFactory::instance().get(name, action, {argument->getResultType()}, parameters, properties); + auto aggregate_function = AggregateFunctionFactory::instance().get(name, NullsAction::EMPTY, {argument->getResultType()}, parameters, properties); function_node->resolveAsAggregateFunction(std::move(aggregate_function)); return function_node; diff --git a/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.reference b/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.reference new file mode 100644 index 00000000000..1f242fa6f00 --- /dev/null +++ b/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.reference @@ -0,0 +1,3 @@ +3 +3 +3 diff --git a/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.sql b/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.sql new file mode 100644 index 00000000000..72f9781ed45 --- /dev/null +++ b/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.sql @@ -0,0 +1,51 @@ +#!/usr/bin/env -S ${HOME}/clickhouse-client --queries-file + +DROP TABLE IF EXISTS with_fill_date__fuzz_0; + +CREATE TABLE with_fill_date__fuzz_0 +( + `d` Date, + `d32` Nullable(Int32), + `d33` Int32 +) +ENGINE = Memory; + + +INSERT INTO with_fill_date__fuzz_0 VALUES (toDate('2020-03-03'), 1, 3), (toDate('2020-03-03'), NULL, 3), (toDate('2020-02-05'), 1, 1); + + +SELECT count() +FROM with_fill_date__fuzz_0 +ORDER BY + count(), + count() IGNORE NULLS, + max(d) +WITH FILL STEP toIntervalDay(10) +; + + +SELECT count() +FROM with_fill_date__fuzz_0 +ORDER BY + any(d32) RESPECT NULLS, + any_respect_nulls(d32), + max(d) +WITH FILL STEP toIntervalDay(10) +; + + +SELECT count() +FROM with_fill_date__fuzz_0 +ORDER BY + any(d32), + any(d32) IGNORE NULLS, + any(d32) RESPECT NULLS, + any_respect_nulls(d32) IGNORE NULLS, + any_respect_nulls(d32), + sum(d33), + sum(d33) IGNORE NULLS, + max(d) +WITH FILL STEP toIntervalDay(10) +; + + From 0abb330356245b27d929c750101dcfd1925cb6a4 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 13 Aug 2024 09:21:39 +0000 Subject: [PATCH 026/566] fix 03010_sum_to_to_count_if_nullable.reference --- 
.../0_stateless/03010_sum_to_to_count_if_nullable.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference index 79ebc7a5c0c..db8d26ccfea 100644 --- a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference +++ b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference @@ -83,7 +83,7 @@ QUERY id: 0 FUNCTION id: 4, function_name: tuple, function_type: ordinary, result_type: Tuple(Nullable(UInt64)) ARGUMENTS LIST id: 5, nodes: 1 - FUNCTION id: 6, function_name: sum, function_type: aggregate, nulls_action : IGNORE_NULLS, result_type: Nullable(UInt64) + FUNCTION id: 6, function_name: sum, function_type: aggregate, result_type: Nullable(UInt64) ARGUMENTS LIST id: 7, nodes: 1 FUNCTION id: 8, function_name: if, function_type: ordinary, result_type: Nullable(UInt8) From 1ba1efe3a77fc5181d3c8e228c93e5f20a087c86 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 20 Aug 2024 17:01:41 +0000 Subject: [PATCH 027/566] fix --- .../Algorithms/MergeTreePartLevelInfo.h | 29 ------------------- 1 file changed, 29 deletions(-) delete mode 100644 src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h diff --git a/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h b/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h deleted file mode 100644 index e4f22deec8d..00000000000 --- a/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h +++ /dev/null @@ -1,29 +0,0 @@ -#pragma once - -#include - -namespace DB -{ - -/// To carry part level if chunk is produced by a merge tree source -class MergeTreePartLevelInfo : public ChunkInfoCloneable -{ -public: - MergeTreePartLevelInfo() = delete; - explicit MergeTreePartLevelInfo(ssize_t part_level) - : origin_merge_tree_part_level(part_level) - { } - MergeTreePartLevelInfo(const MergeTreePartLevelInfo & other) = default; - - size_t origin_merge_tree_part_level = 0; -}; - -inline size_t getPartLevelFromChunk(const Chunk & chunk) -{ - const auto part_level_info = chunk.getChunkInfos().get(); - if (part_level_info) - return part_level_info->origin_merge_tree_part_level; - return 0; -} - -} From 57996cc68463d750d31ff26071176b3f8cbfa9ae Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 3 Sep 2024 02:38:48 +0000 Subject: [PATCH 028/566] temp fix --- src/Storages/MergeTree/MergeTreeSelectProcessor.cpp | 6 +++--- tests/queries/0_stateless/02346_fulltext_index_search.sql | 8 ++++---- .../03031_read_in_order_optimization_with_virtual_row.sql | 1 + 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index cc28884df24..4f1df44f68a 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -164,8 +164,8 @@ ChunkAndProgress MergeTreeSelectProcessor::read() } auto chunk = Chunk(ordered_columns, res.row_count); - if (add_part_level) - chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, true)); + chunk.getChunkInfos().add(std::make_shared( + add_part_level ? 
task->getInfo().data_part->info.level : 0, true)); return ChunkAndProgress{ .chunk = std::move(chunk), @@ -190,7 +190,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() auto chunk = Chunk(ordered_columns, res.row_count); if (add_part_level) - chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, true)); + chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, false)); return ChunkAndProgress{ .chunk = std::move(chunk), diff --git a/tests/queries/0_stateless/02346_fulltext_index_search.sql b/tests/queries/0_stateless/02346_fulltext_index_search.sql index 179d98a161b..f0505f63124 100644 --- a/tests/queries/0_stateless/02346_fulltext_index_search.sql +++ b/tests/queries/0_stateless/02346_fulltext_index_search.sql @@ -195,14 +195,14 @@ INSERT INTO tab VALUES (201, 'rick c01'), (202, 'mick c02'), (203, 'nick c03'); SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; -- search full_text index -SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k SETTINGS optimize_read_in_order = 1; +SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k SETTINGS optimize_read_in_order = 0; --- check the query only read 3 granules (6 rows total; each granule has 2 rows; there are 2 extra virtual rows) +-- check the query only read 3 granules (6 rows total; each granule has 2 rows) SYSTEM FLUSH LOGS; -SELECT read_rows==8 from system.query_log +SELECT read_rows==6 from system.query_log WHERE query_kind ='Select' AND current_database = currentDatabase() - AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k SETTINGS optimize_read_in_order = 1;') + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k SETTINGS optimize_read_in_order = 0;') AND type='QueryFinish' AND result_rows==3 LIMIT 1; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index aff9faf3968..5bae739bc51 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -108,6 +108,7 @@ ORDER BY x ASC LIMIT 4 SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +read_in_order_use_buffering = false, --avoid buffer max_threads = 1, optimize_read_in_order = 1, log_comment = 'no preliminary merge, with filter'; From 87c7a8b4fbfbea4e9b02ae6494b5baad7dd30b42 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 4 Sep 2024 23:08:02 +0000 Subject: [PATCH 029/566] virtualrow sketch --- .../QueryPlan/ReadFromMergeTree.cpp | 3 + src/Processors/QueryPlan/SortingStep.cpp | 3 +- .../Transforms/VirtualRowTransform.cpp | 99 +++++++++++++++++++ .../Transforms/VirtualRowTransform.h | 28 ++++++ 4 files changed, 132 insertions(+), 1 deletion(-) create mode 100644 src/Processors/Transforms/VirtualRowTransform.cpp create mode 100644 src/Processors/Transforms/VirtualRowTransform.h diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index fd1f09f1df8..90e499d02f7 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -635,6 +636,8 @@ Pipe ReadFromMergeTree::readInOrder( }); } + pipe.addSimpleTransform([](const Block & header){ 
return std::make_shared(header); }); + return pipe; } diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index f1ee68d64cf..aa909bef8a9 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -259,7 +260,7 @@ void SortingStep::enableVirtualRow(const QueryPipelineBuilder & pipeline) const { merge_tree_sources.push_back(merge_tree_source); } - else if (!std::dynamic_pointer_cast(processor)) + else if (!std::dynamic_pointer_cast(processor) && !std::dynamic_pointer_cast(processor)) { enable_virtual_row = false; break; diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp new file mode 100644 index 00000000000..2e486616e8e --- /dev/null +++ b/src/Processors/Transforms/VirtualRowTransform.cpp @@ -0,0 +1,99 @@ +#include +#include "Processors/Chunk.h" + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +VirtualRowTransform::VirtualRowTransform(const Block & header) + : IInflatingTransform(header, header) +{ +} + +IInflatingTransform::Status VirtualRowTransform::prepare() +{ + /// Check can output. + + if (output.isFinished()) + { + input.close(); + return Status::Finished; + } + + if (!output.canPush()) + { + input.setNotNeeded(); + return Status::PortFull; + } + + /// Output if has data. + if (generated) + { + output.push(std::move(current_chunk)); + generated = false; + return Status::PortFull; + } + + if (can_generate) + return Status::Ready; + + /// Check can input. + if (!has_input) + { + if (input.isFinished()) + { + if (is_finished) + { + output.finish(); + return Status::Finished; + } + is_finished = true; + return Status::Ready; + } + + input.setNeeded(); + + if (!input.hasData()) + return Status::NeedData; + + /// Set input port NotNeeded after chunk was pulled. + current_chunk = input.pull(true); + has_input = true; + } + + /// Now transform. 
+ return Status::Ready; +} + +void VirtualRowTransform::consume(Chunk chunk) +{ + if (!is_first) + { + temp_chunk = std::move(chunk); + return; + } + + is_first = false; + temp_chunk = std::move(chunk); +} + +Chunk VirtualRowTransform::generate() +{ + if (temp_chunk.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't generate chunk in VirtualRowTransform"); + + Chunk result; + result.swap(temp_chunk); + return result; +} + +bool VirtualRowTransform::canGenerate() +{ + return !temp_chunk.empty(); +} + +} diff --git a/src/Processors/Transforms/VirtualRowTransform.h b/src/Processors/Transforms/VirtualRowTransform.h new file mode 100644 index 00000000000..d054c798345 --- /dev/null +++ b/src/Processors/Transforms/VirtualRowTransform.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class VirtualRowTransform : public IInflatingTransform +{ +public: + explicit VirtualRowTransform(const Block & header); + + String getName() const override { return "VirtualRowTransform"; } + + Status prepare() override; + +protected: + void consume(Chunk chunk) override; + bool canGenerate() override; + Chunk generate() override; + +private: + bool is_first = false; + Chunk temp_chunk; +}; + +} From 67ad7b592ce5152496bc8ddc5f3dce3cb7e9d571 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 6 Sep 2024 04:12:03 +0000 Subject: [PATCH 030/566] better --- .../Transforms/VirtualRowTransform.cpp | 41 ++++++++++++------- .../Transforms/VirtualRowTransform.h | 19 ++++++--- 2 files changed, 40 insertions(+), 20 deletions(-) diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp index 2e486616e8e..e79ede2abec 100644 --- a/src/Processors/Transforms/VirtualRowTransform.cpp +++ b/src/Processors/Transforms/VirtualRowTransform.cpp @@ -10,11 +10,12 @@ namespace ErrorCodes } VirtualRowTransform::VirtualRowTransform(const Block & header) - : IInflatingTransform(header, header) + : IProcessor({header}, {header}) + , input(inputs.front()), output(outputs.front()) { } -IInflatingTransform::Status VirtualRowTransform::prepare() +VirtualRowTransform::Status VirtualRowTransform::prepare() { /// Check can output. 
@@ -46,13 +47,8 @@ IInflatingTransform::Status VirtualRowTransform::prepare() { if (input.isFinished()) { - if (is_finished) - { - output.finish(); - return Status::Finished; - } - is_finished = true; - return Status::Ready; + output.finish(); + return Status::Finished; } input.setNeeded(); @@ -69,6 +65,28 @@ IInflatingTransform::Status VirtualRowTransform::prepare() return Status::Ready; } +void VirtualRowTransform::work() +{ + if (can_generate) + { + if (generated) + throw Exception(ErrorCodes::LOGICAL_ERROR, "VirtualRowTransform cannot consume chunk because it already was generated"); + + current_chunk = generate(); + generated = true; + can_generate = false; + } + else + { + if (!has_input) + throw Exception(ErrorCodes::LOGICAL_ERROR, "VirtualRowTransform cannot consume chunk because it wasn't read"); + + consume(std::move(current_chunk)); + has_input = false; + can_generate = true; + } +} + void VirtualRowTransform::consume(Chunk chunk) { if (!is_first) @@ -91,9 +109,4 @@ Chunk VirtualRowTransform::generate() return result; } -bool VirtualRowTransform::canGenerate() -{ - return !temp_chunk.empty(); -} - } diff --git a/src/Processors/Transforms/VirtualRowTransform.h b/src/Processors/Transforms/VirtualRowTransform.h index d054c798345..7f6be5d792e 100644 --- a/src/Processors/Transforms/VirtualRowTransform.h +++ b/src/Processors/Transforms/VirtualRowTransform.h @@ -6,7 +6,7 @@ namespace DB { -class VirtualRowTransform : public IInflatingTransform +class VirtualRowTransform : public IProcessor { public: explicit VirtualRowTransform(const Block & header); @@ -14,13 +14,20 @@ public: String getName() const override { return "VirtualRowTransform"; } Status prepare() override; - -protected: - void consume(Chunk chunk) override; - bool canGenerate() override; - Chunk generate() override; + void work() override; private: + void consume(Chunk chunk); + Chunk generate(); + + InputPort & input; + OutputPort & output; + + Chunk current_chunk; + bool has_input = false; + bool generated = false; + bool can_generate = false; + bool is_first = false; Chunk temp_chunk; }; From 384617cfdf26539d5478120caa25f7e57a28d6b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Fri, 6 Sep 2024 18:12:16 +0300 Subject: [PATCH 031/566] Check for unexpected relative path --- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index cf5537452f3..68aa370959c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -582,6 +582,8 @@ bool ReplicatedMergeTreeSinkImpl::writeExistingPart(MergeTreeData::Mutabl if (deduplicate && deduplicated) { error = ErrorCodes::INSERT_WAS_DEDUPLICATED; + if (!startsWith(part->getDataPartStorage().getRelativePath(), "detached/attaching_")) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected relative path for a part: {}", part->getDataPartStorage().getRelativePath()); fs::path new_relative_path = fs::path("detached") / part->getNewName(part->info); part->renameTo(new_relative_path, false); } From 35e263a4205afa405a5f819fdf91e102ad0cd088 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Fri, 6 Sep 2024 18:12:44 +0300 Subject: [PATCH 032/566] Cleanup for flaky tests --- 
.../test_deduplicated_attached_part_rename/test.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/test_deduplicated_attached_part_rename/test.py b/tests/integration/test_deduplicated_attached_part_rename/test.py index 2b7ab0934d1..7afd85c62dc 100644 --- a/tests/integration/test_deduplicated_attached_part_rename/test.py +++ b/tests/integration/test_deduplicated_attached_part_rename/test.py @@ -81,3 +81,7 @@ def test_deduplicated_attached_part_renamed_after_attach(started_cluster): f"SELECT name FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'" ).strip() ) + + q("DROP TABLE dedup") + q("SYSTEM DROP REPLICA 'r1' FROM ZKPATH '/clickhouse/tables/dedup_attach/dedup/s1'") + ch1.query(f"DROP DATABASE {database_name}") From 36f62334c40610ad062a41ef3edbb8ecd535afff Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sun, 8 Sep 2024 00:31:02 +0000 Subject: [PATCH 033/566] move logic to virtualrow transform --- .../QueryPlan/ReadFromMergeTree.cpp | 20 ++++-- .../Transforms/VirtualRowTransform.cpp | 68 +++++++++++-------- .../Transforms/VirtualRowTransform.h | 25 ++++--- .../MergeTree/MergeTreeSelectProcessor.cpp | 60 +++------------- .../MergeTree/MergeTreeSelectProcessor.h | 8 --- ...1_mergetree_read_in_order_spread.reference | 7 +- ...er_optimization_with_virtual_row.reference | 16 +---- ...in_order_optimization_with_virtual_row.sql | 58 ++-------------- 8 files changed, 90 insertions(+), 172 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 90e499d02f7..264d4cd095d 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -615,15 +615,25 @@ Pipe ReadFromMergeTree::readInOrder( actions_settings, block_size, reader_settings); processor->addPartLevelToChunk(isQueryWithFinal()); - processor->addVirtualRowToChunk(part_with_ranges.data_part->getIndex(), part_with_ranges.ranges.front().begin); - if (need_virtual_row) - processor->enableVirtualRow(); auto source = std::make_shared(std::move(processor), data.getLogName()); if (set_total_rows_approx) source->addTotalRowsApprox(total_rows); - pipes.emplace_back(std::move(source)); + Pipe pipe(source); + + if (need_virtual_row) + { + pipe.addSimpleTransform([&](const Block & header) + { + return std::make_shared(header, + storage_snapshot->metadata->primary_key, + part_with_ranges.data_part->getIndex(), + part_with_ranges.ranges.front().begin); + }); + } + + pipes.emplace_back(std::move(pipe)); } auto pipe = Pipe::unitePipes(std::move(pipes)); @@ -636,8 +646,6 @@ Pipe ReadFromMergeTree::readInOrder( }); } - pipe.addSimpleTransform([](const Block & header){ return std::make_shared(header); }); - return pipe; } diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp index e79ede2abec..55b442cefb6 100644 --- a/src/Processors/Transforms/VirtualRowTransform.cpp +++ b/src/Processors/Transforms/VirtualRowTransform.cpp @@ -1,5 +1,5 @@ #include -#include "Processors/Chunk.h" +#include namespace DB { @@ -9,9 +9,14 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -VirtualRowTransform::VirtualRowTransform(const Block & header) - : IProcessor({header}, {header}) +VirtualRowTransform::VirtualRowTransform(const Block & header_, + const KeyDescription & primary_key_, + const IMergeTreeDataPart::Index & index_, + size_t mark_range_begin_) + : IProcessor({header_}, {header_}) , input(inputs.front()), output(outputs.front()) + , 
header(header_), primary_key(primary_key_) + , index(index_), mark_range_begin(mark_range_begin_) { } @@ -72,41 +77,50 @@ void VirtualRowTransform::work() if (generated) throw Exception(ErrorCodes::LOGICAL_ERROR, "VirtualRowTransform cannot consume chunk because it already was generated"); - current_chunk = generate(); generated = true; can_generate = false; + + if (!is_first) + { + if (current_chunk.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't generate chunk in VirtualRowTransform"); + return; + } + + is_first = false; + + /// Reorder the columns according to result_header + Columns ordered_columns; + ordered_columns.reserve(header.columns()); + for (size_t i = 0, j = 0; i < header.columns(); ++i) + { + const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); + ColumnPtr current_column = type_and_name.type->createColumn(); + // ordered_columns.push_back(current_column->cloneResized(1)); + + if (j < index->size() && type_and_name.name == primary_key.column_names[j] + && type_and_name.type == primary_key.data_types[j]) + { + auto column = current_column->cloneEmpty(); + column->insert((*(*index)[j])[mark_range_begin]); + ordered_columns.push_back(std::move(column)); + ++j; + } + else + ordered_columns.push_back(current_column->cloneResized(1)); + } + + current_chunk.setColumns(ordered_columns, 1); + current_chunk.getChunkInfos().add(std::make_shared(0, true)); } else { if (!has_input) throw Exception(ErrorCodes::LOGICAL_ERROR, "VirtualRowTransform cannot consume chunk because it wasn't read"); - consume(std::move(current_chunk)); has_input = false; can_generate = true; } } -void VirtualRowTransform::consume(Chunk chunk) -{ - if (!is_first) - { - temp_chunk = std::move(chunk); - return; - } - - is_first = false; - temp_chunk = std::move(chunk); -} - -Chunk VirtualRowTransform::generate() -{ - if (temp_chunk.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't generate chunk in VirtualRowTransform"); - - Chunk result; - result.swap(temp_chunk); - return result; -} - } diff --git a/src/Processors/Transforms/VirtualRowTransform.h b/src/Processors/Transforms/VirtualRowTransform.h index 7f6be5d792e..b9f0cb46242 100644 --- a/src/Processors/Transforms/VirtualRowTransform.h +++ b/src/Processors/Transforms/VirtualRowTransform.h @@ -1,15 +1,20 @@ #pragma once -#include -#include +#include +#include +#include namespace DB { +/// Virtual row is useful for read-in-order optimization when multiple parts exist. class VirtualRowTransform : public IProcessor { public: - explicit VirtualRowTransform(const Block & header); + explicit VirtualRowTransform(const Block & header_, + const KeyDescription & primary_key_, + const IMergeTreeDataPart::Index & index_, + size_t mark_range_begin_); String getName() const override { return "VirtualRowTransform"; } @@ -17,19 +22,21 @@ public: void work() override; private: - void consume(Chunk chunk); - Chunk generate(); - InputPort & input; OutputPort & output; Chunk current_chunk; bool has_input = false; bool generated = false; - bool can_generate = false; + bool can_generate = true; + bool is_first = true; - bool is_first = false; - Chunk temp_chunk; + Block header; + KeyDescription primary_key; + /// PK index used in virtual row. + IMergeTreeDataPart::Index index; + /// The first range that might contain the candidate. 
+ size_t mark_range_begin; }; } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 4f1df44f68a..ca368a94bd4 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -134,38 +134,22 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); - if (enable_virtual_row) + auto res = algorithm->readFromTask(*task, block_size_params); + + if (res.row_count) { - /// Turn on virtual row just once. - enable_virtual_row = false; - - const auto & primary_key = getPrimaryKey(); - - MergeTreeReadTask::BlockAndProgress res; - res.row_count = 1; - /// Reorder the columns according to result_header Columns ordered_columns; ordered_columns.reserve(result_header.columns()); - for (size_t i = 0, j = 0; i < result_header.columns(); ++i) + for (size_t i = 0; i < result_header.columns(); ++i) { - const ColumnWithTypeAndName & type_and_name = result_header.getByPosition(i); - ColumnPtr current_column = type_and_name.type->createColumn(); - - if (j < index->size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) - { - auto column = current_column->cloneEmpty(); - column->insert((*(*index)[j])[mark_range_begin]); - ordered_columns.push_back(std::move(column)); - ++j; - } - else - ordered_columns.push_back(current_column->cloneResized(1)); + auto name = result_header.getByPosition(i).name; + ordered_columns.push_back(res.block.getByName(name).column); } auto chunk = Chunk(ordered_columns, res.row_count); - chunk.getChunkInfos().add(std::make_shared( - add_part_level ? task->getInfo().data_part->info.level : 0, true)); + if (add_part_level) + chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, false)); return ChunkAndProgress{ .chunk = std::move(chunk), @@ -175,33 +159,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() } else { - auto res = algorithm->readFromTask(*task, block_size_params); - - if (res.row_count) - { - /// Reorder the columns according to result_header - Columns ordered_columns; - ordered_columns.reserve(result_header.columns()); - for (size_t i = 0; i < result_header.columns(); ++i) - { - auto name = result_header.getByPosition(i).name; - ordered_columns.push_back(res.block.getByName(name).column); - } - - auto chunk = Chunk(ordered_columns, res.row_count); - if (add_part_level) - chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, false)); - - return ChunkAndProgress{ - .chunk = std::move(chunk), - .num_read_rows = res.num_read_rows, - .num_read_bytes = res.num_read_bytes, - .is_finished = false}; - } - else - { - return {Chunk(), res.num_read_rows, res.num_read_bytes, false}; - } + return {Chunk(), res.num_read_rows, res.num_read_bytes, false}; } } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index d790d1e266f..6dcb6ca73d2 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -60,12 +60,6 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(const IMergeTreeDataPart::Index & index_, size_t mark_range_begin_) - { - index = index_; - mark_range_begin = mark_range_begin_; - } - void enableVirtualRow() { enable_virtual_row = true; } const KeyDescription & getPrimaryKey() 
const { return storage_snapshot->metadata->primary_key; } @@ -100,8 +94,6 @@ private: bool enable_virtual_row = false; /// PK index used in virtual row. IMergeTreeDataPart::Index index; - /// The first range that might contain the candidate, used in virtual row. - size_t mark_range_begin; LoggerPtr log = getLogger("MergeTreeSelectProcessor"); std::atomic is_cancelled{false}; diff --git a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference index 443f6d3ae93..44e61566deb 100644 --- a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference +++ b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference @@ -12,6 +12,7 @@ ExpressionTransform × 3 MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - ExpressionTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + ExpressionTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 \ No newline at end of file diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index b4b1554a7d4..3c3a9cf532e 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -2,25 +2,13 @@ 1 2 3 -16386 +16384 ======== 16385 16386 16387 16388 -24578 -======== -0 -1 -2 -3 -16386 -======== -16385 -16386 -16387 -16388 -24578 +24576 ======== 1 2 1 2 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 5bae739bc51..688e427d19d 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -39,14 +39,14 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'preliminary merge, no filter'; +log_comment = 'no filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'preliminary merge, no filter' +AND log_comment = 'no filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC limit 1; @@ -63,68 +63,18 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'preliminary merge with filter'; +log_comment = 'with filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'preliminary merge with filter' +AND log_comment = 'with filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1; -SELECT '========'; --- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), --- both chunks come from the same part. 
-SELECT x -FROM t -ORDER BY x ASC -LIMIT 4 -SETTINGS max_block_size = 8192, -read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge -max_threads = 1, -optimize_read_in_order = 1, -log_comment = 'no preliminary merge, no filter'; - -SYSTEM FLUSH LOGS; - -SELECT read_rows -FROM system.query_log -WHERE current_database = currentDatabase() -AND log_comment = 'no preliminary merge, no filter' -AND type = 'QueryFinish' -ORDER BY query_start_time DESC -LIMIT 1; - -SELECT '========'; --- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), --- all chunks come from the same part. -SELECT k -FROM t -WHERE k > 8192 * 2 -ORDER BY x ASC -LIMIT 4 -SETTINGS max_block_size = 8192, -read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge -read_in_order_use_buffering = false, --avoid buffer -max_threads = 1, -optimize_read_in_order = 1, -log_comment = 'no preliminary merge, with filter'; - -SYSTEM FLUSH LOGS; - -SELECT read_rows -FROM system.query_log -WHERE current_database = currentDatabase() -AND log_comment = 'no preliminary merge, with filter' -AND type = 'QueryFinish' -ORDER BY query_start_time DESC -LIMIT 1; - -DROP TABLE t; - SELECT '========'; -- from 02149_read_in_order_fixed_prefix DROP TABLE IF EXISTS fixed_prefix; From 503e7490d439e2f0969ef7b09cc2134af154fa1a Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sun, 8 Sep 2024 00:55:10 +0000 Subject: [PATCH 034/566] tidy --- src/Processors/Transforms/VirtualRowTransform.cpp | 2 +- src/Storages/MergeTree/MergeTreeReadTask.cpp | 6 ------ src/Storages/MergeTree/MergeTreeReadTask.h | 3 --- tests/queries/0_stateless/02346_fulltext_index_search.sql | 4 ++-- 4 files changed, 3 insertions(+), 12 deletions(-) diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp index 55b442cefb6..9b904fc4ae2 100644 --- a/src/Processors/Transforms/VirtualRowTransform.cpp +++ b/src/Processors/Transforms/VirtualRowTransform.cpp @@ -98,7 +98,7 @@ void VirtualRowTransform::work() ColumnPtr current_column = type_and_name.type->createColumn(); // ordered_columns.push_back(current_column->cloneResized(1)); - if (j < index->size() && type_and_name.name == primary_key.column_names[j] + if (j < index->size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) { auto column = current_column->cloneEmpty(); diff --git a/src/Storages/MergeTree/MergeTreeReadTask.cpp b/src/Storages/MergeTree/MergeTreeReadTask.cpp index 491aa26343d..177a325ea5a 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.cpp +++ b/src/Storages/MergeTree/MergeTreeReadTask.cpp @@ -161,12 +161,6 @@ MergeTreeReadTask::BlockAndProgress MergeTreeReadTask::read(const BlockSizeParam auto read_result = range_readers.main.read(rows_to_read, mark_ranges); - if (add_virtual_row) - { - /// Now we have the virtual row, which is at most once for each part. - add_virtual_row = false; - } - /// All rows were filtered. Repeat. if (read_result.num_rows == 0) read_result.columns.clear(); diff --git a/src/Storages/MergeTree/MergeTreeReadTask.h b/src/Storages/MergeTree/MergeTreeReadTask.h index a44d4e4fabd..e90a07e0b55 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.h +++ b/src/Storages/MergeTree/MergeTreeReadTask.h @@ -162,9 +162,6 @@ private: /// Used to satistfy preferred_block_size_bytes limitation MergeTreeBlockSizePredictorPtr size_predictor; - - /// If true, add once, and then set false. 
- bool add_virtual_row = false; }; using MergeTreeReadTaskPtr = std::unique_ptr; diff --git a/tests/queries/0_stateless/02346_fulltext_index_search.sql b/tests/queries/0_stateless/02346_fulltext_index_search.sql index f0505f63124..80f49790201 100644 --- a/tests/queries/0_stateless/02346_fulltext_index_search.sql +++ b/tests/queries/0_stateless/02346_fulltext_index_search.sql @@ -195,14 +195,14 @@ INSERT INTO tab VALUES (201, 'rick c01'), (202, 'mick c02'), (203, 'nick c03'); SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; -- search full_text index -SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k SETTINGS optimize_read_in_order = 0; +SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k; -- check the query only read 3 granules (6 rows total; each granule has 2 rows) SYSTEM FLUSH LOGS; SELECT read_rows==6 from system.query_log WHERE query_kind ='Select' AND current_database = currentDatabase() - AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k SETTINGS optimize_read_in_order = 0;') + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k;') AND type='QueryFinish' AND result_rows==3 LIMIT 1; From b232205b4407e185b3a17bc261c9fd977d0c0e11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Sun, 8 Sep 2024 22:22:06 +0300 Subject: [PATCH 035/566] Fix unexpected part path check --- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 68aa370959c..fb2bc2fada7 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -582,7 +582,7 @@ bool ReplicatedMergeTreeSinkImpl::writeExistingPart(MergeTreeData::Mutabl if (deduplicate && deduplicated) { error = ErrorCodes::INSERT_WAS_DEDUPLICATED; - if (!startsWith(part->getDataPartStorage().getRelativePath(), "detached/attaching_")) + if (!endsWith(part->getDataPartStorage().getRelativePath(), "detached/attaching_" + part->name + "/")) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected relative path for a part: {}", part->getDataPartStorage().getRelativePath()); fs::path new_relative_path = fs::path("detached") / part->getNewName(part->info); part->renameTo(new_relative_path, false); From 26e74bc9eec77da69c727fa2946041257bc877ce Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 9 Sep 2024 14:29:41 +0000 Subject: [PATCH 036/566] move virtual row flag to class member --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 11 +++++------ src/Processors/QueryPlan/ReadFromMergeTree.h | 4 +++- src/Storages/MergeTree/MergeTreeSequentialSource.cpp | 1 - .../01551_mergetree_read_in_order_spread.reference | 10 ++++++---- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 264d4cd095d..599a33f1777 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -513,8 +513,7 @@ Pipe ReadFromMergeTree::readInOrder( Names required_columns, PoolSettings pool_settings, ReadType read_type, - UInt64 read_limit, - bool need_virtual_row) + UInt64 read_limit) { /// For reading in order it makes sense to read only /// one range per task to reduce number of read rows. 
@@ -622,7 +621,7 @@ Pipe ReadFromMergeTree::readInOrder( Pipe pipe(source); - if (need_virtual_row) + if (enable_virtual_row) { pipe.addSimpleTransform([&](const Block & header) { @@ -1061,10 +1060,10 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( for (auto && item : splitted_parts_and_ranges) { - /// need_virtual_row = true means a MergingSortedTransform should occur. + /// enable_virtual_row = true means a MergingSortedTransform should occur. /// If so, adding a virtual row might speedup in the case of multiple parts. - bool need_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, need_virtual_row)); + enable_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); } } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 20c9cfafc7e..7a0b22d87c4 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -239,7 +239,7 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); - Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool need_virtual_row = false); + Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit); Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection); @@ -269,6 +269,8 @@ private: std::optional read_task_callback; bool enable_vertical_final = false; bool enable_remove_parts_from_snapshot_optimization = true; + + bool enable_virtual_row = false; }; } diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index edeac12a1df..e799dc0b20e 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -14,7 +14,6 @@ #include #include #include - #include #include diff --git a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference index 44e61566deb..e83c2e906d1 100644 --- a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference +++ b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference @@ -12,7 +12,9 @@ ExpressionTransform × 3 MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - ExpressionTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 \ No newline at end of file + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + 
VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + ExpressionTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 From 4a67c68d0bc6ef337a011c044ac56899265f3b0e Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 10 Sep 2024 01:31:01 +0000 Subject: [PATCH 037/566] only focus on the direct mergesort case --- .../QueryPlan/ReadFromMergeTree.cpp | 6 +- src/Processors/QueryPlan/SortingStep.cpp | 64 ---------- src/Processors/QueryPlan/SortingStep.h | 2 - src/QueryPipeline/QueryPipelineBuilder.h | 2 - .../MergeTree/MergeTreeSelectProcessor.cpp | 2 - .../MergeTree/MergeTreeSelectProcessor.h | 12 -- src/Storages/MergeTree/MergeTreeSource.h | 2 - .../02521_aggregation_by_partitions.reference | 112 +++++++++++++----- 8 files changed, 83 insertions(+), 119 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 599a33f1777..a5c7af01d55 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -392,7 +392,7 @@ Pipe ReadFromMergeTree::readFromPoolParallelReplicas( auto algorithm = std::make_unique(i); auto processor = std::make_unique( - pool, std::move(algorithm), storage_snapshot, prewhere_info, + pool, std::move(algorithm), prewhere_info, actions_settings, block_size_copy, reader_settings); auto source = std::make_shared(std::move(processor), data.getLogName()); @@ -491,7 +491,7 @@ Pipe ReadFromMergeTree::readFromPool( auto algorithm = std::make_unique(i); auto processor = std::make_unique( - pool, std::move(algorithm), storage_snapshot, prewhere_info, + pool, std::move(algorithm), prewhere_info, actions_settings, block_size_copy, reader_settings); auto source = std::make_shared(std::move(processor), data.getLogName()); @@ -610,7 +610,7 @@ Pipe ReadFromMergeTree::readInOrder( algorithm = std::make_unique(i); auto processor = std::make_unique( - pool, std::move(algorithm), storage_snapshot, prewhere_info, + pool, std::move(algorithm), prewhere_info, actions_settings, block_size, reader_settings); processor->addPartLevelToChunk(isQueryWithFinal()); diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index aa909bef8a9..48fad9f5fdb 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -14,9 +13,6 @@ #include #include -#include -#include -#include #include @@ -247,69 +243,11 @@ void SortingStep::finishSorting( }); } -void SortingStep::enableVirtualRow(const QueryPipelineBuilder & pipeline) const -{ - /// We check every step of this pipeline, to make sure virtual row can work correctly. - /// Currently ExpressionTransform is supported, should add other processors if possible. - const auto& pipe = pipeline.getPipe(); - bool enable_virtual_row = true; - std::vector> merge_tree_sources; - for (const auto & processor : pipe.getProcessors()) - { - if (auto merge_tree_source = std::dynamic_pointer_cast(processor)) - { - merge_tree_sources.push_back(merge_tree_source); - } - else if (!std::dynamic_pointer_cast(processor) && !std::dynamic_pointer_cast(processor)) - { - enable_virtual_row = false; - break; - } - } - - /// If everything is okay, enable virtual row in MergeTreeSelectProcessor. 
- if (enable_virtual_row && merge_tree_sources.size() >= 2) - { - auto extractNameAfterDot = [](const String & name) - { - size_t pos = name.find_last_of('.'); - return (pos != String::npos) ? name.substr(pos + 1) : name; - }; - - const ColumnWithTypeAndName & type_and_name = pipeline.getHeader().getByPosition(0); - String column_name = extractNameAfterDot(type_and_name.name); - for (const auto & merge_tree_source : merge_tree_sources) - { - const auto & merge_tree_select_processor = merge_tree_source->getProcessor(); - - /// Check pk is not func based, as we only check type and name in filling in primary key of virtual row. - const auto & primary_key = merge_tree_select_processor->getPrimaryKey(); - const auto & actions = primary_key.expression->getActions(); - bool is_okay = true; - for (const auto & action : actions) - { - if (action.node->type != ActionsDAG::ActionType::INPUT) - { - is_okay = false; - break; - } - } - - /// We have to check further in the case of fixed prefix, for example, - /// primary key ab, query SELECT a, b FROM t WHERE a = 1 ORDER BY b, - /// merge sort would sort based on b, leading to wrong result in comparison. - if (is_okay && primary_key.column_names[0] == column_name && primary_key.data_types[0] == type_and_name.type) - merge_tree_select_processor->enableVirtualRow(); - } - } -} - void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescription & result_sort_desc, const UInt64 limit_) { /// If there are several streams, then we merge them into one if (pipeline.getNumStreams() > 1) { - if (use_buffering && sort_settings.read_in_order_use_buffering) { pipeline.addSimpleTransform([&](const Block & header) @@ -318,8 +256,6 @@ void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescr }); } - enableVirtualRow(pipeline); - auto transform = std::make_shared( pipeline.getHeader(), pipeline.getNumStreams(), diff --git a/src/Processors/QueryPlan/SortingStep.h b/src/Processors/QueryPlan/SortingStep.h index e6f3a07b907..b4a49394a13 100644 --- a/src/Processors/QueryPlan/SortingStep.h +++ b/src/Processors/QueryPlan/SortingStep.h @@ -118,8 +118,6 @@ private: UInt64 limit_, bool skip_partial_sort = false); - void enableVirtualRow(const QueryPipelineBuilder & pipeline) const; - Type type; SortDescription prefix_description; diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index 22df1d8ea48..a9e5b1535c0 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -197,8 +197,6 @@ public: void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } - const Pipe& getPipe() const { return pipe; } - /// Convert query pipeline to pipe. 
static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); static QueryPipeline getPipeline(QueryPipelineBuilder builder); diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index ca368a94bd4..85f545d2a51 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -26,14 +26,12 @@ namespace ErrorCodes MergeTreeSelectProcessor::MergeTreeSelectProcessor( MergeTreeReadPoolPtr pool_, MergeTreeSelectAlgorithmPtr algorithm_, - const StorageSnapshotPtr & storage_snapshot_, const PrewhereInfoPtr & prewhere_info_, const ExpressionActionsSettings & actions_settings_, const MergeTreeReadTask::BlockSizeParams & block_size_params_, const MergeTreeReaderSettings & reader_settings_) : pool(std::move(pool_)) , algorithm(std::move(algorithm_)) - , storage_snapshot(storage_snapshot_) , prewhere_info(prewhere_info_) , actions_settings(actions_settings_) , prewhere_actions(getPrewhereActions(prewhere_info, actions_settings, reader_settings_.enable_multiple_prewhere_read_steps)) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 6dcb6ca73d2..7a9cebbcb2e 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -36,7 +36,6 @@ public: MergeTreeSelectProcessor( MergeTreeReadPoolPtr pool_, MergeTreeSelectAlgorithmPtr algorithm_, - const StorageSnapshotPtr & storage_snapshot_, const PrewhereInfoPtr & prewhere_info_, const ExpressionActionsSettings & actions_settings_, const MergeTreeReadTask::BlockSizeParams & block_size_params_, @@ -60,17 +59,12 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void enableVirtualRow() { enable_virtual_row = true; } - - const KeyDescription & getPrimaryKey() const { return storage_snapshot->metadata->primary_key; } - private: /// Sets up range readers corresponding to data readers void initializeRangeReaders(); const MergeTreeReadPoolPtr pool; const MergeTreeSelectAlgorithmPtr algorithm; - const StorageSnapshotPtr storage_snapshot; const PrewhereInfoPtr prewhere_info; const ExpressionActionsSettings actions_settings; @@ -89,12 +83,6 @@ private: /// Should we add part level to produced chunk. Part level is useful for next steps if query has FINAL bool add_part_level = false; - /// Should we add a virtual row as the single first chunk. - /// Virtual row is useful for read-in-order optimization when multiple parts exist. - bool enable_virtual_row = false; - /// PK index used in virtual row. 
- IMergeTreeDataPart::Index index; - LoggerPtr log = getLogger("MergeTreeSelectProcessor"); std::atomic is_cancelled{false}; }; diff --git a/src/Storages/MergeTree/MergeTreeSource.h b/src/Storages/MergeTree/MergeTreeSource.h index 287f2f5ac63..7506af4f9b8 100644 --- a/src/Storages/MergeTree/MergeTreeSource.h +++ b/src/Storages/MergeTree/MergeTreeSource.h @@ -19,8 +19,6 @@ public: Status prepare() override; - const MergeTreeSelectProcessorPtr& getProcessor() const { return processor; } - #if defined(OS_LINUX) int schedule() override; #endif diff --git a/tests/queries/0_stateless/02521_aggregation_by_partitions.reference b/tests/queries/0_stateless/02521_aggregation_by_partitions.reference index 87b2d5c3430..addc36421c3 100644 --- a/tests/queries/0_stateless/02521_aggregation_by_partitions.reference +++ b/tests/queries/0_stateless/02521_aggregation_by_partitions.reference @@ -160,52 +160,100 @@ ExpressionTransform × 16 (ReadFromMergeTree) MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 
2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 1000000 Skip merging: 1 Skip merging: 1 From 79e1ce1d4bd1e032b7890f27386dbf9c043e49c0 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 12 Sep 2024 23:54:16 +0000 Subject: [PATCH 038/566] fix --- src/Processors/QueryPlan/ReadFromMergeTree.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index a09d31155dc..b43217db598 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -282,12 +282,8 @@ private: std::optional read_task_callback; bool enable_vertical_final = false; bool enable_remove_parts_from_snapshot_optimization = true; -<<<<<<< LessReadInOrder - bool enable_virtual_row = false; -======= std::optional number_of_current_replica; ->>>>>>> master }; } From 
084c8115fe55440d363999dd498f77c02306c467 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 13 Sep 2024 21:09:03 +0000 Subject: [PATCH 039/566] support non-preliminary merge --- .../Optimizations/optimizeReadInOrder.cpp | 2 + .../QueryPlan/ReadFromMergeTree.cpp | 8 ++- src/Processors/QueryPlan/ReadFromMergeTree.h | 2 + ...er_optimization_with_virtual_row.reference | 12 ++++ ...in_order_optimization_with_virtual_row.sql | 58 +++++++++++++++++-- 5 files changed, 75 insertions(+), 7 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index ac7fcdcf83f..c41122c26b2 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -820,6 +820,8 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n bool can_read = reading->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit); if (!can_read) return nullptr; + + reading->enableVirtualRow(); } return order_info; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 43b034b476a..7a297f6db3b 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1099,9 +1099,11 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( for (auto && item : splitted_parts_and_ranges) { - /// enable_virtual_row = true means a MergingSortedTransform should occur. - /// If so, adding a virtual row might speedup in the case of multiple parts. - enable_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + /// If not enabled before, try to enable it when conditions meet as in the following section of preliminary merge, + /// only ExpressionTransform is added between MergingSortedTransform and readFromMergeTree. 
+ if (!enable_virtual_row) + enable_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); } } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index b43217db598..ccb56c3f31a 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -210,6 +210,8 @@ public: void applyFilters(ActionDAGNodes added_filter_nodes) override; + void enableVirtualRow() { enable_virtual_row = true; } + private: int getSortDirection() const { diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index 3c3a9cf532e..7106ddc157c 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -10,6 +10,18 @@ 16388 24576 ======== +0 +1 +2 +3 +16384 +======== +16385 +16386 +16387 +16388 +24578 +======== 1 2 1 2 1 3 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 688e427d19d..5bae739bc51 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -39,14 +39,14 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'no filter'; +log_comment = 'preliminary merge, no filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'no filter' +AND log_comment = 'preliminary merge, no filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC limit 1; @@ -63,18 +63,68 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'with filter'; +log_comment = 'preliminary merge with filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'with filter' +AND log_comment = 'preliminary merge with filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1; +SELECT '========'; +-- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), +-- both chunks come from the same part. +SELECT x +FROM t +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'no preliminary merge, no filter'; + +SYSTEM FLUSH LOGS; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'no preliminary merge, no filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +LIMIT 1; + +SELECT '========'; +-- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), +-- all chunks come from the same part. 
+SELECT k +FROM t +WHERE k > 8192 * 2 +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +read_in_order_use_buffering = false, --avoid buffer +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'no preliminary merge, with filter'; + +SYSTEM FLUSH LOGS; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'no preliminary merge, with filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +LIMIT 1; + +DROP TABLE t; + SELECT '========'; -- from 02149_read_in_order_fixed_prefix DROP TABLE IF EXISTS fixed_prefix; From 2aba6f5b36d959c65d6daaaacb855a7ccf9b26b2 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 13 Sep 2024 21:44:03 +0000 Subject: [PATCH 040/566] avoid conflict with buffering --- .../QueryPlan/Optimizations/optimizeReadInOrder.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index c41122c26b2..29453acca41 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -821,7 +821,10 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n if (!can_read) return nullptr; - reading->enableVirtualRow(); + bool use_buffering = (order_info->limit == 0) && sorting.getSettings().read_in_order_use_buffering; + /// Avoid conflict with buffering. + if (!use_buffering) + reading->enableVirtualRow(); } return order_info; From c8d6c177688783b25eb8f88e1f891e9839dac8a7 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 14 Sep 2024 02:22:11 +0000 Subject: [PATCH 041/566] fix --- .../Optimizations/optimizeReadInOrder.cpp | 2 +- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 17 ++++++++++++----- src/Processors/QueryPlan/ReadFromMergeTree.h | 2 +- .../02149_read_in_order_fixed_prefix.reference | 18 +++++++++++++----- ...der_optimization_with_virtual_row.reference | 2 +- 5 files changed, 28 insertions(+), 13 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 29453acca41..b302534e2f4 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -822,7 +822,7 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n return nullptr; bool use_buffering = (order_info->limit == 0) && sorting.getSettings().read_in_order_use_buffering; - /// Avoid conflict with buffering. + /// Avoid conflict with buffering. if (!use_buffering) reading->enableVirtualRow(); } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 7a297f6db3b..ac5db8277c2 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -549,7 +549,8 @@ Pipe ReadFromMergeTree::readInOrder( Names required_columns, PoolSettings pool_settings, ReadType read_type, - UInt64 read_limit) + UInt64 read_limit, + bool enable_current_virtual_row) { /// For reading in order it makes sense to read only /// one range per task to reduce number of read rows. 
@@ -660,7 +661,7 @@ Pipe ReadFromMergeTree::readInOrder( Pipe pipe(source); - if (enable_virtual_row) + if (enable_current_virtual_row) { pipe.addSimpleTransform([&](const Block & header) { @@ -1097,14 +1098,20 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( splitted_parts_and_ranges.emplace_back(std::move(new_parts)); } + /// If enabled in the optimization stage, check whether there are more than one branch. + if (enable_virtual_row) + enable_virtual_row = splitted_parts_and_ranges.size() > 1 + || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1); + for (auto && item : splitted_parts_and_ranges) { - /// If not enabled before, try to enable it when conditions meet as in the following section of preliminary merge, + /// If not enabled before, try to enable it when conditions meet, as in the following section of preliminary merge, /// only ExpressionTransform is added between MergingSortedTransform and readFromMergeTree. + bool enable_current_virtual_row = enable_virtual_row; if (!enable_virtual_row) - enable_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, enable_current_virtual_row)); } } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index ccb56c3f31a..7c0bbdc8dec 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -254,7 +254,7 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); - Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit); + Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool enable_current_virtual_row = false); Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection); diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference index d608364e01b..f7966645e8a 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference @@ -14,7 +14,10 @@ ExpressionTransform (Expression) ExpressionTransform × 2 (ReadFromMergeTree) - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -32,9 +35,11 @@ ExpressionTransform ExpressionTransform × 2 (ReadFromMergeTree) ReverseTransform - 
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 - ReverseTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 + ReverseTransform + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -51,7 +56,10 @@ ExpressionTransform (Expression) ExpressionTransform × 2 (ReadFromMergeTree) - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 2020-10-11 0 2020-10-11 0 2020-10-11 0 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index 7106ddc157c..ef9f06ec21a 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -20,7 +20,7 @@ 16386 16387 16388 -24578 +24576 ======== 1 2 1 2 From 105639c0878e896b59bea098a51f4354cf831846 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 14 Sep 2024 20:41:36 +0000 Subject: [PATCH 042/566] disable pk function --- .../QueryPlan/ReadFromMergeTree.cpp | 14 ++++++++++++- ...1_mergetree_read_in_order_spread.reference | 9 +++------ ...in_order_optimization_with_virtual_row.sql | 20 +++++++++++++++---- 3 files changed, 32 insertions(+), 11 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index ac5db8277c2..02d10dcb46b 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1098,6 +1098,17 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( splitted_parts_and_ranges.emplace_back(std::move(new_parts)); } + bool primary_key_type_supports_virtual_row = true; + const auto & actions = storage_snapshot->metadata->getPrimaryKey().expression->getActions(); + for (const auto & action : actions) + { + if (action.node->type != ActionsDAG::ActionType::INPUT) + { + primary_key_type_supports_virtual_row = false; + break; + } + } + /// If enabled in the optimization stage, check whether there are more than one branch. 
if (enable_virtual_row) enable_virtual_row = splitted_parts_and_ranges.size() > 1 @@ -1111,7 +1122,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( if (!enable_virtual_row) enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, enable_current_virtual_row)); + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, + enable_current_virtual_row && primary_key_type_supports_virtual_row)); } } diff --git a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference index e83c2e906d1..443f6d3ae93 100644 --- a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference +++ b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference @@ -12,9 +12,6 @@ ExpressionTransform × 3 MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - ExpressionTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + ExpressionTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 5bae739bc51..159f38903e3 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -138,7 +138,14 @@ SYSTEM STOP MERGES fixed_prefix; INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); -SELECT a, b FROM fixed_prefix WHERE a = 1 ORDER BY b SETTINGS max_threads = 1; +SELECT a, b +FROM fixed_prefix +WHERE a = 1 +ORDER BY b +SETTINGS max_threads = 1, +read_in_order_use_buffering = false, +optimize_read_in_order = 1, +read_in_order_two_level_merge_threshold = 0; --force preliminary merge DROP TABLE fixed_prefix; @@ -160,8 +167,13 @@ INSERT INTO function_pk values(1,1); INSERT INTO function_pk values(1,3); INSERT INTO function_pk values(1,2); --- TODO: handle preliminary merge for this case, temporarily disable it -SET optimize_read_in_order = 0; -SELECT * FROM function_pk ORDER BY (A,-B) ASC limit 3 SETTINGS max_threads = 1; +SELECT * +FROM function_pk +ORDER BY (A,-B) ASC +limit 3 +SETTINGS max_threads = 1, +read_in_order_use_buffering = false, +optimize_read_in_order = 1, +read_in_order_two_level_merge_threshold = 0; --force preliminary merge DROP TABLE function_pk; From 45471d841bd906cbd7c4b4e88581c049e759d9f1 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 16 Sep 2024 17:41:38 +0000 Subject: [PATCH 043/566] remove default value of enable_current_virtual_row --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 4 ++-- src/Processors/QueryPlan/ReadFromMergeTree.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 
02d10dcb46b..fb69bdd5aaa 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -716,7 +716,7 @@ Pipe ReadFromMergeTree::read( if (read_type == ReadType::Default && (max_streams > 1 || checkAllPartsOnRemoteFS(parts_with_range))) return readFromPool(std::move(parts_with_range), std::move(required_columns), std::move(pool_settings)); - auto pipe = readInOrder(parts_with_range, required_columns, pool_settings, read_type, /*limit=*/ 0); + auto pipe = readInOrder(parts_with_range, required_columns, pool_settings, read_type, /*limit=*/ 0, false); /// Use ConcatProcessor to concat sources together. /// It is needed to read in parts order (and so in PK order) if single thread is used. @@ -1025,7 +1025,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( /// For parallel replicas the split will be performed on the initiator side. if (is_parallel_reading_from_replicas) { - pipes.emplace_back(readInOrder(std::move(parts_with_ranges), column_names, pool_settings, read_type, input_order_info->limit)); + pipes.emplace_back(readInOrder(std::move(parts_with_ranges), column_names, pool_settings, read_type, input_order_info->limit, false)); } else { diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 7c0bbdc8dec..aa1b9dcfdcb 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -254,7 +254,7 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); - Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool enable_current_virtual_row = false); + Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool enable_current_virtual_row); Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection); From 6af5fe48ba2f7d22447056fd148665f350830fe4 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 16 Sep 2024 19:43:00 +0000 Subject: [PATCH 044/566] handle the case first prefix fixed --- .../QueryPlan/Optimizations/optimizeReadInOrder.cpp | 10 +++++++--- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 2 +- src/Storages/ReadInOrderOptimizer.cpp | 2 +- src/Storages/SelectQueryInfo.h | 11 ++++++++++- ...d_in_order_optimization_with_virtual_row.reference | 6 ++++++ ...31_read_in_order_optimization_with_virtual_row.sql | 9 +++++++++ 6 files changed, 34 insertions(+), 6 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index b302534e2f4..909645098b1 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -370,6 +370,7 @@ InputOrderInfoPtr buildInputOrderInfo( int read_direction = 0; size_t next_description_column = 0; size_t next_sort_key = 0; + bool first_prefix_fixed = false; while (next_description_column < description.size() && 
next_sort_key < sorting_key.column_names.size()) { @@ -447,6 +448,9 @@ InputOrderInfoPtr buildInputOrderInfo( } else if (fixed_key_columns.contains(sort_column_node)) { + if (next_sort_key == 0) + first_prefix_fixed = true; + //std::cerr << "+++++++++ Found fixed key by match" << std::endl; ++next_sort_key; } @@ -481,7 +485,7 @@ InputOrderInfoPtr buildInputOrderInfo( if (read_direction == 0 || order_key_prefix_descr.empty()) return nullptr; - return std::make_shared(order_key_prefix_descr, next_sort_key, read_direction, limit); + return std::make_shared(order_key_prefix_descr, next_sort_key, read_direction, limit, first_prefix_fixed); } /// We really need three different sort descriptions here. @@ -685,7 +689,7 @@ AggregationInputOrder buildInputOrderInfo( for (const auto & key : not_matched_group_by_keys) group_by_sort_description.emplace_back(SortColumnDescription(std::string(key))); - auto input_order = std::make_shared(order_key_prefix_descr, next_sort_key, /*read_direction*/ 1, /* limit */ 0); + auto input_order = std::make_shared(order_key_prefix_descr, next_sort_key, /*read_direction*/ 1, /* limit */ 0, false); return { std::move(input_order), std::move(sort_description_for_merging), std::move(group_by_sort_description) }; } @@ -823,7 +827,7 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n bool use_buffering = (order_info->limit == 0) && sorting.getSettings().read_in_order_use_buffering; /// Avoid conflict with buffering. - if (!use_buffering) + if (!use_buffering && !order_info->first_prefix_fixed) reading->enableVirtualRow(); } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index fb69bdd5aaa..b507172597e 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1808,7 +1808,7 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, if (direction != 1 && query_info.isFinal()) return false; - query_info.input_order_info = std::make_shared(SortDescription{}, prefix_size, direction, read_limit); + query_info.input_order_info = std::make_shared(SortDescription{}, prefix_size, direction, read_limit, false); reader_settings.read_in_order = true; /// In case or read-in-order, don't create too many reading streams. diff --git a/src/Storages/ReadInOrderOptimizer.cpp b/src/Storages/ReadInOrderOptimizer.cpp index 9c8c4c2fe79..ea7ea218feb 100644 --- a/src/Storages/ReadInOrderOptimizer.cpp +++ b/src/Storages/ReadInOrderOptimizer.cpp @@ -249,7 +249,7 @@ InputOrderInfoPtr ReadInOrderOptimizer::getInputOrderImpl( if (sort_description_for_merging.empty()) return {}; - return std::make_shared(std::move(sort_description_for_merging), key_pos, read_direction, limit); + return std::make_shared(std::move(sort_description_for_merging), key_pos, read_direction, limit, false); } InputOrderInfoPtr ReadInOrderOptimizer::getInputOrder( diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 7ad6a733c6f..bf1229f7a3a 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -119,13 +119,22 @@ struct InputOrderInfo const int direction; const UInt64 limit; + /** For virtual row optimization only + * for example, when pk is (a,b), a = 1, order by b, virtual row should be + * disabled in the following case: + * 1st part (0, 100), (1, 2), (1, 3), (1, 4) + * 2nd part (0, 100), (1, 2), (1, 3), (1, 4). 
+ */ + bool first_prefix_fixed; + InputOrderInfo( const SortDescription & sort_description_for_merging_, size_t used_prefix_of_sorting_key_size_, - int direction_, UInt64 limit_) + int direction_, UInt64 limit_, bool first_prefix_fixed_) : sort_description_for_merging(sort_description_for_merging_) , used_prefix_of_sorting_key_size(used_prefix_of_sorting_key_size_) , direction(direction_), limit(limit_) + , first_prefix_fixed(first_prefix_fixed_) { } diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index ef9f06ec21a..08dabf3ee06 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -28,6 +28,12 @@ 1 3 1 4 1 4 +1 2 +1 2 +1 3 +1 3 +1 4 +1 4 ======== 1 3 1 2 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 159f38903e3..b26f3a48eef 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -147,6 +147,15 @@ read_in_order_use_buffering = false, optimize_read_in_order = 1, read_in_order_two_level_merge_threshold = 0; --force preliminary merge +SELECT a, b +FROM fixed_prefix +WHERE a = 1 +ORDER BY b +SETTINGS max_threads = 1, +read_in_order_use_buffering = false, +optimize_read_in_order = 1, +read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge + DROP TABLE fixed_prefix; SELECT '========'; From 81a7927b8a24d6e7686ed6bf9bd6f7452428b492 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 17 Sep 2024 15:06:21 +0000 Subject: [PATCH 045/566] handle virtual row in BufferChunksTransform --- src/Processors/QueryPlan/BufferChunksTransform.cpp | 14 ++++++++++++++ .../Optimizations/optimizeReadInOrder.cpp | 4 +--- ...read_in_order_optimization_with_virtual_row.sql | 4 ---- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/src/Processors/QueryPlan/BufferChunksTransform.cpp b/src/Processors/QueryPlan/BufferChunksTransform.cpp index 3601a68d36e..0d9cee28619 100644 --- a/src/Processors/QueryPlan/BufferChunksTransform.cpp +++ b/src/Processors/QueryPlan/BufferChunksTransform.cpp @@ -1,4 +1,5 @@ #include +#include namespace DB { @@ -49,13 +50,26 @@ IProcessor::Status BufferChunksTransform::prepare() else if (input.hasData()) { auto chunk = pullChunk(); + bool virtual_row = getVirtualRowFromChunk(chunk); output.push(std::move(chunk)); + if (virtual_row) + { + input.setNotNeeded(); + return Status::PortFull; + } } } if (input.hasData() && (num_buffered_rows < max_rows_to_buffer || num_buffered_bytes < max_bytes_to_buffer)) { auto chunk = pullChunk(); + bool virtual_row = getVirtualRowFromChunk(chunk); + if (virtual_row) + { + output.push(std::move(chunk)); + input.setNotNeeded(); + return Status::PortFull; + } num_buffered_rows += chunk.getNumRows(); num_buffered_bytes += chunk.bytes(); chunks.push(std::move(chunk)); diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 909645098b1..e7468a3a3f2 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -825,9 +825,7 @@ InputOrderInfoPtr 
buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n if (!can_read) return nullptr; - bool use_buffering = (order_info->limit == 0) && sorting.getSettings().read_in_order_use_buffering; - /// Avoid conflict with buffering. - if (!use_buffering && !order_info->first_prefix_fixed) + if (!order_info->first_prefix_fixed) reading->enableVirtualRow(); } diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index b26f3a48eef..7e3af6c057a 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -108,7 +108,6 @@ ORDER BY x ASC LIMIT 4 SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge -read_in_order_use_buffering = false, --avoid buffer max_threads = 1, optimize_read_in_order = 1, log_comment = 'no preliminary merge, with filter'; @@ -143,7 +142,6 @@ FROM fixed_prefix WHERE a = 1 ORDER BY b SETTINGS max_threads = 1, -read_in_order_use_buffering = false, optimize_read_in_order = 1, read_in_order_two_level_merge_threshold = 0; --force preliminary merge @@ -152,7 +150,6 @@ FROM fixed_prefix WHERE a = 1 ORDER BY b SETTINGS max_threads = 1, -read_in_order_use_buffering = false, optimize_read_in_order = 1, read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge @@ -181,7 +178,6 @@ FROM function_pk ORDER BY (A,-B) ASC limit 3 SETTINGS max_threads = 1, -read_in_order_use_buffering = false, optimize_read_in_order = 1, read_in_order_two_level_merge_threshold = 0; --force preliminary merge From a48bd922d9122aa18a4cf1fe196a3418f798c7a4 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 17 Sep 2024 20:27:59 +0000 Subject: [PATCH 046/566] fix limit in BufferChunksTransform with virtual row --- src/Processors/QueryPlan/BufferChunksTransform.cpp | 14 ++++++++------ src/Processors/QueryPlan/BufferChunksTransform.h | 2 +- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 2 +- .../02149_read_in_order_fixed_prefix.reference | 8 +++----- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/Processors/QueryPlan/BufferChunksTransform.cpp b/src/Processors/QueryPlan/BufferChunksTransform.cpp index 0d9cee28619..47e2c2ba0d5 100644 --- a/src/Processors/QueryPlan/BufferChunksTransform.cpp +++ b/src/Processors/QueryPlan/BufferChunksTransform.cpp @@ -49,8 +49,8 @@ IProcessor::Status BufferChunksTransform::prepare() } else if (input.hasData()) { - auto chunk = pullChunk(); - bool virtual_row = getVirtualRowFromChunk(chunk); + bool virtual_row; + auto chunk = pullChunk(virtual_row); output.push(std::move(chunk)); if (virtual_row) { @@ -62,8 +62,8 @@ IProcessor::Status BufferChunksTransform::prepare() if (input.hasData() && (num_buffered_rows < max_rows_to_buffer || num_buffered_bytes < max_bytes_to_buffer)) { - auto chunk = pullChunk(); - bool virtual_row = getVirtualRowFromChunk(chunk); + bool virtual_row; + auto chunk = pullChunk(virtual_row); if (virtual_row) { output.push(std::move(chunk)); @@ -85,10 +85,12 @@ IProcessor::Status BufferChunksTransform::prepare() return Status::NeedData; } -Chunk BufferChunksTransform::pullChunk() +Chunk BufferChunksTransform::pullChunk(bool & virtual_row) { auto chunk = input.pull(); - num_processed_rows += chunk.getNumRows(); + virtual_row = getVirtualRowFromChunk(chunk); + if (!virtual_row) + num_processed_rows += chunk.getNumRows(); if (limit && 
num_processed_rows >= limit) input.close(); diff --git a/src/Processors/QueryPlan/BufferChunksTransform.h b/src/Processors/QueryPlan/BufferChunksTransform.h index 752f9910734..fce79eeaef3 100644 --- a/src/Processors/QueryPlan/BufferChunksTransform.h +++ b/src/Processors/QueryPlan/BufferChunksTransform.h @@ -24,7 +24,7 @@ public: String getName() const override { return "BufferChunks"; } private: - Chunk pullChunk(); + Chunk pullChunk(bool & virtual_row); InputPort & input; OutputPort & output; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index b507172597e..45dcb4616b1 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -661,7 +661,7 @@ Pipe ReadFromMergeTree::readInOrder( Pipe pipe(source); - if (enable_current_virtual_row) + if (enable_current_virtual_row && (read_type == ReadType::InOrder)) { pipe.addSimpleTransform([&](const Block & header) { diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference index f7966645e8a..31462988c2d 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference @@ -35,11 +35,9 @@ ExpressionTransform ExpressionTransform × 2 (ReadFromMergeTree) ReverseTransform - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 - ReverseTransform - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 + ReverseTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 From dd6503bb2ba0cb8bbedcc807df7ebe77fc0310c5 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 18 Sep 2024 14:10:03 +0000 Subject: [PATCH 047/566] Don't allow Variant/Dynamic types in ORDER BY/GROUP BY/PARTITION BY/PRIMARY KEY by default --- docs/en/operations/settings/settings.md | 22 +++ docs/en/sql-reference/data-types/dynamic.md | 3 + docs/en/sql-reference/data-types/variant.md | 2 + src/Analyzer/Resolve/QueryAnalyzer.cpp | 52 ++++- src/Analyzer/Resolve/QueryAnalyzer.h | 4 + src/Core/Settings.h | 3 + src/Interpreters/ExpressionAnalyzer.cpp | 42 ++++ src/Interpreters/ExpressionAnalyzer.h | 2 + src/Storages/KeyDescription.cpp | 9 + ...mic_variant_in_order_by_group_by.reference | 184 ++++++++++++++++++ ...1_dynamic_variant_in_order_by_group_by.sql | 154 +++++++++++++++ 11 files changed, 472 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference create mode 100644 tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index b177ded3e32..302bc8da78f 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5682,3 +5682,25 @@ Default value: `0`. Enable `IF NOT EXISTS` for `CREATE` statement by default. If either this setting or `IF NOT EXISTS` is specified and a table with the provided name already exists, no exception will be thrown. Default value: `false`. 
+
+## allow_suspicious_types_in_group_by {#allow_suspicious_types_in_group_by}
+
+Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in GROUP BY keys.
+
+Possible values:
+
+- 1 — Usage of `Variant` and `Dynamic` types is not restricted.
+- 0 — Usage of `Variant` and `Dynamic` types is restricted.
+
+Default value: 0.
+
+## allow_suspicious_types_in_order_by {#allow_suspicious_types_in_order_by}
+
+Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in ORDER BY keys.
+
+Possible values:
+
+- 1 — Usage of `Variant` and `Dynamic` types is not restricted.
+- 0 — Usage of `Variant` and `Dynamic` types is restricted.
+
+Default value: 0.
diff --git a/docs/en/sql-reference/data-types/dynamic.md b/docs/en/sql-reference/data-types/dynamic.md
index f9befd166fe..4d0bf073535 100644
--- a/docs/en/sql-reference/data-types/dynamic.md
+++ b/docs/en/sql-reference/data-types/dynamic.md
@@ -411,6 +411,9 @@ SELECT d, dynamicType(d) FROM test ORDER by d;
 └─────┴────────────────┘
 ```
 
+**Note:** by default the `Dynamic` type is not allowed in `GROUP BY`/`ORDER BY` keys; if you want to use it, consider its special comparison rule and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.
+
+
 ## Reaching the limit in number of different data types stored inside Dynamic
 
 `Dynamic` data type can store only limited number of different data types as separate subcolumns. By default, this limit is 32, but you can change it in type declaration using syntax `Dynamic(max_types=N)` where N is between 0 and 254 (due to implementation details, it's impossible to have more than 254 different data types that can be stored as separate subcolumns inside Dynamic).
diff --git a/docs/en/sql-reference/data-types/variant.md b/docs/en/sql-reference/data-types/variant.md
index 3c2b6e0a362..7cb0f4ad4ea 100644
--- a/docs/en/sql-reference/data-types/variant.md
+++ b/docs/en/sql-reference/data-types/variant.md
@@ -441,6 +441,8 @@ SELECT v, variantType(v) FROM test ORDER by v;
 └─────┴────────────────┘
 ```
 
+**Note:** by default the `Variant` type is not allowed in `GROUP BY`/`ORDER BY` keys; if you want to use it, consider its special comparison rule and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.
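(Editor's illustration, not part of the committed patch: a minimal sketch of the behaviour these docs describe. It assumes the `test` table with a `Variant` column `v` from the examples earlier in that section; the rejection with `ILLEGAL_COLUMN` and the setting names are taken from the analyzer changes later in this series.)

```sql
-- With the new defaults, both queries are rejected with ILLEGAL_COLUMN:
SELECT v FROM test ORDER BY v;
SELECT v, count() FROM test GROUP BY v;

-- Opting in explicitly acknowledges the special Variant/Dynamic comparison rule:
SET allow_suspicious_types_in_order_by = 1, allow_suspicious_types_in_group_by = 1;
SELECT v FROM test ORDER BY v;
SELECT v, count() FROM test GROUP BY v;
```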
+ ## JSONExtract functions with Variant All `JSONExtract*` functions support `Variant` type: diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index a18c2901a58..304338109c1 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -3962,6 +3962,8 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ sort_node.getExpression() = sort_column_list_node->getNodes().front(); } + validateSortingKeyType(sort_node.getExpression()->getResultType(), scope); + size_t sort_expression_projection_names_size = sort_expression_projection_names.size(); if (sort_expression_projection_names_size != 1) throw Exception(ErrorCodes::LOGICAL_ERROR, @@ -4047,6 +4049,24 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ return result_projection_names; } +void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const +{ + if (scope.context->getSettingsRef().allow_suspicious_types_in_order_by) + return; + + auto check = [](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. " + "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); + }; + + check(*sorting_key_type); + sorting_key_type->forEachChild(check); +} + namespace { @@ -4086,11 +4106,12 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR expandTuplesInList(group_by_list); } - if (scope.group_by_use_nulls) + for (const auto & grouping_set : query_node_typed.getGroupBy().getNodes()) { - for (const auto & grouping_set : query_node_typed.getGroupBy().getNodes()) + for (const auto & group_by_elem : grouping_set->as()->getNodes()) { - for (const auto & group_by_elem : grouping_set->as()->getNodes()) + validateGroupByKeyType(group_by_elem->getResultType(), scope); + if (scope.group_by_use_nulls) scope.nullable_group_by_keys.insert(group_by_elem); } } @@ -4106,14 +4127,35 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR auto & group_by_list = query_node_typed.getGroupBy().getNodes(); expandTuplesInList(group_by_list); - if (scope.group_by_use_nulls) + for (const auto & group_by_elem : query_node_typed.getGroupBy().getNodes()) { - for (const auto & group_by_elem : query_node_typed.getGroupBy().getNodes()) + validateGroupByKeyType(group_by_elem->getResultType(), scope); + if (scope.group_by_use_nulls) scope.nullable_group_by_keys.insert(group_by_elem); } } } +/** Validate data types of GROUP BY key. + */ +void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const +{ + if (scope.context->getSettingsRef().allow_suspicious_types_in_group_by) + return; + + auto check = [](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " + "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); + }; + + check(*group_by_key_type); + group_by_key_type->forEachChild(check); +} + /** Resolve interpolate columns nodes list. 
*/ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.h b/src/Analyzer/Resolve/QueryAnalyzer.h index 7f9088b35e5..c90ded09876 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.h +++ b/src/Analyzer/Resolve/QueryAnalyzer.h @@ -217,8 +217,12 @@ private: ProjectionNames resolveSortNodeList(QueryTreeNodePtr & sort_node_list, IdentifierResolveScope & scope); + void validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const; + void resolveGroupByNode(QueryNode & query_node_typed, IdentifierResolveScope & scope); + void validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const; + void resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope); void resolveWindowNodeList(QueryTreeNodePtr & window_node_list, IdentifierResolveScope & scope); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 23dc2a8fdc5..a3c58144fd0 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -389,6 +389,9 @@ class IColumn; M(Bool, prefer_global_in_and_join, false, "If enabled, all IN/JOIN operators will be rewritten as GLOBAL IN/JOIN. It's useful when the to-be-joined tables are only available on the initiator and we need to always scatter their data on-the-fly during distributed processing with the GLOBAL keyword. It's also useful to reduce the need to access the external sources joining external tables.", 0) \ M(Bool, enable_vertical_final, true, "If enable, remove duplicated rows during FINAL by marking rows as deleted and filtering them later instead of merging rows", 0) \ \ + M(Bool, allow_suspicious_types_in_group_by, false, "Allow suspicious types like Variant/Dynamic in GROUP BY clause", 0) \ + M(Bool, allow_suspicious_types_in_order_by, false, "Allow suspicious types like Variant/Dynamic in ORDER BY clause", 0) \ + \ \ /** Limits during query execution are part of the settings. \ * Used to provide a more safe execution of queries from the user interface. \ diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 7063b2162a0..166b6619bdc 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1367,6 +1367,9 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain } } + for (const auto & result_column : step.getResultColumns()) + validateGroupByKeyType(result_column.type); + if (optimize_aggregation_in_order) { for (auto & child : asts) @@ -1381,6 +1384,24 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain return true; } +void SelectQueryExpressionAnalyzer::validateGroupByKeyType(const DB::DataTypePtr & key_type) const +{ + if (getContext()->getSettingsRef().allow_suspicious_types_in_group_by) + return; + + auto check = [](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. 
" + "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); + }; + + check(*key_type); + key_type->forEachChild(check); +} + void SelectQueryExpressionAnalyzer::appendAggregateFunctionsArguments(ExpressionActionsChain & chain, bool only_types) { const auto * select_query = getAggregatingQuery(); @@ -1564,6 +1585,9 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy(Expr getRootActions(select_query->orderBy(), only_types, step.actions()->dag); + for (const auto & result_column : step.getResultColumns()) + validateOrderByKeyType(result_column.type); + bool with_fill = false; for (auto & child : select_query->orderBy()->children) @@ -1643,6 +1667,24 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy(Expr return actions; } +void SelectQueryExpressionAnalyzer::validateOrderByKeyType(const DataTypePtr & key_type) const +{ + if (getContext()->getSettingsRef().allow_suspicious_types_in_order_by) + return; + + auto check = [](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. " + "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); + }; + + check(*key_type); + key_type->forEachChild(check); +} + bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain, bool only_types) { const auto * select_query = getSelectQuery(); diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index dc038e10594..3b006ee2106 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -397,6 +397,7 @@ private: ActionsAndProjectInputsFlagPtr appendPrewhere(ExpressionActionsChain & chain, bool only_types); bool appendWhere(ExpressionActionsChain & chain, bool only_types); bool appendGroupBy(ExpressionActionsChain & chain, bool only_types, bool optimize_aggregation_in_order, ManyExpressionActions &); + void validateGroupByKeyType(const DataTypePtr & key_type) const; void appendAggregateFunctionsArguments(ExpressionActionsChain & chain, bool only_types); void appendWindowFunctionsArguments(ExpressionActionsChain & chain, bool only_types); @@ -409,6 +410,7 @@ private: bool appendHaving(ExpressionActionsChain & chain, bool only_types); /// appendSelect ActionsAndProjectInputsFlagPtr appendOrderBy(ExpressionActionsChain & chain, bool only_types, bool optimize_read_in_order, ManyExpressionActions &); + void validateOrderByKeyType(const DataTypePtr & key_type) const; bool appendLimitBy(ExpressionActionsChain & chain, bool only_types); /// appendProjectResult }; diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index 7e43966556e..bb0b6d3542d 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -151,6 +151,15 @@ KeyDescription KeyDescription::getSortingKeyFromAST( throw Exception(ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, "Column {} with type {} is not allowed in key expression, it's not comparable", backQuote(result.sample_block.getByPosition(i).name), result.data_types.back()->getName()); + + auto check = [&](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception(ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, "Column with type Variant/Dynamic is not allowed in key expression"); + }; + + check(*result.data_types.back()); + result.data_types.back()->forEachChild(check); } 
return result; diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference new file mode 100644 index 00000000000..a3eac1cf3fa --- /dev/null +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference @@ -0,0 +1,184 @@ +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +4 +3 +2 +0 +1 +4 +3 +2 +[4] +[3] +[2] +[0] +[1] +{'str':0} +{'str':1} +{'str':4} +{'str':3} +{'str':2} +0 +1 +4 +3 +2 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +4 +3 +2 +0 +1 +4 +3 +2 +[4] +[3] +[2] +[0] +[1] +{'str':0} +{'str':1} +{'str':4} +{'str':3} +{'str':2} +\N +0 +1 +4 +3 +2 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[4] +[0] +[1] +[2] +[3] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[4] +[0] +[1] +[2] +[3] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql new file mode 100644 index 00000000000..a4ea6425622 --- /dev/null +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql @@ -0,0 +1,154 @@ +set allow_experimental_variant_type=1; +set allow_experimental_dynamic_type=1; + +drop table if exists test; + +create table test (d Dynamic) engine=MergeTree order by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() primary key d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +create table test (d Variant(UInt64)) engine=MergeTree order by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() primary key d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by tuple(d); 
-- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +create table test (d Dynamic) engine=Memory; +insert into test select * from numbers(5); + +set allow_experimental_analyzer=1; + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d; +select * from test group by tuple(d); +select array(d) from test group by array(d); +select map('str', d) from test group by map('str', d); +select * from test group by grouping sets ((d), ('str')); + +set allow_experimental_analyzer=0; + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d; +select * from test group by tuple(d); +select array(d) from test group by array(d); +select map('str', d) from test group by map('str', d); +select * from test group by grouping sets ((d), ('str')); + +drop table test; + +create table test (d Variant(UInt64)) engine=Memory; +insert into test select * from numbers(5); + +set allow_experimental_analyzer=1; + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +select * from test 
group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d; +select * from test group by tuple(d); +select array(d) from test group by array(d); +select map('str', d) from test group by map('str', d); +select * from test group by grouping sets ((d), ('str')); + +set allow_experimental_analyzer=0; + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d; +select * from test group by tuple(d); +select array(d) from test group by array(d); +select map('str', d) from test group by map('str', d); +select * from test group by grouping sets ((d), ('str')); + +drop table test; From 3923efbabf2a3273a055e2889a0df19a517b0b6b Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 18 Sep 2024 14:11:07 +0000 Subject: [PATCH 048/566] Update settings changes history --- src/Core/SettingsChangesHistory.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 5e831c6301c..c2e5e51ab75 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -75,6 +75,8 @@ static std::initializer_list Date: Wed, 18 Sep 2024 19:54:37 +0200 Subject: [PATCH 049/566] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: János Benjamin Antal --- docs/en/operations/settings/settings.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 7dde006b14d..56341205bf7 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5689,18 +5689,18 @@ Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) a Possible values: -- 1 — Usage of `Variant` and `Dynamic` types is not restricted. - 0 — Usage of `Variant` and `Dynamic` types is restricted. +- 1 — Usage of `Variant` and `Dynamic` types is not restricted. Default value: 0. 
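(Editor's note: an illustrative usage sketch of the two settings documented above, assembled from the test queries added elsewhere in this patch series; the table `t` and its single `Dynamic` column are hypothetical, and only the cases exercised by those tests are shown.)

set allow_experimental_dynamic_type = 1;
create table t (d Dynamic) engine=Memory;
insert into t select * from numbers(5);

-- default (0): Dynamic/Variant keys are rejected
select * from t order by d;  -- {serverError ILLEGAL_COLUMN}
select * from t group by d;  -- {serverError ILLEGAL_COLUMN}

-- explicit opt-in (1): the same queries are allowed
set allow_suspicious_types_in_order_by = 1;
set allow_suspicious_types_in_group_by = 1;
select * from t order by d;
select * from t group by d;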
-## allow_suspicious_types_in_group_by {#allow_suspicious_types_in_group_by} +## allow_suspicious_types_in_order_by {#allow_suspicious_types_in_order_by} -Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in GROUP BY keys. +Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in ORDER BY keys. Possible values: -- 1 — Usage of `Variant` and `Dynamic` types is not restricted. - 0 — Usage of `Variant` and `Dynamic` types is restricted. +- 1 — Usage of `Variant` and `Dynamic` types is not restricted. Default value: 0. From c0c04eabbc20d5ab69066d0c0fb8c1339602f0b5 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 18 Sep 2024 18:50:16 +0000 Subject: [PATCH 050/566] Update test --- ...mic_variant_in_order_by_group_by.reference | 10 +++---- ...1_dynamic_variant_in_order_by_group_by.sql | 28 +++++++++++++------ 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference index a3eac1cf3fa..5c7b4cb0bea 100644 --- a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference @@ -40,9 +40,9 @@ {'str':2} 0 1 -4 -3 2 +3 +4 \N 0 1 @@ -84,12 +84,12 @@ {'str':4} {'str':3} {'str':2} -\N 0 1 -4 -3 2 +3 +4 +\N 0 1 2 diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql index a4ea6425622..6e4a39c7234 100644 --- a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql @@ -28,7 +28,7 @@ insert into test select * from numbers(5); set allow_experimental_analyzer=1; -set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_group_by=1; set allow_suspicious_types_in_order_by=0; select * from test order by d; -- {serverError ILLEGAL_COLUMN} @@ -36,6 +36,9 @@ select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + select * from test group by d; -- {serverError ILLEGAL_COLUMN} select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} @@ -54,11 +57,11 @@ select * from test group by d; select * from test group by tuple(d); select array(d) from test group by array(d); select map('str', d) from test group by map('str', d); -select * from test group by grouping sets ((d), ('str')); +select * from test group by grouping sets ((d), ('str')) order by all; set allow_experimental_analyzer=0; -set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_group_by=1; set allow_suspicious_types_in_order_by=0; select * from test order by d; -- {serverError ILLEGAL_COLUMN} @@ -66,6 +69,9 @@ select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} +set 
allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + select * from test group by d; -- {serverError ILLEGAL_COLUMN} select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} @@ -84,7 +90,7 @@ select * from test group by d; select * from test group by tuple(d); select array(d) from test group by array(d); select map('str', d) from test group by map('str', d); -select * from test group by grouping sets ((d), ('str')); +select * from test group by grouping sets ((d), ('str')) order by all; drop table test; @@ -93,7 +99,7 @@ insert into test select * from numbers(5); set allow_experimental_analyzer=1; -set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_group_by=1; set allow_suspicious_types_in_order_by=0; select * from test order by d; -- {serverError ILLEGAL_COLUMN} @@ -101,6 +107,9 @@ select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + select * from test group by d; -- {serverError ILLEGAL_COLUMN} select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} @@ -119,11 +128,11 @@ select * from test group by d; select * from test group by tuple(d); select array(d) from test group by array(d); select map('str', d) from test group by map('str', d); -select * from test group by grouping sets ((d), ('str')); +select * from test group by grouping sets ((d), ('str')) order by all; set allow_experimental_analyzer=0; -set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_group_by=1; set allow_suspicious_types_in_order_by=0; select * from test order by d; -- {serverError ILLEGAL_COLUMN} @@ -131,6 +140,9 @@ select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + select * from test group by d; -- {serverError ILLEGAL_COLUMN} select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} @@ -149,6 +161,6 @@ select * from test group by d; select * from test group by tuple(d); select array(d) from test group by array(d); select map('str', d) from test group by map('str', d); -select * from test group by grouping sets ((d), ('str')); +select * from test group by grouping sets ((d), ('str')) order by all; drop table test; From cb488681eb43016e6b9af904e12243b8bb0aea27 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 18 Sep 2024 18:51:46 +0000 Subject: [PATCH 051/566] Fix style --- src/Databases/enableAllExperimentalSettings.cpp | 2 ++ src/Interpreters/ExpressionAnalyzer.cpp | 1 + 2 files changed, 3 insertions(+) diff --git a/src/Databases/enableAllExperimentalSettings.cpp b/src/Databases/enableAllExperimentalSettings.cpp index 9abe05d7bce..01e989dc10b 100644 --- a/src/Databases/enableAllExperimentalSettings.cpp +++ b/src/Databases/enableAllExperimentalSettings.cpp @@ -32,6 +32,8 @@ void enableAllExperimentalSettings(ContextMutablePtr context) 
context->setSetting("allow_suspicious_low_cardinality_types", 1); context->setSetting("allow_suspicious_fixed_string_types", 1); + context->setSetting("allow_suspicious_types_in_group_by", 1); + context->setSetting("allow_suspicious_types_in_order_by", 1); context->setSetting("allow_suspicious_indices", 1); context->setSetting("allow_suspicious_codecs", 1); context->setSetting("allow_hyperscan", 1); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 9dcf4cd76e4..2df006aff9b 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -98,6 +98,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; extern const int UNKNOWN_IDENTIFIER; extern const int UNKNOWN_TYPE_OF_AST_NODE; + extern const int ILLEGAL_COLUMN; } namespace From fd021f658df9ecef6804da3885067061f842e5b2 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 18 Sep 2024 20:01:11 +0000 Subject: [PATCH 052/566] check steps before mergesort --- .../Optimizations/optimizeReadInOrder.cpp | 17 ++++++++-- .../QueryPlan/ReadFromMergeTree.cpp | 17 +++++----- src/Processors/QueryPlan/ReadFromMergeTree.h | 14 ++++++-- ...er_optimization_with_virtual_row.reference | 2 ++ ...in_order_optimization_with_virtual_row.sql | 32 +++++++++++++++++++ 5 files changed, 70 insertions(+), 12 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index e7468a3a3f2..d3ecb3cac6b 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -94,6 +94,17 @@ static QueryPlan::Node * findReadingStep(QueryPlan::Node & node, StepStack & bac return nullptr; } +static bool checkVirtualRowSupport(const StepStack & backward_path) +{ + for (size_t i = 0; i < backward_path.size() - 1; i++) + { + IQueryPlanStep * step = backward_path[i]; + if (!typeid_cast(step) && !typeid_cast(step)) + return false; + } + return true; +} + void updateStepsDataStreams(StepStack & steps_to_update) { /// update data stream's sorting properties for found transforms @@ -825,8 +836,10 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n if (!can_read) return nullptr; - if (!order_info->first_prefix_fixed) - reading->enableVirtualRow(); + if (!checkVirtualRowSupport(backward_path)) + reading->setVirtualRowStatus(ReadFromMergeTree::VirtualRowStatus::No); + else if (!order_info->first_prefix_fixed) + reading->setVirtualRowStatus(ReadFromMergeTree::VirtualRowStatus::Possible); } return order_info; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 45dcb4616b1..2ac663e0680 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1109,17 +1109,18 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( } } - /// If enabled in the optimization stage, check whether there are more than one branch. - if (enable_virtual_row) - enable_virtual_row = splitted_parts_and_ranges.size() > 1 - || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1); + /// If possible in the optimization stage, check whether there are more than one branch. + if (virtual_row_status == VirtualRowStatus::Possible) + virtual_row_status = splitted_parts_and_ranges.size() > 1 + || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1) + ? 
VirtualRowStatus::Yes : VirtualRowStatus::NoConsiderInLogicalPlan; for (auto && item : splitted_parts_and_ranges) { - /// If not enabled before, try to enable it when conditions meet, as in the following section of preliminary merge, - /// only ExpressionTransform is added between MergingSortedTransform and readFromMergeTree. - bool enable_current_virtual_row = enable_virtual_row; - if (!enable_virtual_row) + bool enable_current_virtual_row = false; + if (virtual_row_status == VirtualRowStatus::Yes) + enable_current_virtual_row = true; + else if (virtual_row_status == VirtualRowStatus::NoConsiderInLogicalPlan) enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index aa1b9dcfdcb..767fcf3b0f8 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -108,6 +108,14 @@ public: using AnalysisResultPtr = std::shared_ptr; + enum class VirtualRowStatus + { + NoConsiderInLogicalPlan, + Possible, + No, + Yes, + }; + ReadFromMergeTree( MergeTreeData::DataPartsVector parts_, MergeTreeData::MutationsSnapshotPtr mutations_snapshot_, @@ -210,7 +218,7 @@ public: void applyFilters(ActionDAGNodes added_filter_nodes) override; - void enableVirtualRow() { enable_virtual_row = true; } + void setVirtualRowStatus(VirtualRowStatus virtual_row_status_) { virtual_row_status = virtual_row_status_; } private: int getSortDirection() const @@ -284,7 +292,9 @@ private: std::optional read_task_callback; bool enable_vertical_final = false; bool enable_remove_parts_from_snapshot_optimization = true; - bool enable_virtual_row = false; + + VirtualRowStatus virtual_row_status = VirtualRowStatus::NoConsiderInLogicalPlan; + std::optional number_of_current_replica; }; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index 08dabf3ee06..499ac19d374 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -38,3 +38,5 @@ 1 3 1 2 1 1 +-- test distinct ---- +0 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 7e3af6c057a..4c7bc5d17c7 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -182,3 +182,35 @@ optimize_read_in_order = 1, read_in_order_two_level_merge_threshold = 0; --force preliminary merge DROP TABLE function_pk; + +-- modified from 02317_distinct_in_order_optimization +SELECT '-- test distinct ----'; + +DROP TABLE IF EXISTS distinct_in_order SYNC; + +CREATE TABLE distinct_in_order +( + `a` int, + `b` int, + `c` int +) +ENGINE = MergeTree +ORDER BY (a, b) +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +SYSTEM STOP MERGES distinct_in_order; + +INSERT INTO distinct_in_order SELECT + number % number, + number % 5, + number % 10 +FROM numbers(1, 1000000); + +SELECT DISTINCT a +FROM distinct_in_order +ORDER BY a ASC +SETTINGS 
read_in_order_two_level_merge_threshold = 0, +optimize_read_in_order = 1, +max_threads = 2; + +DROP TABLE distinct_in_order; From 926e28e35cb1d17d0bb66c06b613671d3eeeeac2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Thu, 19 Sep 2024 02:52:23 +0300 Subject: [PATCH 053/566] Rollback part rename if it was deduplicated --- .../MergeTree/ReplicatedMergeTreeSink.cpp | 26 +++++++++++-------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index fb2bc2fada7..98c46edda25 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -583,7 +583,7 @@ bool ReplicatedMergeTreeSinkImpl::writeExistingPart(MergeTreeData::Mutabl { error = ErrorCodes::INSERT_WAS_DEDUPLICATED; if (!endsWith(part->getDataPartStorage().getRelativePath(), "detached/attaching_" + part->name + "/")) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected relative path for a part: {}", part->getDataPartStorage().getRelativePath()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected relative path for a deduplicated part: {}", part->getDataPartStorage().getRelativePath()); fs::path new_relative_path = fs::path("detached") / part->getNewName(part->info); part->renameTo(new_relative_path, false); } @@ -1013,16 +1013,6 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: } } - transaction.rollback(); - - if (!Coordination::isUserError(multi_code)) - throw Exception( - ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR, - "Unexpected ZooKeeper error while adding block {} with ID '{}': {}", - block_number, - toString(block_id), - multi_code); - auto failed_op_idx = zkutil::getFailedOpIndex(multi_code, responses); String failed_op_path = ops[failed_op_idx]->getPath(); @@ -1032,6 +1022,10 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: LOG_INFO(log, "Block with ID {} already exists (it was just appeared) for part {}. 
Ignore it.", toString(block_id), part->name); + transaction.rollbackPartsToTemporaryState(); + part->is_temp = true; + part->renameTo(temporary_part_relative_path, false); + if constexpr (async_insert) { retry_context.conflict_block_ids = std::vector({failed_op_path}); @@ -1043,6 +1037,16 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: return CommitRetryContext::DUPLICATED_PART; } + transaction.rollback(); // Not in working set (data_parts) + + if (!Coordination::isUserError(multi_code)) + throw Exception( + ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR, + "Unexpected ZooKeeper error while adding block {} with ID '{}': {}", + block_number, + toString(block_id), + multi_code); + if (multi_code == Coordination::Error::ZNONODE && failed_op_idx == block_unlock_op_idx) throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Insert query (for block {}) was canceled by concurrent ALTER PARTITION or TRUNCATE", From f570e8e2c0715001ac0f1633c898699700068edb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Thu, 19 Sep 2024 13:34:51 +0300 Subject: [PATCH 054/566] Remove debug comment --- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 98c46edda25..3f5c70adb64 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -1037,7 +1037,7 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: return CommitRetryContext::DUPLICATED_PART; } - transaction.rollback(); // Not in working set (data_parts) + transaction.rollback(); if (!Coordination::isUserError(multi_code)) throw Exception( From e290745fe113efdba60cd5c807b92ae415c03d77 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 19 Sep 2024 12:39:57 +0000 Subject: [PATCH 055/566] Fix tests --- tests/queries/0_stateless/02989_variant_comparison.sql | 1 + tests/queries/0_stateless/03035_dynamic_sorting.sql | 1 + .../03036_dynamic_read_shared_subcolumns_small.sql.j2 | 1 + .../0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 | 1 + tests/queries/0_stateless/03096_variant_in_primary_key.sql | 1 + tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql | 1 + .../queries/0_stateless/03151_dynamic_type_scale_max_types.sql | 2 +- tests/queries/0_stateless/03158_dynamic_type_from_variant.sql | 1 + tests/queries/0_stateless/03159_dynamic_type_all_types.sql | 2 +- tests/queries/0_stateless/03162_dynamic_type_nested.sql | 1 + tests/queries/0_stateless/03163_dynamic_as_supertype.sql | 1 + .../03228_dynamic_serializations_uninitialized_value.sql | 1 + .../queries/0_stateless/03231_dynamic_not_safe_primary_key.sql | 1 + 13 files changed, 13 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02989_variant_comparison.sql b/tests/queries/0_stateless/02989_variant_comparison.sql index e0dcbc97c27..4d09933fb7b 100644 --- a/tests/queries/0_stateless/02989_variant_comparison.sql +++ b/tests/queries/0_stateless/02989_variant_comparison.sql @@ -1,4 +1,5 @@ set allow_experimental_variant_type=1; +set allow_suspicious_types_in_order_by=1; create table test (v1 Variant(String, UInt64, Array(UInt32)), v2 Variant(String, UInt64, Array(UInt32))) engine=Memory; diff --git a/tests/queries/0_stateless/03035_dynamic_sorting.sql b/tests/queries/0_stateless/03035_dynamic_sorting.sql index e0039a348c6..b2f36fed08e 100644 --- 
a/tests/queries/0_stateless/03035_dynamic_sorting.sql +++ b/tests/queries/0_stateless/03035_dynamic_sorting.sql @@ -1,4 +1,5 @@ set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_order_by=1; drop table if exists test; create table test (d1 Dynamic(max_types=2), d2 Dynamic(max_types=2)) engine=Memory; diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 index dde4f3f53c3..d6732d91e74 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 @@ -1,6 +1,7 @@ set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_order_by = 1; drop table if exists test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 index 3253d7a6c68..daf85077160 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 @@ -1,6 +1,7 @@ set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_order_by = 1; drop table if exists test; diff --git a/tests/queries/0_stateless/03096_variant_in_primary_key.sql b/tests/queries/0_stateless/03096_variant_in_primary_key.sql index 48fbc821bcc..c422b4c3cc5 100644 --- a/tests/queries/0_stateless/03096_variant_in_primary_key.sql +++ b/tests/queries/0_stateless/03096_variant_in_primary_key.sql @@ -1,4 +1,5 @@ set allow_experimental_variant_type=1; +set allow_suspicious_types_in_order_by=1; drop table if exists test; create table test (id UInt64, v Variant(UInt64, String)) engine=MergeTree order by (id, v); insert into test values (1, 1), (1, 'str_1'), (1, 2), (1, 'str_2'); diff --git a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql index 71d5dd4abd1..0e5119a38e0 100644 --- a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql +++ b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; DROP TABLE IF EXISTS null_table; CREATE TABLE null_table diff --git a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql index e476d34a1db..30a86dbc892 100644 --- a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql +++ b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql @@ -1,5 +1,5 @@ SET allow_experimental_dynamic_type=1; -set min_compress_block_size = 585572, max_compress_block_size = 373374, max_block_size = 60768, max_joined_block_size_rows = 18966, max_insert_threads = 5, max_threads = 50, max_read_buffer_size = 708232, connect_timeout_with_failover_ms = 2000, connect_timeout_with_failover_secure_ms = 3000, idle_connection_timeout = 36000, use_uncompressed_cache = true, stream_like_engine_allow_direct_select = true, replication_wait_for_inactive_replica_timeout = 30, compile_aggregate_expressions = false, min_count_to_compile_aggregate_expression = 0, compile_sort_description = false, group_by_two_level_threshold = 1000000, group_by_two_level_threshold_bytes = 12610083, 
enable_memory_bound_merging_of_aggregation_results = false, min_chunk_bytes_for_parallel_parsing = 18769830, merge_tree_coarse_index_granularity = 12, min_bytes_to_use_direct_io = 10737418240, min_bytes_to_use_mmap_io = 10737418240, log_queries = true, insert_quorum_timeout = 60000, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.05000000074505806, http_response_buffer_size = 294986, fsync_metadata = true, http_send_timeout = 60., http_receive_timeout = 60., opentelemetry_start_trace_probability = 0.10000000149011612, max_bytes_before_external_group_by = 1, max_bytes_before_external_sort = 10737418240, max_bytes_before_remerge_sort = 1326536545, max_untracked_memory = 1048576, memory_profiler_step = 1048576, log_comment = '03151_dynamic_type_scale_max_types.sql', send_logs_level = 'fatal', prefer_localhost_replica = false, optimize_read_in_order = false, optimize_aggregation_in_order = true, aggregation_in_order_max_block_bytes = 27069500, read_in_order_two_level_merge_threshold = 75, allow_introspection_functions = true, database_atomic_wait_for_drop_and_detach_synchronously = true, remote_filesystem_read_method = 'read', local_filesystem_read_prefetch = true, remote_filesystem_read_prefetch = false, merge_tree_compact_parts_min_granules_to_multibuffer_read = 119, async_insert_busy_timeout_max_ms = 5000, read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true, filesystem_cache_segments_batch_size = 10, use_page_cache_for_disks_without_file_cache = true, page_cache_inject_eviction = true, allow_prefetched_read_pool_for_remote_filesystem = false, filesystem_prefetch_step_marks = 50, filesystem_prefetch_min_bytes_for_single_read_task = 16777216, filesystem_prefetch_max_memory_usage = 134217728, filesystem_prefetches_limit = 10, optimize_sorting_by_input_stream_properties = false, allow_experimental_dynamic_type = true, session_timezone = 'Africa/Khartoum', prefer_warmed_unmerged_parts_seconds = 2; +SET allow_suspicious_types_in_order_by=1; drop table if exists to_table; diff --git a/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql b/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql index a18f985f217..429ac21b5eb 100644 --- a/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql +++ b/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql @@ -1,5 +1,6 @@ SET allow_experimental_dynamic_type=1; SET allow_experimental_variant_type=1; +SET allow_suspicious_types_in_order_by=1; CREATE TABLE test_variable (v Variant(String, UInt32, IPv6, Bool, DateTime64)) ENGINE = Memory; CREATE TABLE test_dynamic (d Dynamic) ENGINE = Memory; diff --git a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql index 28b679e2214..cf8ba687d3f 100644 --- a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql +++ b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql @@ -3,7 +3,7 @@ SET allow_experimental_dynamic_type=1; SET allow_experimental_variant_type=1; SET allow_suspicious_low_cardinality_types=1; - +SET allow_suspicious_types_in_order_by=1; CREATE TABLE t (d Dynamic(max_types=254)) ENGINE = Memory; -- Integer types: signed and unsigned integers (UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256) diff --git a/tests/queries/0_stateless/03162_dynamic_type_nested.sql b/tests/queries/0_stateless/03162_dynamic_type_nested.sql index 94007459a9e..59c22491957 100644 --- 
a/tests/queries/0_stateless/03162_dynamic_type_nested.sql +++ b/tests/queries/0_stateless/03162_dynamic_type_nested.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; CREATE TABLE t (d Dynamic) ENGINE = Memory; diff --git a/tests/queries/0_stateless/03163_dynamic_as_supertype.sql b/tests/queries/0_stateless/03163_dynamic_as_supertype.sql index baba637eea4..e859fbd1815 100644 --- a/tests/queries/0_stateless/03163_dynamic_as_supertype.sql +++ b/tests/queries/0_stateless/03163_dynamic_as_supertype.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; SELECT if(number % 2, number::Dynamic(max_types=3), ('str_' || toString(number))::Dynamic(max_types=2)) AS d, toTypeName(d), dynamicType(d) FROM numbers(4); CREATE TABLE dynamic_test_1 (d Dynamic(max_types=3)) ENGINE = Memory; INSERT INTO dynamic_test_1 VALUES ('str_1'), (42::UInt64); diff --git a/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql b/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql index 8a565fe36b9..60e2439d45f 100644 --- a/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql +++ b/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql @@ -1,4 +1,5 @@ set allow_experimental_dynamic_type=1; +set allow_suspicious_types_in_group_by=1; set cast_keep_nullable=1; SELECT toFixedString('str', 3), 3, CAST(if(1 = 0, toInt8(3), NULL), 'Int32') AS x from numbers(10) GROUP BY GROUPING SETS ((CAST(toInt32(1), 'Int32')), ('str', 3), (CAST(toFixedString('str', 3), 'Dynamic')), (CAST(toFixedString(toFixedString('str', 3), 3), 'Dynamic'))); diff --git a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql index f207581f482..101c7cfe8fa 100644 --- a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql +++ b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type = 1; +SET allow_suspicious_types_in_order_by = 1; DROP TABLE IF EXISTS t0; DROP TABLE IF EXISTS t1; CREATE TABLE t0 (c0 Int) ENGINE = AggregatingMergeTree() ORDER BY (c0); From 050b51799ce1e636f7806cb7af6d1bbb1cf481e5 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 19 Sep 2024 14:48:38 +0000 Subject: [PATCH 056/566] add inner and outer read-in-order virtual row test --- ...in_order_optimization_with_virtual_row.sql | 3 ++- ...ization_with_virtual_row_explain.reference | 25 ++++++++++++++++++ ..._optimization_with_virtual_row_explain.sql | 26 +++++++++++++++++++ 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.reference create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 4c7bc5d17c7..f66b4be2c69 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -196,7 +196,8 @@ CREATE TABLE distinct_in_order ) ENGINE = MergeTree ORDER BY (a, b) -SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +SETTINGS index_granularity = 8192, +index_granularity_bytes = '10Mi'; SYSTEM 
STOP MERGES distinct_in_order; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.reference new file mode 100644 index 00000000000..33ef6b19222 --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.reference @@ -0,0 +1,25 @@ +(Expression) +ExpressionTransform + (Sorting) + MergingSortedTransform 4 → 1 + (Expression) + ExpressionTransform × 4 + (ReadFromMergeTree) + ExpressionTransform × 5 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + ExpressionTransform + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql new file mode 100644 index 00000000000..668b21275b4 --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql @@ -0,0 +1,26 @@ +-- Tags: no-random-merge-tree-settings + +SET optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 1000; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + `t` DateTime +) +ENGINE = MergeTree +ORDER BY t +SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES tab; + +INSERT INTO tab SELECT toDateTime('2024-01-10') + number FROM numbers(10000); +INSERT INTO tab SELECT toDateTime('2024-01-30') + number FROM numbers(10000); +INSERT INTO tab SELECT toDateTime('2024-01-20') + number FROM numbers(10000); + +EXPLAIN PIPELINE +SELECT * +FROM tab +ORDER BY t ASC +SETTINGS read_in_order_two_level_merge_threshold = 0, max_threads = 4, read_in_order_use_buffering = 0 +FORMAT tsv; \ No newline at end of file From b4e5c11fd775cf915dff9e816673fb699c99a307 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 20 Sep 2024 02:11:29 +0000 Subject: [PATCH 057/566] fix --- ...3031_read_in_order_optimization_with_virtual_row_explain.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql index 668b21275b4..8cdcb4628ec 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql @@ -1,4 +1,4 @@ --- Tags: no-random-merge-tree-settings +-- Tags: no-random-merge-tree-settings, no-object-storage SET optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 1000; From 82b4986ee35f974efb48f7ffbb6c698d4e363e43 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 21 Sep 2024 14:53:45 +0000 Subject: [PATCH 058/566] use empty chunk with pk block --- .../Merges/Algorithms/MergeTreeReadInfo.h | 41 ++++++++++++++++--- .../Algorithms/MergingSortedAlgorithm.cpp | 12 +++++- 
src/Processors/Merges/IMergingTransform.cpp | 4 +- .../QueryPlan/BufferChunksTransform.cpp | 2 +- .../QueryPlan/ReadFromMergeTree.cpp | 21 ++++++++-- .../Transforms/VirtualRowTransform.cpp | 33 ++++----------- .../Transforms/VirtualRowTransform.h | 11 +---- .../MergeTree/MergeTreeSelectProcessor.cpp | 2 +- .../MergeTree/MergeTreeSequentialSource.cpp | 2 +- 9 files changed, 77 insertions(+), 51 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index 862fa1b5e9a..425df2c24b9 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB { @@ -10,13 +11,16 @@ class MergeTreeReadInfo : public ChunkInfoCloneable { public: MergeTreeReadInfo() = delete; - explicit MergeTreeReadInfo(size_t part_level, bool virtual_row_) : - origin_merge_tree_part_level(part_level), virtual_row(virtual_row_) { } + explicit MergeTreeReadInfo(size_t part_level) : + origin_merge_tree_part_level(part_level) {} + explicit MergeTreeReadInfo(size_t part_level, const Block & pk_block_) : + origin_merge_tree_part_level(part_level), pk_block(pk_block_) {} MergeTreeReadInfo(const MergeTreeReadInfo & other) = default; size_t origin_merge_tree_part_level = 0; - /// If virtual_row is true, the chunk must contain the virtual row only. - bool virtual_row = false; + + /// If is virtual_row, block should not be empty. + Block pk_block; }; inline size_t getPartLevelFromChunk(const Chunk & chunk) @@ -27,12 +31,37 @@ inline size_t getPartLevelFromChunk(const Chunk & chunk) return 0; } -inline bool getVirtualRowFromChunk(const Chunk & chunk) +inline bool isVirtualRow(const Chunk & chunk) { const auto read_info = chunk.getChunkInfos().get(); if (read_info) - return read_info->virtual_row; + return read_info->pk_block.columns() > 0; return false; } +inline void setVirtualRow(Chunk & chunk, const Block & header) +{ + const auto read_info = chunk.getChunkInfos().get(); + chassert(read_info); + + const Block & pk_block = read_info->pk_block; + + Columns ordered_columns; + ordered_columns.reserve(header.columns()); + + for (size_t i = 0; i < header.columns(); ++i) + { + const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); + ColumnPtr current_column = type_and_name.type->createColumn(); + + size_t pos = type_and_name.name.find_last_of("."); + String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); + + const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); + ordered_columns.push_back(column ? 
column->column : current_column->cloneResized(1)); + } + + chunk.setColumns(ordered_columns, 1); +} + } diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 9476d46d939..75c04c8ddb2 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -55,6 +55,14 @@ void MergingSortedAlgorithm::addInput() void MergingSortedAlgorithm::initialize(Inputs inputs) { + for (auto & input : inputs) + { + if (!isVirtualRow(input.chunk)) + continue; + + setVirtualRow(input.chunk, header); + } + removeConstAndSparse(inputs); merged_data.initialize(header, inputs); current_inputs = std::move(inputs); @@ -139,7 +147,7 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeImpl(TSortingHeap & queue auto current = queue.current(); - if (getVirtualRowFromChunk(current_inputs[current.impl->order].chunk)) + if (isVirtualRow(current_inputs[current.impl->order].chunk)) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Virtual row is not implemented for Non-batch mode."); if (current.impl->isLast() && current_inputs[current.impl->order].skip_last_row) @@ -238,7 +246,7 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeBatchImpl(TSortingQueue & auto [current_ptr, initial_batch_size] = queue.current(); auto current = *current_ptr; - if (getVirtualRowFromChunk(current_inputs[current.impl->order].chunk)) + if (isVirtualRow(current_inputs[current.impl->order].chunk)) { /// If virtual row is detected, there should be only one row as a single chunk, /// and always skip this chunk to pull the next one. diff --git a/src/Processors/Merges/IMergingTransform.cpp b/src/Processors/Merges/IMergingTransform.cpp index 7488cf4769e..68957cd55f9 100644 --- a/src/Processors/Merges/IMergingTransform.cpp +++ b/src/Processors/Merges/IMergingTransform.cpp @@ -104,14 +104,14 @@ IProcessor::Status IMergingTransformBase::prepareInitializeInputs() /// we won't have to read any chunks anymore; /// If virtual row exists, let it pass through, so don't read more chunks. 
auto chunk = input.pull(true); - bool virtual_row = getVirtualRowFromChunk(chunk); + bool virtual_row = isVirtualRow(chunk); if (limit_hint == 0 && !virtual_row) input.setNeeded(); if (!virtual_row && ((limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end)) input.setNeeded(); - if (!chunk.hasRows()) + if (!virtual_row && !chunk.hasRows()) { if (!input.isFinished()) { diff --git a/src/Processors/QueryPlan/BufferChunksTransform.cpp b/src/Processors/QueryPlan/BufferChunksTransform.cpp index 47e2c2ba0d5..75f5f91d981 100644 --- a/src/Processors/QueryPlan/BufferChunksTransform.cpp +++ b/src/Processors/QueryPlan/BufferChunksTransform.cpp @@ -88,7 +88,7 @@ IProcessor::Status BufferChunksTransform::prepare() Chunk BufferChunksTransform::pullChunk(bool & virtual_row) { auto chunk = input.pull(); - virtual_row = getVirtualRowFromChunk(chunk); + virtual_row = isVirtualRow(chunk); if (!virtual_row) num_processed_rows += chunk.getNumRows(); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 2ac663e0680..4b5e33e8b07 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -663,12 +663,25 @@ Pipe ReadFromMergeTree::readInOrder( if (enable_current_virtual_row && (read_type == ReadType::InOrder)) { + const auto & index = part_with_ranges.data_part->getIndex(); + const auto & primary_key = storage_snapshot->metadata->primary_key; + size_t mark_range_begin = part_with_ranges.ranges.front().begin; + + ColumnsWithTypeAndName pk_columns; + pk_columns.reserve(index->size()); + + for (size_t j = 0; j < index->size(); ++j) + { + auto column = primary_key.data_types[j]->createColumn()->cloneEmpty(); + column->insert((*(*index)[j])[mark_range_begin]); + pk_columns.push_back({std::move(column), primary_key.data_types[j], primary_key.column_names[j]}); + } + + Block pk_block(std::move(pk_columns)); + pipe.addSimpleTransform([&](const Block & header) { - return std::make_shared(header, - storage_snapshot->metadata->primary_key, - part_with_ranges.data_part->getIndex(), - part_with_ranges.ranges.front().begin); + return std::make_shared(header, pk_block); }); } diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp index 9b904fc4ae2..92bf5ce3064 100644 --- a/src/Processors/Transforms/VirtualRowTransform.cpp +++ b/src/Processors/Transforms/VirtualRowTransform.cpp @@ -9,14 +9,10 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -VirtualRowTransform::VirtualRowTransform(const Block & header_, - const KeyDescription & primary_key_, - const IMergeTreeDataPart::Index & index_, - size_t mark_range_begin_) +VirtualRowTransform::VirtualRowTransform(const Block & header_, const Block & pk_block_) : IProcessor({header_}, {header_}) , input(inputs.front()), output(outputs.front()) - , header(header_), primary_key(primary_key_) - , index(index_), mark_range_begin(mark_range_begin_) + , header(header_), pk_block(pk_block_) { } @@ -89,29 +85,16 @@ void VirtualRowTransform::work() is_first = false; - /// Reorder the columns according to result_header - Columns ordered_columns; - ordered_columns.reserve(header.columns()); - for (size_t i = 0, j = 0; i < header.columns(); ++i) + Columns empty_columns; + empty_columns.reserve(header.columns()); + for (size_t i = 0; i < header.columns(); ++i) { const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); - ColumnPtr current_column = type_and_name.type->createColumn(); - // 
ordered_columns.push_back(current_column->cloneResized(1)); - - if (j < index->size() && type_and_name.name == primary_key.column_names[j] - && type_and_name.type == primary_key.data_types[j]) - { - auto column = current_column->cloneEmpty(); - column->insert((*(*index)[j])[mark_range_begin]); - ordered_columns.push_back(std::move(column)); - ++j; - } - else - ordered_columns.push_back(current_column->cloneResized(1)); + empty_columns.push_back(type_and_name.type->createColumn()->cloneEmpty()); } - current_chunk.setColumns(ordered_columns, 1); - current_chunk.getChunkInfos().add(std::make_shared(0, true)); + current_chunk.setColumns(empty_columns, 0); + current_chunk.getChunkInfos().add(std::make_shared(0, pk_block)); } else { diff --git a/src/Processors/Transforms/VirtualRowTransform.h b/src/Processors/Transforms/VirtualRowTransform.h index b9f0cb46242..e3215393ad1 100644 --- a/src/Processors/Transforms/VirtualRowTransform.h +++ b/src/Processors/Transforms/VirtualRowTransform.h @@ -11,10 +11,7 @@ namespace DB class VirtualRowTransform : public IProcessor { public: - explicit VirtualRowTransform(const Block & header_, - const KeyDescription & primary_key_, - const IMergeTreeDataPart::Index & index_, - size_t mark_range_begin_); + explicit VirtualRowTransform(const Block & header_, const Block & pk_block_); String getName() const override { return "VirtualRowTransform"; } @@ -32,11 +29,7 @@ private: bool is_first = true; Block header; - KeyDescription primary_key; - /// PK index used in virtual row. - IMergeTreeDataPart::Index index; - /// The first range that might contain the candidate. - size_t mark_range_begin; + Block pk_block; }; } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 85f545d2a51..cafe8dc3fbf 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -147,7 +147,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() auto chunk = Chunk(ordered_columns, res.row_count); if (add_part_level) - chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, false)); + chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level)); return ChunkAndProgress{ .chunk = std::move(chunk), diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index c62326f82dd..835045735fe 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -267,7 +267,7 @@ try auto result = Chunk(std::move(res_columns), rows_read); if (add_part_level) - result.getChunkInfos().add(std::make_shared(data_part->info.level, false)); + result.getChunkInfos().add(std::make_shared(data_part->info.level)); return result; } } From 10ed5a8521da3eb91e9d7caaf4bd0bdb32bca25c Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 25 Sep 2024 17:48:16 +0000 Subject: [PATCH 059/566] fix --- src/Processors/Merges/Algorithms/MergeTreeReadInfo.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index 425df2c24b9..98cb414875b 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -54,7 +54,7 @@ inline void setVirtualRow(Chunk & chunk, const Block & header) const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); 
ColumnPtr current_column = type_and_name.type->createColumn(); - size_t pos = type_and_name.name.find_last_of("."); + size_t pos = type_and_name.name.find_last_of('.'); String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); From bf591fa12b27f16411bf2441b06d1173616d34ba Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 26 Sep 2024 12:20:51 +0000 Subject: [PATCH 060/566] Introduce virtual row conversions. --- .../Merges/Algorithms/MergeTreeReadInfo.h | 40 +++-- .../Algorithms/MergingSortedAlgorithm.cpp | 7 +- .../Algorithms/MergingSortedAlgorithm.h | 5 +- .../Merges/MergingSortedTransform.cpp | 4 +- .../Merges/MergingSortedTransform.h | 1 + .../Optimizations/actionsDAGUtils.cpp | 2 + .../QueryPlan/Optimizations/actionsDAGUtils.h | 4 + .../Optimizations/distinctReadInOrder.cpp | 2 +- .../Optimizations/optimizeReadInOrder.cpp | 168 +++++++++++++----- .../QueryPlan/ReadFromMergeTree.cpp | 73 ++++---- src/Processors/QueryPlan/ReadFromMergeTree.h | 16 +- .../Transforms/MergeSortingTransform.cpp | 2 + .../Transforms/VirtualRowTransform.cpp | 8 +- .../Transforms/VirtualRowTransform.h | 4 +- src/Storages/ReadInOrderOptimizer.cpp | 2 +- src/Storages/SelectQueryInfo.h | 11 +- src/Storages/StorageMerge.cpp | 2 +- 17 files changed, 226 insertions(+), 125 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index 98cb414875b..62bbe3eac6e 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB { @@ -13,14 +14,15 @@ public: MergeTreeReadInfo() = delete; explicit MergeTreeReadInfo(size_t part_level) : origin_merge_tree_part_level(part_level) {} - explicit MergeTreeReadInfo(size_t part_level, const Block & pk_block_) : - origin_merge_tree_part_level(part_level), pk_block(pk_block_) {} + explicit MergeTreeReadInfo(size_t part_level, const Block & pk_block_, ExpressionActionsPtr virtual_row_conversions_) : + origin_merge_tree_part_level(part_level), pk_block(pk_block_), virtual_row_conversions(std::move(virtual_row_conversions_)) {} MergeTreeReadInfo(const MergeTreeReadInfo & other) = default; size_t origin_merge_tree_part_level = 0; /// If is virtual_row, block should not be empty. 
Block pk_block; + ExpressionActionsPtr virtual_row_conversions; }; inline size_t getPartLevelFromChunk(const Chunk & chunk) @@ -39,29 +41,33 @@ inline bool isVirtualRow(const Chunk & chunk) return false; } -inline void setVirtualRow(Chunk & chunk, const Block & header) +inline void setVirtualRow(Chunk & chunk, bool apply_virtual_row_conversions) { - const auto read_info = chunk.getChunkInfos().get(); + auto read_info = chunk.getChunkInfos().extract(); chassert(read_info); - const Block & pk_block = read_info->pk_block; + Block & pk_block = read_info->pk_block; + if (apply_virtual_row_conversions) + read_info->virtual_row_conversions->execute(pk_block); - Columns ordered_columns; - ordered_columns.reserve(header.columns()); + chunk.setColumns(pk_block.getColumns(), 1); - for (size_t i = 0; i < header.columns(); ++i) - { - const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); - ColumnPtr current_column = type_and_name.type->createColumn(); + // Columns ordered_columns; + // ordered_columns.reserve(pk_block.columns()); - size_t pos = type_and_name.name.find_last_of('.'); - String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); + // for (size_t i = 0; i < header.columns(); ++i) + // { + // const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); + // ColumnPtr current_column = type_and_name.type->createColumn(); - const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); - ordered_columns.push_back(column ? column->column : current_column->cloneResized(1)); - } + // size_t pos = type_and_name.name.find_last_of('.'); + // String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); - chunk.setColumns(ordered_columns, 1); + // const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); + // ordered_columns.push_back(column ? 
column->column : current_column->cloneResized(1)); + // } + + // chunk.setColumns(ordered_columns, 1); } } diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 75c04c8ddb2..0dd95729ba3 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -22,12 +22,14 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_, WriteBuffer * out_row_sources_buf_, - bool use_average_block_sizes) + bool use_average_block_sizes, + bool apply_virtual_row_conversions_) : header(std::move(header_)) , merged_data(use_average_block_sizes, max_block_size_, max_block_size_bytes_) , description(description_) , limit(limit_) , out_row_sources_buf(out_row_sources_buf_) + , apply_virtual_row_conversions(apply_virtual_row_conversions_) , current_inputs(num_inputs) , sorting_queue_strategy(sorting_queue_strategy_) , cursors(num_inputs) @@ -60,7 +62,8 @@ void MergingSortedAlgorithm::initialize(Inputs inputs) if (!isVirtualRow(input.chunk)) continue; - setVirtualRow(input.chunk, header); + setVirtualRow(input.chunk, apply_virtual_row_conversions); + input.skip_last_row = true; } removeConstAndSparse(inputs); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index c889668a38e..0a99b1bd8a6 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -22,7 +22,8 @@ public: SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_ = 0, WriteBuffer * out_row_sources_buf_ = nullptr, - bool use_average_block_sizes = false); + bool use_average_block_sizes = false, + bool apply_virtual_row_conversions_ = true); void addInput(); @@ -47,6 +48,8 @@ private: /// If it is not nullptr then it should be populated during execution WriteBuffer * out_row_sources_buf = nullptr; + bool apply_virtual_row_conversions; + /// Chunks currently being merged. 
Inputs current_inputs; diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index d2895a2a2e9..760108facb6 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -22,6 +22,7 @@ MergingSortedTransform::MergingSortedTransform( bool always_read_till_end_, WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes, + bool apply_virtual_row_conversions, bool have_all_inputs_) : IMergingTransform( num_inputs, @@ -38,7 +39,8 @@ MergingSortedTransform::MergingSortedTransform( sorting_queue_strategy, limit_, out_row_sources_buf_, - use_average_block_sizes) + use_average_block_sizes, + apply_virtual_row_conversions) { } diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index 6e52450efa7..220ecf0902a 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -22,6 +22,7 @@ public: bool always_read_till_end_ = false, WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, + bool apply_virtual_row_conversions = true, bool have_all_inputs_ = true); String getName() const override { return "MergingSortedTransform"; } diff --git a/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.cpp b/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.cpp index 2f1618ea6e1..b8216d6c4c4 100644 --- a/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.cpp +++ b/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.cpp @@ -210,6 +210,8 @@ MatchedTrees::Matches matchTrees(const ActionsDAG::NodeRawConstPtrs & inner_dag, MatchedTrees::Monotonicity monotonicity; monotonicity.direction *= info.is_positive ? 1 : -1; monotonicity.strict = info.is_strict; + monotonicity.child_match = &child_match; + monotonicity.child_node = monotonic_child; if (child_match.monotonicity) { diff --git a/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.h b/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.h index e78d658978e..82f0962f799 100644 --- a/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.h +++ b/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.h @@ -22,12 +22,16 @@ namespace DB /// DAG for PK does not contain aliases and ambiguous nodes. struct MatchedTrees { + struct Match; + /// Monotonicity is calculated for monotonic functions chain. /// Chain is not strict if there is any non-strict monotonic function. 
struct Monotonicity { int direction = 1; bool strict = true; + const Match * child_match = nullptr; + const ActionsDAG::Node * child_node = nullptr; }; struct Match diff --git a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp index 37e61a6c388..5af680b42b8 100644 --- a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp @@ -129,7 +129,7 @@ size_t tryDistinctReadInOrder(QueryPlan::Node * parent_node) /// update input order info in read_from_merge_tree step const int direction = 0; /// for DISTINCT direction doesn't matter, ReadFromMergeTree will choose proper one - bool can_read = read_from_merge_tree->requestReadingInOrder(number_of_sorted_distinct_columns, direction, pre_distinct->getLimitHint()); + bool can_read = read_from_merge_tree->requestReadingInOrder(number_of_sorted_distinct_columns, direction, pre_distinct->getLimitHint(), {}); if (!can_read) return 0; diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index d3ecb3cac6b..8cd0a634a1e 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -94,17 +94,6 @@ static QueryPlan::Node * findReadingStep(QueryPlan::Node & node, StepStack & bac return nullptr; } -static bool checkVirtualRowSupport(const StepStack & backward_path) -{ - for (size_t i = 0; i < backward_path.size() - 1; i++) - { - IQueryPlanStep * step = backward_path[i]; - if (!typeid_cast(step) && !typeid_cast(step)) - return false; - } - return true; -} - void updateStepsDataStreams(StepStack & steps_to_update) { /// update data stream's sorting properties for found transforms @@ -338,11 +327,42 @@ void enrichFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns) } } -InputOrderInfoPtr buildInputOrderInfo( +static const ActionsDAG::Node * addMonotonicChain(ActionsDAG & dag, const ActionsDAG::Node * node, const MatchedTrees::Match * match) +{ + if (!match->monotonicity) + return &dag.addInput(node->result_name, node->result_type); + + if (node->type == ActionsDAG::ActionType::ALIAS) + return &dag.addAlias(*addMonotonicChain(dag, node->children.front(), match), node->result_name); + + ActionsDAG::NodeRawConstPtrs args; + args.reserve(node->children.size()); + for (const auto * child : node->children) + { + if (child == match->monotonicity->child_node) + args.push_back(addMonotonicChain(dag, match->monotonicity->child_node, match->monotonicity->child_match)); + else + args.push_back(&dag.addColumn({child->column, child->result_type, child->result_name})); + } + + return &dag.addFunction(node->function_base, std::move(args), {}); +} + +struct SortingInputOrder +{ + InputOrderInfoPtr input_order{}; + /// This is needed for virtual row optimization. + /// Convert the PR values to ORDER BY key. + /// If empty, the optimization cannot be applied. 
+ std::optional virtual_row_conversion{}; +}; + +SortingInputOrder buildInputOrderInfo( const FixedColumns & fixed_columns, const std::optional & dag, const SortDescription & description, const KeyDescription & sorting_key, + const Names & pk_column_names, size_t limit) { //std::cerr << "------- buildInputOrderInfo " << std::endl; @@ -381,7 +401,18 @@ InputOrderInfoPtr buildInputOrderInfo( int read_direction = 0; size_t next_description_column = 0; size_t next_sort_key = 0; - bool first_prefix_fixed = false; + + bool can_optimize_virtual_row = true; + + struct MatchInfo + { + const ActionsDAG::Node * source = nullptr; + const ActionsDAG::Node * fixed_column = nullptr; + const MatchedTrees::Match * monotonic = nullptr; + }; + + std::vector match_infos; + match_infos.reserve(description.size()); while (next_description_column < description.size() && next_sort_key < sorting_key.column_names.size()) { @@ -424,6 +455,7 @@ InputOrderInfoPtr buildInputOrderInfo( //std::cerr << "====== (no dag) Found direct match" << std::endl; + match_infos.push_back({.source = sort_column_node}); ++next_description_column; ++next_sort_key; } @@ -452,27 +484,46 @@ InputOrderInfoPtr buildInputOrderInfo( { current_direction *= match.monotonicity->direction; strict_monotonic = match.monotonicity->strict; + match_infos.push_back({.source = sort_node, .monotonic = &match}); } + else + match_infos.push_back({.source = sort_node}); ++next_description_column; ++next_sort_key; } else if (fixed_key_columns.contains(sort_column_node)) { + if (next_sort_key == 0) - first_prefix_fixed = true; + { + // Disable virtual row optimization. + // For example, when pk is (a,b), a = 1, order by b, virtual row should be + // disabled in the following case: + // 1st part (0, 100), (1, 2), (1, 3), (1, 4) + // 2nd part (0, 100), (1, 2), (1, 3), (1, 4). + + can_optimize_virtual_row = true; + } //std::cerr << "+++++++++ Found fixed key by match" << std::endl; ++next_sort_key; } else { - //std::cerr << "====== Check for fixed const : " << bool(sort_node->column) << " fixed : " << fixed_columns.contains(sort_node) << std::endl; bool is_fixed_column = sort_node->column || fixed_columns.contains(sort_node); if (!is_fixed_column) break; + if (!sort_node->column) + /// Virtual row for fixed column from order by is not supported now. + /// TODO: we can do it for the simple case, + /// But it's better to remove fixed columns from ORDER BY completely, e.g: + /// WHERE x = 42 ORDER BY x, y => WHERE x = 42 ORDER BY y + can_optimize_virtual_row = false; + + match_infos.push_back({.source = sort_node, .fixed_column = sort_node}); order_key_prefix_descr.push_back(sort_column_description); ++next_description_column; } @@ -494,9 +545,36 @@ InputOrderInfoPtr buildInputOrderInfo( } if (read_direction == 0 || order_key_prefix_descr.empty()) - return nullptr; + return {}; - return std::make_shared(order_key_prefix_descr, next_sort_key, read_direction, limit, first_prefix_fixed); + /// If the prefix description is used, we can't restore the full description from PK value. + /// TODO: partial sort description can be used as well. Implement support later. 
+ if (order_key_prefix_descr.size() < description.size() || pk_column_names.size() < next_sort_key) + can_optimize_virtual_row = false; + + auto order_info = std::make_shared(order_key_prefix_descr, next_sort_key, read_direction, limit); + + std::optional virtual_row_conversion; + if (can_optimize_virtual_row) + { + ActionsDAG virtual_row_dag; + virtual_row_dag.getOutputs().reserve(match_infos.size()); + for (const auto & info : match_infos) + { + const ActionsDAG::Node * output; + if (info.fixed_column) + output = &virtual_row_dag.addColumn({info.fixed_column->column, info.fixed_column->result_type, info.fixed_column->result_name}); + else if (info.monotonic) + output = addMonotonicChain(virtual_row_dag, info.source, info.monotonic); + else + output = &virtual_row_dag.addInput(info.source->result_name, info.source->result_type); + + virtual_row_dag.getOutputs().push_back(output); + } + virtual_row_conversion = std::move(virtual_row_dag); + } + + return {std::move(order_info), std::move(virtual_row_conversion)}; } /// We really need three different sort descriptions here. @@ -700,11 +778,11 @@ AggregationInputOrder buildInputOrderInfo( for (const auto & key : not_matched_group_by_keys) group_by_sort_description.emplace_back(SortColumnDescription(std::string(key))); - auto input_order = std::make_shared(order_key_prefix_descr, next_sort_key, /*read_direction*/ 1, /* limit */ 0, false); + auto input_order = std::make_shared(order_key_prefix_descr, next_sort_key, /*read_direction*/ 1, /* limit */ 0); return { std::move(input_order), std::move(sort_description_for_merging), std::move(group_by_sort_description) }; } -InputOrderInfoPtr buildInputOrderInfo( +SortingInputOrder buildInputOrderInfo( const ReadFromMergeTree * reading, const FixedColumns & fixed_columns, const std::optional & dag, @@ -712,15 +790,17 @@ InputOrderInfoPtr buildInputOrderInfo( size_t limit) { const auto & sorting_key = reading->getStorageMetadata()->getSortingKey(); + const auto & pk_column_names = reading->getStorageMetadata()->getPrimaryKey().column_names; return buildInputOrderInfo( fixed_columns, dag, description, sorting_key, + pk_column_names, limit); } -InputOrderInfoPtr buildInputOrderInfo( +SortingInputOrder buildInputOrderInfo( ReadFromMerge * merge, const FixedColumns & fixed_columns, const std::optional & dag, @@ -729,28 +809,31 @@ InputOrderInfoPtr buildInputOrderInfo( { const auto & tables = merge->getSelectedTables(); - InputOrderInfoPtr order_info; + SortingInputOrder order_info; for (const auto & table : tables) { auto storage = std::get(table); - const auto & sorting_key = storage->getInMemoryMetadataPtr()->getSortingKey(); + auto metadata = storage->getInMemoryMetadataPtr(); + const auto & sorting_key = metadata->getSortingKey(); + // const auto & pk_column_names = metadata->getPrimaryKey().column_names; if (sorting_key.column_names.empty()) - return nullptr; + return {}; auto table_order_info = buildInputOrderInfo( fixed_columns, dag, description, sorting_key, + {}, limit); - if (!table_order_info) - return nullptr; + if (!table_order_info.input_order) + return {}; - if (!order_info) - order_info = table_order_info; - else if (*order_info != *table_order_info) - return nullptr; + if (!order_info.input_order) + order_info = std::move(table_order_info); + else if (*order_info.input_order != *table_order_info.input_order) + return {}; } return order_info; @@ -830,19 +913,19 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n dag, description, limit); - if (order_info) + if 
(order_info.input_order) { - bool can_read = reading->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit); + bool can_read = reading->requestReadingInOrder( + order_info.input_order->used_prefix_of_sorting_key_size, + order_info.input_order->direction, + order_info.input_order->limit, + std::move(order_info.virtual_row_conversion)); + if (!can_read) return nullptr; - - if (!checkVirtualRowSupport(backward_path)) - reading->setVirtualRowStatus(ReadFromMergeTree::VirtualRowStatus::No); - else if (!order_info->first_prefix_fixed) - reading->setVirtualRowStatus(ReadFromMergeTree::VirtualRowStatus::Possible); } - return order_info; + return order_info.input_order; } else if (auto * merge = typeid_cast(reading_node->step.get())) { @@ -852,14 +935,14 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n dag, description, limit); - if (order_info) + if (order_info.input_order) { - bool can_read = merge->requestReadingInOrder(order_info); + bool can_read = merge->requestReadingInOrder(order_info.input_order); if (!can_read) return nullptr; } - return order_info; + return order_info.input_order; } return nullptr; @@ -893,7 +976,8 @@ AggregationInputOrder buildInputOrderInfo(AggregatingStep & aggregating, QueryPl bool can_read = reading->requestReadingInOrder( order_info.input_order->used_prefix_of_sorting_key_size, order_info.input_order->direction, - order_info.input_order->limit); + order_info.input_order->limit, + {}); if (!can_read) return {}; } @@ -1139,7 +1223,7 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, if (order_info) { - bool can_read = read_from_merge_tree->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit); + bool can_read = read_from_merge_tree->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit, {}); if (!can_read) return 0; sorting->convertToFinishSorting(order_info->sort_description_for_merging, false); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 4b5e33e8b07..f4783862a50 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -549,8 +549,7 @@ Pipe ReadFromMergeTree::readInOrder( Names required_columns, PoolSettings pool_settings, ReadType read_type, - UInt64 read_limit, - bool enable_current_virtual_row) + UInt64 read_limit) { /// For reading in order it makes sense to read only /// one range per task to reduce number of read rows. 
@@ -661,7 +660,7 @@ Pipe ReadFromMergeTree::readInOrder( Pipe pipe(source); - if (enable_current_virtual_row && (read_type == ReadType::InOrder)) + if (virtual_row_conversion && (read_type == ReadType::InOrder)) { const auto & index = part_with_ranges.data_part->getIndex(); const auto & primary_key = storage_snapshot->metadata->primary_key; @@ -681,7 +680,7 @@ Pipe ReadFromMergeTree::readInOrder( pipe.addSimpleTransform([&](const Block & header) { - return std::make_shared(header, pk_block); + return std::make_shared(header, pk_block, virtual_row_conversion); }); } @@ -729,7 +728,7 @@ Pipe ReadFromMergeTree::read( if (read_type == ReadType::Default && (max_streams > 1 || checkAllPartsOnRemoteFS(parts_with_range))) return readFromPool(std::move(parts_with_range), std::move(required_columns), std::move(pool_settings)); - auto pipe = readInOrder(parts_with_range, required_columns, pool_settings, read_type, /*limit=*/ 0, false); + auto pipe = readInOrder(parts_with_range, required_columns, pool_settings, read_type, /*limit=*/ 0); /// Use ConcatProcessor to concat sources together. /// It is needed to read in parts order (and so in PK order) if single thread is used. @@ -1038,7 +1037,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( /// For parallel replicas the split will be performed on the initiator side. if (is_parallel_reading_from_replicas) { - pipes.emplace_back(readInOrder(std::move(parts_with_ranges), column_names, pool_settings, read_type, input_order_info->limit, false)); + pipes.emplace_back(readInOrder(std::move(parts_with_ranges), column_names, pool_settings, read_type, input_order_info->limit)); } else { @@ -1111,33 +1110,32 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( splitted_parts_and_ranges.emplace_back(std::move(new_parts)); } - bool primary_key_type_supports_virtual_row = true; - const auto & actions = storage_snapshot->metadata->getPrimaryKey().expression->getActions(); - for (const auto & action : actions) - { - if (action.node->type != ActionsDAG::ActionType::INPUT) - { - primary_key_type_supports_virtual_row = false; - break; - } - } + // bool primary_key_type_supports_virtual_row = true; + // const auto & actions = storage_snapshot->metadata->getPrimaryKey().expression->getActions(); + // for (const auto & action : actions) + // { + // if (action.node->type != ActionsDAG::ActionType::INPUT) + // { + // primary_key_type_supports_virtual_row = false; + // break; + // } + // } - /// If possible in the optimization stage, check whether there are more than one branch. - if (virtual_row_status == VirtualRowStatus::Possible) - virtual_row_status = splitted_parts_and_ranges.size() > 1 - || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1) - ? VirtualRowStatus::Yes : VirtualRowStatus::NoConsiderInLogicalPlan; + // /// If possible in the optimization stage, check whether there are more than one branch. + // if (virtual_row_status == VirtualRowStatus::Possible) + // virtual_row_status = splitted_parts_and_ranges.size() > 1 + // || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1) + // ? 
VirtualRowStatus::Yes : VirtualRowStatus::NoConsiderInLogicalPlan; for (auto && item : splitted_parts_and_ranges) { - bool enable_current_virtual_row = false; - if (virtual_row_status == VirtualRowStatus::Yes) - enable_current_virtual_row = true; - else if (virtual_row_status == VirtualRowStatus::NoConsiderInLogicalPlan) - enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + // bool enable_current_virtual_row = false; + // if (virtual_row_status == VirtualRowStatus::Yes) + // enable_current_virtual_row = true; + // else if (virtual_row_status == VirtualRowStatus::NoConsiderInLogicalPlan) + // enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, - enable_current_virtual_row && primary_key_type_supports_virtual_row)); + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); } } @@ -1172,7 +1170,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( if (pipe.numOutputPorts() > 1) { auto transform = std::make_shared( - pipe.getHeader(), pipe.numOutputPorts(), sort_description, block_size.max_block_size_rows, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch); + pipe.getHeader(), pipe.numOutputPorts(), sort_description, block_size.max_block_size_rows, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, + 0, false, nullptr, false, /*apply_virtual_row_conversions*/ false); pipe.addTransform(std::move(transform)); } @@ -1811,7 +1810,7 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead( return std::make_shared(std::move(result)); } -bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, size_t read_limit) +bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, size_t read_limit, std::optional virtual_row_conversion_) { /// if dirction is not set, use current one if (!direction) @@ -1822,7 +1821,7 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, if (direction != 1 && query_info.isFinal()) return false; - query_info.input_order_info = std::make_shared(SortDescription{}, prefix_size, direction, read_limit, false); + query_info.input_order_info = std::make_shared(SortDescription{}, prefix_size, direction, read_limit); reader_settings.read_in_order = true; /// In case or read-in-order, don't create too many reading streams. 
@@ -1855,6 +1854,9 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, /// Let prefer in-order optimization over vertical FINAL for now enable_vertical_final = false; + if (virtual_row_conversion_) + virtual_row_conversion = std::make_shared(std::move(*virtual_row_conversion_)); + return true; } @@ -2305,6 +2307,12 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const expression->describeActions(format_settings.out, prefix); } } + + if (virtual_row_conversion) + { + format_settings.out << prefix << "Virtual row conversions" << '\n'; + virtual_row_conversion->describeActions(format_settings.out, prefix); + } } void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const @@ -2344,6 +2352,9 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const map.add("Prewhere info", std::move(prewhere_info_map)); } + + if (virtual_row_conversion) + map.add("Virtual row conversions", virtual_row_conversion->toTree()); } void ReadFromMergeTree::describeIndexes(FormatSettings & format_settings) const diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 767fcf3b0f8..e20c06aeb53 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -108,14 +108,6 @@ public: using AnalysisResultPtr = std::shared_ptr; - enum class VirtualRowStatus - { - NoConsiderInLogicalPlan, - Possible, - No, - Yes, - }; - ReadFromMergeTree( MergeTreeData::DataPartsVector parts_, MergeTreeData::MutationsSnapshotPtr mutations_snapshot_, @@ -195,7 +187,7 @@ public: StorageMetadataPtr getStorageMetadata() const { return storage_snapshot->metadata; } /// Returns `false` if requested reading cannot be performed. 
- bool requestReadingInOrder(size_t prefix_size, int direction, size_t limit); + bool requestReadingInOrder(size_t prefix_size, int direction, size_t limit, std::optional virtual_row_conversion_); bool readsInOrder() const; void updatePrewhereInfo(const PrewhereInfoPtr & prewhere_info_value) override; @@ -218,8 +210,6 @@ public: void applyFilters(ActionDAGNodes added_filter_nodes) override; - void setVirtualRowStatus(VirtualRowStatus virtual_row_status_) { virtual_row_status = virtual_row_status_; } - private: int getSortDirection() const { @@ -262,7 +252,7 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); - Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool enable_current_virtual_row); + Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit); Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection); @@ -293,7 +283,7 @@ private: bool enable_vertical_final = false; bool enable_remove_parts_from_snapshot_optimization = true; - VirtualRowStatus virtual_row_status = VirtualRowStatus::NoConsiderInLogicalPlan; + ExpressionActionsPtr virtual_row_conversion; std::optional number_of_current_replica; }; diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index c45192e7118..6121a847ca8 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -187,6 +187,7 @@ void MergeSortingTransform::consume(Chunk chunk) { bool have_all_inputs = false; bool use_average_block_sizes = false; + bool apply_virtual_row = false; external_merging_sorted = std::make_shared( header_without_constants, @@ -199,6 +200,7 @@ void MergeSortingTransform::consume(Chunk chunk) /*always_read_till_end_=*/ false, nullptr, use_average_block_sizes, + apply_virtual_row, have_all_inputs); processors.emplace_back(external_merging_sorted); diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp index 92bf5ce3064..5f2bf0b0788 100644 --- a/src/Processors/Transforms/VirtualRowTransform.cpp +++ b/src/Processors/Transforms/VirtualRowTransform.cpp @@ -9,10 +9,11 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -VirtualRowTransform::VirtualRowTransform(const Block & header_, const Block & pk_block_) +VirtualRowTransform::VirtualRowTransform(const Block & header_, const Block & pk_block_, ExpressionActionsPtr virtual_row_conversions_) : IProcessor({header_}, {header_}) , input(inputs.front()), output(outputs.front()) - , header(header_), pk_block(pk_block_) + , pk_block(pk_block_) + , virtual_row_conversions(std::move(virtual_row_conversions_)) { } @@ -86,6 +87,7 @@ void VirtualRowTransform::work() is_first = false; Columns empty_columns; + const auto & header = getOutputs().front().getHeader(); empty_columns.reserve(header.columns()); for (size_t i = 0; i < header.columns(); ++i) { @@ -94,7 +96,7 @@ void 
VirtualRowTransform::work() } current_chunk.setColumns(empty_columns, 0); - current_chunk.getChunkInfos().add(std::make_shared(0, pk_block)); + current_chunk.getChunkInfos().add(std::make_shared(0, pk_block, virtual_row_conversions)); } else { diff --git a/src/Processors/Transforms/VirtualRowTransform.h b/src/Processors/Transforms/VirtualRowTransform.h index e3215393ad1..efc54419a6e 100644 --- a/src/Processors/Transforms/VirtualRowTransform.h +++ b/src/Processors/Transforms/VirtualRowTransform.h @@ -11,7 +11,7 @@ namespace DB class VirtualRowTransform : public IProcessor { public: - explicit VirtualRowTransform(const Block & header_, const Block & pk_block_); + explicit VirtualRowTransform(const Block & header_, const Block & pk_block_, ExpressionActionsPtr virtual_row_conversions_); String getName() const override { return "VirtualRowTransform"; } @@ -28,8 +28,8 @@ private: bool can_generate = true; bool is_first = true; - Block header; Block pk_block; + ExpressionActionsPtr virtual_row_conversions; }; } diff --git a/src/Storages/ReadInOrderOptimizer.cpp b/src/Storages/ReadInOrderOptimizer.cpp index ea7ea218feb..9c8c4c2fe79 100644 --- a/src/Storages/ReadInOrderOptimizer.cpp +++ b/src/Storages/ReadInOrderOptimizer.cpp @@ -249,7 +249,7 @@ InputOrderInfoPtr ReadInOrderOptimizer::getInputOrderImpl( if (sort_description_for_merging.empty()) return {}; - return std::make_shared(std::move(sort_description_for_merging), key_pos, read_direction, limit, false); + return std::make_shared(std::move(sort_description_for_merging), key_pos, read_direction, limit); } InputOrderInfoPtr ReadInOrderOptimizer::getInputOrder( diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index bf1229f7a3a..7ad6a733c6f 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -119,22 +119,13 @@ struct InputOrderInfo const int direction; const UInt64 limit; - /** For virtual row optimization only - * for example, when pk is (a,b), a = 1, order by b, virtual row should be - * disabled in the following case: - * 1st part (0, 100), (1, 2), (1, 3), (1, 4) - * 2nd part (0, 100), (1, 2), (1, 3), (1, 4). 
- */ - bool first_prefix_fixed; - InputOrderInfo( const SortDescription & sort_description_for_merging_, size_t used_prefix_of_sorting_key_size_, - int direction_, UInt64 limit_, bool first_prefix_fixed_) + int direction_, UInt64 limit_) : sort_description_for_merging(sort_description_for_merging_) , used_prefix_of_sorting_key_size(used_prefix_of_sorting_key_size_) , direction(direction_), limit(limit_) - , first_prefix_fixed(first_prefix_fixed_) { } diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index f40aa8ae4e8..40713a89f30 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1555,7 +1555,7 @@ bool ReadFromMerge::requestReadingInOrder(InputOrderInfoPtr order_info_) auto request_read_in_order = [order_info_](ReadFromMergeTree & read_from_merge_tree) { return read_from_merge_tree.requestReadingInOrder( - order_info_->used_prefix_of_sorting_key_size, order_info_->direction, order_info_->limit); + order_info_->used_prefix_of_sorting_key_size, order_info_->direction, order_info_->limit, {}); }; bool ok = true; From 7feda9a05413fedf681d3fe1e229bf9e5ab434ef Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 26 Sep 2024 15:27:57 +0000 Subject: [PATCH 061/566] Fix 03031_read_in_order_optimization_with_virtual_row --- .../Merges/Algorithms/MergeTreeReadInfo.h | 51 +++++++++++++------ .../Algorithms/MergingSortedAlgorithm.cpp | 3 +- .../Optimizations/optimizeReadInOrder.cpp | 24 ++++++--- .../QueryPlan/ReadFromMergeTree.cpp | 5 +- 4 files changed, 57 insertions(+), 26 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index 62bbe3eac6e..a4baaca215b 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -7,6 +7,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + /// To carry part level and virtual row if chunk is produced by a merge tree source class MergeTreeReadInfo : public ChunkInfoCloneable { @@ -41,33 +46,49 @@ inline bool isVirtualRow(const Chunk & chunk) return false; } -inline void setVirtualRow(Chunk & chunk, bool apply_virtual_row_conversions) +inline void setVirtualRow(Chunk & chunk, const Block & header, bool apply_virtual_row_conversions) { - auto read_info = chunk.getChunkInfos().extract(); + auto read_info = chunk.getChunkInfos().get(); chassert(read_info); Block & pk_block = read_info->pk_block; + + // std::cerr << apply_virtual_row_conversions << std::endl; + // std::cerr << read_info->virtual_row_conversions->dumpActions() << std::endl; + if (apply_virtual_row_conversions) read_info->virtual_row_conversions->execute(pk_block); - chunk.setColumns(pk_block.getColumns(), 1); + // std::cerr << "++++" << pk_block.dumpStructure() << std::endl; - // Columns ordered_columns; - // ordered_columns.reserve(pk_block.columns()); + Columns ordered_columns; + ordered_columns.reserve(pk_block.columns()); - // for (size_t i = 0; i < header.columns(); ++i) - // { - // const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); - // ColumnPtr current_column = type_and_name.type->createColumn(); + for (size_t i = 0; i < header.columns(); ++i) + { + const ColumnWithTypeAndName & col = header.getByPosition(i); + if (const auto * pk_col = pk_block.findByName(col.name)) + { + if (!col.type->equals(*pk_col->type)) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Virtual row has different tupe for {}. 
Expected {}, got {}", + col.name, col.dumpStructure(), pk_col->dumpStructure()); - // size_t pos = type_and_name.name.find_last_of('.'); - // String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); + ordered_columns.push_back(pk_col->column); + } + else + ordered_columns.push_back(col.type->createColumnConstWithDefaultValue(1)); - // const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); - // ordered_columns.push_back(column ? column->column : current_column->cloneResized(1)); - // } + // ColumnPtr current_column = type_and_name.type->createColumn(); - // chunk.setColumns(ordered_columns, 1); + // size_t pos = type_and_name.name.find_last_of('.'); + // String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); + + // const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); + // ordered_columns.push_back(column ? column->column : current_column->cloneResized(1)); + } + + chunk.setColumns(ordered_columns, 1); } } diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 0dd95729ba3..011f713744b 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -62,8 +62,7 @@ void MergingSortedAlgorithm::initialize(Inputs inputs) if (!isVirtualRow(input.chunk)) continue; - setVirtualRow(input.chunk, apply_virtual_row_conversions); - input.skip_last_row = true; + setVirtualRow(input.chunk, header, apply_virtual_row_conversions); } removeConstAndSparse(inputs); diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 49ce9a0280d..5396cced6c1 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -339,20 +339,20 @@ void enrichFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns) } } -static const ActionsDAG::Node * addMonotonicChain(ActionsDAG & dag, const ActionsDAG::Node * node, const MatchedTrees::Match * match) +static const ActionsDAG::Node * addMonotonicChain(ActionsDAG & dag, const ActionsDAG::Node * node, const MatchedTrees::Match * match, const std::string & input_name) { if (!match->monotonicity) - return &dag.addInput(node->result_name, node->result_type); + return &dag.addInput(input_name, node->result_type); if (node->type == ActionsDAG::ActionType::ALIAS) - return &dag.addAlias(*addMonotonicChain(dag, node->children.front(), match), node->result_name); + return &dag.addAlias(*addMonotonicChain(dag, node->children.front(), match, input_name), node->result_name); ActionsDAG::NodeRawConstPtrs args; args.reserve(node->children.size()); for (const auto * child : node->children) { if (child == match->monotonicity->child_node) - args.push_back(addMonotonicChain(dag, match->monotonicity->child_node, match->monotonicity->child_match)); + args.push_back(addMonotonicChain(dag, match->monotonicity->child_node, match->monotonicity->child_match, input_name)); else args.push_back(&dag.addColumn({child->column, child->result_type, child->result_name})); } @@ -571,15 +571,25 @@ SortingInputOrder buildInputOrderInfo( { ActionsDAG virtual_row_dag; virtual_row_dag.getOutputs().reserve(match_infos.size()); + size_t next_pk_name = 0; for (const auto & info : match_infos) { const ActionsDAG::Node * output; if 
(info.fixed_column) output = &virtual_row_dag.addColumn({info.fixed_column->column, info.fixed_column->result_type, info.fixed_column->result_name}); - else if (info.monotonic) - output = addMonotonicChain(virtual_row_dag, info.source, info.monotonic); else - output = &virtual_row_dag.addInput(info.source->result_name, info.source->result_type); + { + if (info.monotonic) + output = addMonotonicChain(virtual_row_dag, info.source, info.monotonic, pk_column_names[next_pk_name]); + else + { + output = &virtual_row_dag.addInput(pk_column_names[next_pk_name], info.source->result_type); + if (pk_column_names[next_pk_name] != info.source->result_name) + output = &virtual_row_dag.addAlias(*output, info.source->result_name); + } + + ++next_pk_name; + } virtual_row_dag.getOutputs().push_back(output); } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 38f018d34ee..c6fc924d7a7 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -701,9 +701,10 @@ Pipe ReadFromMergeTree::readInOrder( size_t mark_range_begin = part_with_ranges.ranges.front().begin; ColumnsWithTypeAndName pk_columns; - pk_columns.reserve(index->size()); + size_t num_columns = virtual_row_conversion->getSampleBlock().columns(); + pk_columns.reserve(num_columns); - for (size_t j = 0; j < index->size(); ++j) + for (size_t j = 0; j < num_columns; ++j) { auto column = primary_key.data_types[j]->createColumn()->cloneEmpty(); column->insert((*(*index)[j])[mark_range_begin]); From d5c0c499df1c80d94c3248394f8d8e271003d8fc Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 26 Sep 2024 16:01:47 +0000 Subject: [PATCH 062/566] Fix PK size. --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index c6fc924d7a7..b16a460ec68 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -701,7 +701,7 @@ Pipe ReadFromMergeTree::readInOrder( size_t mark_range_begin = part_with_ranges.ranges.front().begin; ColumnsWithTypeAndName pk_columns; - size_t num_columns = virtual_row_conversion->getSampleBlock().columns(); + size_t num_columns = virtual_row_conversion->getRequiredColumnsWithTypes().size(); pk_columns.reserve(num_columns); for (size_t j = 0; j < num_columns; ++j) From d6b444dac9328ea0b64fda1005f78e2164fbab1b Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 26 Sep 2024 16:12:18 +0000 Subject: [PATCH 063/566] Skip virtual row chunk by skipping last row. 
--- .../Algorithms/MergingSortedAlgorithm.cpp | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 011f713744b..331b67066be 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -63,6 +63,7 @@ void MergingSortedAlgorithm::initialize(Inputs inputs) continue; setVirtualRow(input.chunk, header, apply_virtual_row_conversions); + input.skip_last_row = true; } removeConstAndSparse(inputs); @@ -149,8 +150,8 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeImpl(TSortingHeap & queue auto current = queue.current(); - if (isVirtualRow(current_inputs[current.impl->order].chunk)) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Virtual row is not implemented for Non-batch mode."); + // if (isVirtualRow(current_inputs[current.impl->order].chunk)) + // throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Virtual row is not implemented for Non-batch mode."); if (current.impl->isLast() && current_inputs[current.impl->order].skip_last_row) { @@ -248,14 +249,14 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeBatchImpl(TSortingQueue & auto [current_ptr, initial_batch_size] = queue.current(); auto current = *current_ptr; - if (isVirtualRow(current_inputs[current.impl->order].chunk)) - { - /// If virtual row is detected, there should be only one row as a single chunk, - /// and always skip this chunk to pull the next one. - chassert(initial_batch_size == 1); - queue.removeTop(); - return Status(current.impl->order); - } + // if (isVirtualRow(current_inputs[current.impl->order].chunk)) + // { + // /// If virtual row is detected, there should be only one row as a single chunk, + // /// and always skip this chunk to pull the next one. + // chassert(initial_batch_size == 1); + // queue.removeTop(); + // return Status(current.impl->order); + // } bool batch_skip_last_row = false; if (current.impl->isLast(initial_batch_size) && current_inputs[current.impl->order].skip_last_row) From fb0b46adbf40dfbf7feaf36796c88d8e82da6633 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 27 Sep 2024 09:24:54 +0000 Subject: [PATCH 064/566] DIsable virtual row for FINAL. --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index b16a460ec68..37a159dd865 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1890,7 +1890,8 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, /// Let prefer in-order optimization over vertical FINAL for now enable_vertical_final = false; - if (virtual_row_conversion_) + /// Disable virtual row for FINAL. + if (virtual_row_conversion_ && !isQueryWithFinal()) virtual_row_conversion = std::make_shared(std::move(*virtual_row_conversion_)); return true; From 63c89ded04c36c572e9280c356a4bf5570c65bf7 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 27 Sep 2024 10:56:28 +0000 Subject: [PATCH 065/566] Fixing other tests. 
--- .../Optimizations/optimizeReadInOrder.cpp | 2 +- .../01786_explain_merge_tree.reference | 14 ++++++++++++++ .../02149_read_in_order_fixed_prefix.reference | 16 ++++++---------- 3 files changed, 21 insertions(+), 11 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 5396cced6c1..cac4cf69054 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -515,7 +515,7 @@ SortingInputOrder buildInputOrderInfo( // 1st part (0, 100), (1, 2), (1, 3), (1, 4) // 2nd part (0, 100), (1, 2), (1, 3), (1, 4). - can_optimize_virtual_row = true; + can_optimize_virtual_row = false; } //std::cerr << "+++++++++ Found fixed key by match" << std::endl; diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 3a015d32539..f02dbcb59c9 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -86,11 +86,17 @@ ReadType: InOrder Parts: 1 Granules: 3 + Virtual row conversions + Actions: INPUT :: 0 -> x UInt32 : 0 + Positions: 0 ----------------- ReadFromMergeTree (default.test_index) ReadType: InReverseOrder Parts: 1 Granules: 3 + Virtual row conversions + Actions: INPUT :: 0 -> x UInt32 : 0 + Positions: 0 ReadFromMergeTree (default.idx) Indexes: PrimaryKey @@ -174,11 +180,19 @@ ReadType: InOrder Parts: 1 Granules: 3 + Virtual row conversions + Actions: INPUT : 0 -> x UInt32 : 0 + ALIAS x :: 0 -> __table1.x UInt32 : 1 + Positions: 1 ----------------- ReadFromMergeTree (default.test_index) ReadType: InReverseOrder Parts: 1 Granules: 3 + Virtual row conversions + Actions: INPUT : 0 -> x UInt32 : 0 + ALIAS x :: 0 -> __table1.x UInt32 : 1 + Positions: 1 ReadFromMergeTree (default.idx) Indexes: PrimaryKey diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference index 31462988c2d..cb96a7167da 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference @@ -14,10 +14,7 @@ ExpressionTransform (Expression) ExpressionTransform × 2 (ReadFromMergeTree) - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -54,10 +51,7 @@ ExpressionTransform (Expression) ExpressionTransform × 2 (ReadFromMergeTree) - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 2020-10-11 0 2020-10-11 0 2020-10-11 0 @@ -178,7 +172,8 @@ ExpressionTransform (Expression) ExpressionTransform (ReadFromMergeTree) - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 @@ -192,7 +187,8 @@ ExpressionTransform (Expression) ExpressionTransform (ReadFromMergeTree) - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: 
InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 From aaabaadf5650f37507ba07b7eca3327b4a41db95 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 28 Sep 2024 14:15:43 +0000 Subject: [PATCH 066/566] cleanup --- .../Merges/Algorithms/MergeTreeReadInfo.h | 10 +- .../Algorithms/MergingSortedAlgorithm.cpp | 17 --- .../QueryPlan/ReadFromMergeTree.cpp | 25 ---- .../02521_aggregation_by_partitions.reference | 112 +++++------------- 4 files changed, 33 insertions(+), 131 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index a4baaca215b..253d008c21d 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -71,21 +71,13 @@ inline void setVirtualRow(Chunk & chunk, const Block & header, bool apply_virtua { if (!col.type->equals(*pk_col->type)) throw Exception(ErrorCodes::LOGICAL_ERROR, - "Virtual row has different tupe for {}. Expected {}, got {}", + "Virtual row has different type for {}. Expected {}, got {}", col.name, col.dumpStructure(), pk_col->dumpStructure()); ordered_columns.push_back(pk_col->column); } else ordered_columns.push_back(col.type->createColumnConstWithDefaultValue(1)); - - // ColumnPtr current_column = type_and_name.type->createColumn(); - - // size_t pos = type_and_name.name.find_last_of('.'); - // String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); - - // const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); - // ordered_columns.push_back(column ? column->column : current_column->cloneResized(1)); } chunk.setColumns(ordered_columns, 1); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 331b67066be..f2ebf9053ea 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -8,11 +8,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - MergingSortedAlgorithm::MergingSortedAlgorithm( Block header_, size_t num_inputs, @@ -150,9 +145,6 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeImpl(TSortingHeap & queue auto current = queue.current(); - // if (isVirtualRow(current_inputs[current.impl->order].chunk)) - // throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Virtual row is not implemented for Non-batch mode."); - if (current.impl->isLast() && current_inputs[current.impl->order].skip_last_row) { /// Get the next block from the corresponding source, if there is one. @@ -249,15 +241,6 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeBatchImpl(TSortingQueue & auto [current_ptr, initial_batch_size] = queue.current(); auto current = *current_ptr; - // if (isVirtualRow(current_inputs[current.impl->order].chunk)) - // { - // /// If virtual row is detected, there should be only one row as a single chunk, - // /// and always skip this chunk to pull the next one. 
- // chassert(initial_batch_size == 1); - // queue.removeTop(); - // return Status(current.impl->order); - // } - bool batch_skip_last_row = false; if (current.impl->isLast(initial_batch_size) && current_inputs[current.impl->order].skip_last_row) { diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index e90d6165aa3..6622662d0a0 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1133,33 +1133,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( splitted_parts_and_ranges.emplace_back(std::move(new_parts)); } - // bool primary_key_type_supports_virtual_row = true; - // const auto & actions = storage_snapshot->metadata->getPrimaryKey().expression->getActions(); - // for (const auto & action : actions) - // { - // if (action.node->type != ActionsDAG::ActionType::INPUT) - // { - // primary_key_type_supports_virtual_row = false; - // break; - // } - // } - - // /// If possible in the optimization stage, check whether there are more than one branch. - // if (virtual_row_status == VirtualRowStatus::Possible) - // virtual_row_status = splitted_parts_and_ranges.size() > 1 - // || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1) - // ? VirtualRowStatus::Yes : VirtualRowStatus::NoConsiderInLogicalPlan; - for (auto && item : splitted_parts_and_ranges) - { - // bool enable_current_virtual_row = false; - // if (virtual_row_status == VirtualRowStatus::Yes) - // enable_current_virtual_row = true; - // else if (virtual_row_status == VirtualRowStatus::NoConsiderInLogicalPlan) - // enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); - } } Block pipe_header; diff --git a/tests/queries/0_stateless/02521_aggregation_by_partitions.reference b/tests/queries/0_stateless/02521_aggregation_by_partitions.reference index addc36421c3..87b2d5c3430 100644 --- a/tests/queries/0_stateless/02521_aggregation_by_partitions.reference +++ b/tests/queries/0_stateless/02521_aggregation_by_partitions.reference @@ -160,100 +160,52 @@ ExpressionTransform × 16 (ReadFromMergeTree) MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + 
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, 
algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 1000000 Skip merging: 1 Skip merging: 1 From 4e6180b50aaf3e39616750f8e4c6b114e0362e97 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 30 Sep 2024 13:18:44 +0000 Subject: [PATCH 067/566] Resolve conflicts, better exception message --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 8 ++++++-- src/Core/Settings.h | 2 +- src/Interpreters/ExpressionAnalyzer.cpp | 8 ++++++-- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index f3d77b0f091..56c96d41c6c 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -103,6 +103,8 @@ namespace Setting extern const SettingsBool single_join_prefer_left_table; extern const SettingsBool transform_null_in; extern const SettingsUInt64 use_structure_from_insertion_table_in_table_functions; + extern const SettingsBool allow_suspicious_types_in_group_by; + extern const SettingsBool allow_suspicious_types_in_order_by; } @@ -4100,7 +4102,7 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const { - if (scope.context->getSettingsRef().allow_suspicious_types_in_order_by) + if (scope.context->getSettingsRef()[Setting::allow_suspicious_types_in_order_by]) return; auto check = [](const IDataType & type) @@ -4109,6 +4111,7 @@ void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); }; @@ -4189,7 +4192,7 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR */ void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const { - if (scope.context->getSettingsRef().allow_suspicious_types_in_group_by) + if (scope.context->getSettingsRef()[Setting::allow_suspicious_types_in_group_by]) return; auto check = [](const IDataType & type) @@ -4198,6 +4201,7 @@ void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). 
" "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); }; diff --git a/src/Core/Settings.h b/src/Core/Settings.h index bc2d0b423c1..5909ab6314c 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -156,4 +156,4 @@ struct Settings private: std::unique_ptr impl; }; -} \ No newline at end of file +} diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index dc7dca712a0..9a09bf8e16f 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -106,6 +106,8 @@ namespace Setting extern const SettingsBool query_plan_aggregation_in_order; extern const SettingsBool query_plan_read_in_order; extern const SettingsUInt64 use_index_for_in_with_subqueries_max_values; + extern const SettingsBool allow_suspicious_types_in_group_by; + extern const SettingsBool allow_suspicious_types_in_order_by; } @@ -1409,7 +1411,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain void SelectQueryExpressionAnalyzer::validateGroupByKeyType(const DB::DataTypePtr & key_type) const { - if (getContext()->getSettingsRef().allow_suspicious_types_in_group_by) + if (getContext()->getSettingsRef()[Setting::allow_suspicious_types_in_group_by]) return; auto check = [](const IDataType & type) @@ -1418,6 +1420,7 @@ void SelectQueryExpressionAnalyzer::validateGroupByKeyType(const DB::DataTypePtr throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); }; @@ -1692,7 +1695,7 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy( void SelectQueryExpressionAnalyzer::validateOrderByKeyType(const DataTypePtr & key_type) const { - if (getContext()->getSettingsRef().allow_suspicious_types_in_order_by) + if (getContext()->getSettingsRef()[Setting::allow_suspicious_types_in_order_by]) return; auto check = [](const IDataType & type) @@ -1701,6 +1704,7 @@ void SelectQueryExpressionAnalyzer::validateOrderByKeyType(const DataTypePtr & k throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); }; From 11c3c0de2447e5fcab999b13d0539cd074f3831d Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 30 Sep 2024 13:22:34 +0000 Subject: [PATCH 068/566] Even better exception message --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 6 ++++-- src/Interpreters/ExpressionAnalyzer.cpp | 6 ++++-- src/Storages/KeyDescription.cpp | 5 ++++- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 56c96d41c6c..7dc1d99efd0 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -4111,7 +4111,8 @@ void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. 
" - "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if " + "its a JSON path subcolumn) or casting this column to a specific data type. " "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); }; @@ -4201,7 +4202,8 @@ void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " - "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if " + "its a JSON path subcolumn) or casting this column to a specific data type. " "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); }; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 9a09bf8e16f..12e769f249a 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1420,7 +1420,8 @@ void SelectQueryExpressionAnalyzer::validateGroupByKeyType(const DB::DataTypePtr throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " - "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if " + "its a JSON path subcolumn) or casting this column to a specific data type. " "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); }; @@ -1704,7 +1705,8 @@ void SelectQueryExpressionAnalyzer::validateOrderByKeyType(const DataTypePtr & k throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. " - "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if " + "its a JSON path subcolumn) or casting this column to a specific data type. " "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); }; diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index bb0b6d3542d..5c0449612e7 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -155,7 +155,10 @@ KeyDescription KeyDescription::getSortingKeyFromAST( auto check = [&](const IDataType & type) { if (isDynamic(type) || isVariant(type)) - throw Exception(ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, "Column with type Variant/Dynamic is not allowed in key expression"); + throw Exception( + ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, + "Column with type Variant/Dynamic is not allowed in key expression. 
Consider using a subcolumn with a specific data " + "type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn) or casting this column to a specific data type"); }; check(*result.data_types.back()); From dda32963fdd399c2c614b2cb630fb714549e2804 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 30 Sep 2024 13:57:19 +0000 Subject: [PATCH 069/566] Fix tests --- src/Core/SettingsChangesHistory.cpp | 6 ++---- .../03096_variant_in_primary_key.reference | 4 ---- .../0_stateless/03096_variant_in_primary_key.sql | 8 -------- .../03231_dynamic_incomplete_type_insert_bug.sql | 1 + .../03231_dynamic_not_safe_primary_key.reference | 0 .../03231_dynamic_not_safe_primary_key.sql | 11 ----------- .../0_stateless/03231_dynamic_uniq_group_by.sql | 2 ++ 7 files changed, 5 insertions(+), 27 deletions(-) delete mode 100644 tests/queries/0_stateless/03096_variant_in_primary_key.reference delete mode 100644 tests/queries/0_stateless/03096_variant_in_primary_key.sql delete mode 100644 tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference delete mode 100644 tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 21a42b970f2..7bc9517a6a6 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -72,6 +72,8 @@ static std::initializer_list Date: Tue, 1 Oct 2024 12:59:46 +0000 Subject: [PATCH 070/566] Fix tests --- tests/queries/0_stateless/01825_new_type_json_10.sql | 1 + tests/queries/0_stateless/01825_new_type_json_11.sh | 6 +++--- tests/queries/0_stateless/01825_new_type_json_12.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_13.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_6.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_7.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_ghdata.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_in_array.sql | 3 +++ .../0_stateless/01825_new_type_json_insert_select.sql | 2 ++ .../queries/0_stateless/02421_new_type_json_async_insert.sh | 2 +- .../0_stateless/03151_dynamic_type_scale_max_types.sql | 5 +++-- 11 files changed, 18 insertions(+), 11 deletions(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_10.sql b/tests/queries/0_stateless/01825_new_type_json_10.sql index f586cc4477b..9aac35e2c88 100644 --- a/tests/queries/0_stateless/01825_new_type_json_10.sql +++ b/tests/queries/0_stateless/01825_new_type_json_10.sql @@ -1,6 +1,7 @@ -- Tags: no-fasttest SET allow_experimental_json_type = 1; +SET allow_suspicious_types_in_order_by = 1; DROP TABLE IF EXISTS t_json_10; CREATE TABLE t_json_10 (o JSON) ENGINE = Memory; diff --git a/tests/queries/0_stateless/01825_new_type_json_11.sh b/tests/queries/0_stateless/01825_new_type_json_11.sh index f448b7433ab..e9b90af4499 100755 --- a/tests/queries/0_stateless/01825_new_type_json_11.sh +++ b/tests/queries/0_stateless/01825_new_type_json_11.sh @@ -57,8 +57,8 @@ $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) as $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(obj.key_1[]))) as path FROM t_json_11 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(obj.key_1[].key_3[])))) as path FROM t_json_11 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arrayJoin(obj.key_1[].key_3[].key_4[]))))) as path FROM t_json_11 order by path;" 
-$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" -$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3 FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" -$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3[].key_4[].key_5, obj.key_1[].key_3[].key_7 FROM t_json_11 ORDER BY obj.id" +$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" --allow_suspicious_types_in_order_by 1 +$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3 FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" --allow_suspicious_types_in_order_by 1 +$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3[].key_4[].key_5, obj.key_1[].key_3[].key_7 FROM t_json_11 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_11;" diff --git a/tests/queries/0_stateless/01825_new_type_json_12.sh b/tests/queries/0_stateless/01825_new_type_json_12.sh index d7c938d7cd1..e3909787690 100755 --- a/tests/queries/0_stateless/01825_new_type_json_12.sh +++ b/tests/queries/0_stateless/01825_new_type_json_12.sh @@ -49,6 +49,6 @@ $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arrayJoin(obj.key_0[].key_1[].key_3[]))))) as path FROM t_json_12 order by path;" $CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_12 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 $CLICKHOUSE_CLIENT -q "SELECT obj.key_0[].key_1[].key_3[].key_4, obj.key_0[].key_1[].key_3[].key_5, \ - obj.key_0[].key_1[].key_3[].key_6, obj.key_0[].key_1[].key_3[].key_7 FROM t_json_12 ORDER BY obj.id" + obj.key_0[].key_1[].key_3[].key_6, obj.key_0[].key_1[].key_3[].key_7 FROM t_json_12 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_12;" diff --git a/tests/queries/0_stateless/01825_new_type_json_13.sh b/tests/queries/0_stateless/01825_new_type_json_13.sh index 316e6890d5e..e7d9f556be7 100755 --- a/tests/queries/0_stateless/01825_new_type_json_13.sh +++ b/tests/queries/0_stateless/01825_new_type_json_13.sh @@ -45,6 +45,6 @@ $CLICKHOUSE_CLIENT -q "SELECT \ obj.key_1.key_2.key_3.key_4.key_5, \ obj.key_1.key_2.key_3.key_4.key_6, \ obj.key_1.key_2.key_3.key_4.key_7 \ -FROM t_json_13 ORDER BY obj.id" +FROM t_json_13 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_13;" diff --git a/tests/queries/0_stateless/01825_new_type_json_6.sh b/tests/queries/0_stateless/01825_new_type_json_6.sh index 6b9a7e71f50..a2102636c42 100755 --- a/tests/queries/0_stateless/01825_new_type_json_6.sh +++ b/tests/queries/0_stateless/01825_new_type_json_6.sh @@ -54,6 +54,6 @@ EOF $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) as path FROM t_json_6 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(data.out[]))) as path FROM t_json_6 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(data.out[].outputs[])))) as path FROM t_json_6 order by path;" -$CLICKHOUSE_CLIENT -q "SELECT data.key, data.out[].type, data.out[].value, data.out[].outputs[].index, data.out[].outputs[].n FROM t_json_6 ORDER BY data.key" +$CLICKHOUSE_CLIENT -q "SELECT data.key, data.out[].type, data.out[].value, data.out[].outputs[].index, data.out[].outputs[].n FROM t_json_6 ORDER BY data.key" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_6;" diff --git 
a/tests/queries/0_stateless/01825_new_type_json_7.sh b/tests/queries/0_stateless/01825_new_type_json_7.sh index 36483175df6..b6ea46f5ff8 100755 --- a/tests/queries/0_stateless/01825_new_type_json_7.sh +++ b/tests/queries/0_stateless/01825_new_type_json_7.sh @@ -25,6 +25,6 @@ cat < Date: Wed, 2 Oct 2024 15:16:38 +0000 Subject: [PATCH 071/566] Fix tests --- tests/queries/0_stateless/01825_new_type_json_12.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_13.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_in_array.sql | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_12.sh b/tests/queries/0_stateless/01825_new_type_json_12.sh index e3909787690..fd5b9fddd75 100755 --- a/tests/queries/0_stateless/01825_new_type_json_12.sh +++ b/tests/queries/0_stateless/01825_new_type_json_12.sh @@ -47,7 +47,7 @@ $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) as $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(obj.key_0[]))) as path FROM t_json_12 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(obj.key_0[].key_1[])))) as path FROM t_json_12 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arrayJoin(obj.key_0[].key_1[].key_3[]))))) as path FROM t_json_12 order by path;" -$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_12 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_12 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "SELECT obj.key_0[].key_1[].key_3[].key_4, obj.key_0[].key_1[].key_3[].key_5, \ obj.key_0[].key_1[].key_3[].key_6, obj.key_0[].key_1[].key_3[].key_7 FROM t_json_12 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 diff --git a/tests/queries/0_stateless/01825_new_type_json_13.sh b/tests/queries/0_stateless/01825_new_type_json_13.sh index e7d9f556be7..116665e58e3 100755 --- a/tests/queries/0_stateless/01825_new_type_json_13.sh +++ b/tests/queries/0_stateless/01825_new_type_json_13.sh @@ -39,7 +39,7 @@ EOF $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) as path FROM t_json_13 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(obj.key1[]))) as path FROM t_json_13 order by path;" -$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_13 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_13 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "SELECT \ obj.key_1.key_2.key_3.key_8, \ obj.key_1.key_2.key_3.key_4.key_5, \ diff --git a/tests/queries/0_stateless/01825_new_type_json_in_array.sql b/tests/queries/0_stateless/01825_new_type_json_in_array.sql index 3d2e04a1bfd..ef15061e6c8 100644 --- a/tests/queries/0_stateless/01825_new_type_json_in_array.sql +++ b/tests/queries/0_stateless/01825_new_type_json_in_array.sql @@ -3,7 +3,7 @@ SET allow_experimental_json_type = 1; SET allow_experimental_analyzer = 1; SET allow_suspicious_types_in_order_by = 1; -SET allow_suspicious_types_in_order_by = 1; +SET allow_suspicious_types_in_group_by = 1; DROP TABLE IF EXISTS t_json_array; From 2f923ee24278a22e2c78d957f76077dff21176a5 Mon Sep 17 00:00:00 2001 
From: avogar Date: Fri, 4 Oct 2024 14:36:28 +0000 Subject: [PATCH 072/566] Fix old analyzer --- src/Interpreters/ExpressionAnalyzer.cpp | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 12e769f249a..5913cf644d8 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1372,6 +1372,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain ExpressionActionsChain::Step & step = chain.lastStep(columns_after_join); ASTs asts = select_query->groupBy()->children; + NameSet group_by_keys; if (select_query->group_by_with_grouping_sets) { for (const auto & ast : asts) @@ -1379,6 +1380,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain for (const auto & ast_element : ast->children) { step.addRequiredOutput(ast_element->getColumnName()); + group_by_keys.insert(ast_element->getColumnName()); getRootActions(ast_element, only_types, step.actions()->dag); } } @@ -1388,12 +1390,16 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain for (const auto & ast : asts) { step.addRequiredOutput(ast->getColumnName()); + group_by_keys.insert(ast->getColumnName()); getRootActions(ast, only_types, step.actions()->dag); } } for (const auto & result_column : step.getResultColumns()) - validateGroupByKeyType(result_column.type); + { + if (group_by_keys.contains(result_column.name)) + validateGroupByKeyType(result_column.type); + } if (optimize_aggregation_in_order) { @@ -1612,9 +1618,6 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy( getRootActions(select_query->orderBy(), only_types, step.actions()->dag); - for (const auto & result_column : step.getResultColumns()) - validateOrderByKeyType(result_column.type); - bool with_fill = false; for (auto & child : select_query->orderBy()->children) @@ -1629,6 +1632,12 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy( with_fill = true; } + for (const auto & result_column : step.getResultColumns()) + { + if (order_by_keys.contains(result_column.name)) + validateOrderByKeyType(result_column.type); + } + if (auto interpolate_list = select_query->interpolate()) { From 93620886f689d3b6ed59a9a36539c2902158e6b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Sun, 6 Oct 2024 22:16:06 +0300 Subject: [PATCH 073/566] Revert part actual name to pass the check --- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 3f5c70adb64..4a994bc38e2 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -844,8 +844,9 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: } } - /// Save the current temporary path in case we need to revert the change to retry (ZK connection loss) + /// Save the current temporary path and name in case we need to revert the change to retry (ZK connection loss) or in case part is deduplicated. const String temporary_part_relative_path = part->getDataPartStorage().getPartDirectory(); + const String initial_part_name = part->name; /// Obtain incremental block number and lock it. 
The lock holds our intention to add the block to the filesystem. /// We remove the lock just after renaming the part. In case of exception, block number will be marked as abandoned. @@ -1024,6 +1025,7 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: transaction.rollbackPartsToTemporaryState(); part->is_temp = true; + part->setName(initial_part_name); part->renameTo(temporary_part_relative_path, false); if constexpr (async_insert) From 91931b5b3cccabdc94231a8a07ad4d2e8de8d8b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Sun, 6 Oct 2024 22:56:48 +0300 Subject: [PATCH 074/566] Fix style --- tests/integration/test_deduplicated_attached_part_rename/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_deduplicated_attached_part_rename/test.py b/tests/integration/test_deduplicated_attached_part_rename/test.py index 7afd85c62dc..02fa2c9d4a3 100644 --- a/tests/integration/test_deduplicated_attached_part_rename/test.py +++ b/tests/integration/test_deduplicated_attached_part_rename/test.py @@ -1,4 +1,5 @@ import pytest + from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) From 52484cbfec0c168bb440d623673aeb321e1c0211 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Mon, 7 Oct 2024 12:45:23 +0800 Subject: [PATCH 075/566] Fix tests --- tests/queries/0_stateless/01825_new_type_json_ghdata.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh index 6a4fc7d5935..cabc3efcd8e 100755 --- a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh @@ -16,7 +16,7 @@ ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" ${CLICKHOUSE_CLIENT} -q \ "SELECT data.repo.name, count() AS stars FROM ghdata \ - WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" + WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" --allow_suspicious_types_in_order_by 1 --allow_suspicious_types_in_group_by 1 ${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ "SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ From 7808f00857a157e2b49606df6de567a63462aa58 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 7 Oct 2024 06:53:12 +0000 Subject: [PATCH 076/566] Support alter from String to JSON --- src/Columns/ColumnArray.h | 7 + src/Columns/ColumnDynamic.cpp | 9 ++ src/Columns/ColumnDynamic.h | 1 + src/Columns/ColumnMap.cpp | 7 + src/Columns/ColumnMap.h | 1 + src/Columns/ColumnObject.cpp | 25 ++++ src/Columns/ColumnObject.h | 2 + src/Columns/ColumnTuple.cpp | 20 +++ src/Columns/ColumnTuple.h | 1 + src/Columns/ColumnVariant.cpp | 17 +++ src/Columns/ColumnVariant.h | 1 + src/Columns/IColumn.h | 3 + .../Serializations/SerializationDynamic.cpp | 45 +++--- .../Serializations/SerializationDynamic.h | 20 ++- .../Serializations/SerializationObject.cpp | 22 ++- .../Serializations/SerializationObject.h | 16 ++- src/Functions/FunctionsConversion.cpp | 5 +- src/Storages/AlterCommands.cpp | 14 +- .../MergeTreeDataPartWriterCompact.cpp | 31 ++-- .../MergeTreeDataPartWriterCompact.h | 6 +- .../MergeTreeDataPartWriterOnDisk.cpp | 39 +++++ .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 12 ++ .../MergeTree/MergeTreeDataPartWriterWide.cpp | 32 
++--- .../MergeTree/MergeTreeDataPartWriterWide.h | 8 +- .../03246_alter_from_string_to_json.reference | 134 ++++++++++++++++++ .../03246_alter_from_string_to_json.sql.j2 | 32 +++++ ...3247_ghdata_string_to_json_alter.reference | 12 ++ .../03247_ghdata_string_to_json_alter.sh | 30 ++++ .../03248_string_to_json_alter_fuzz.reference | 0 .../03248_string_to_json_alter_fuzz.sql | 17 +++ 30 files changed, 459 insertions(+), 110 deletions(-) create mode 100644 tests/queries/0_stateless/03246_alter_from_string_to_json.reference create mode 100644 tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 create mode 100644 tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference create mode 100755 tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh create mode 100644 tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference create mode 100644 tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h index f77268a8be6..df52880d6e4 100644 --- a/src/Columns/ColumnArray.h +++ b/src/Columns/ColumnArray.h @@ -192,6 +192,13 @@ public: bool hasDynamicStructure() const override { return getData().hasDynamicStructure(); } void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; + bool dynamicStructureEquals(const IColumn & rhs) const override + { + if (const auto * rhs_concrete = typeid_cast(&rhs)) + return data->dynamicStructureEquals(*rhs_concrete->data); + return false; + } + private: WrappedPtr data; WrappedPtr offsets; diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index 5a837a62761..09a05e52c90 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -1153,6 +1153,15 @@ void ColumnDynamic::prepareVariantsForSquashing(const Columns & source_columns) } } +bool ColumnDynamic::dynamicStructureEquals(const IColumn & rhs) const +{ + if (const auto * rhs_concrete = typeid_cast(&rhs)) + return max_dynamic_types == rhs_concrete->max_dynamic_types && global_max_dynamic_types == rhs_concrete->global_max_dynamic_types + && variant_info.variant_name == rhs_concrete->variant_info.variant_name + && variant_column->dynamicStructureEquals(*rhs_concrete->variant_column); + return false; +} + void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source_columns) { if (!empty()) diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index 17b0d80e5eb..9e8b1f79321 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -367,6 +367,7 @@ public: bool addNewVariant(const DataTypePtr & new_variant) { return addNewVariant(new_variant, new_variant->getName()); } bool hasDynamicStructure() const override { return true; } + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; const StatisticsPtr & getStatistics() const { return statistics; } diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp index 536da4d06d0..4e81191939f 100644 --- a/src/Columns/ColumnMap.cpp +++ b/src/Columns/ColumnMap.cpp @@ -330,6 +330,13 @@ bool ColumnMap::structureEquals(const IColumn & rhs) const return false; } +bool ColumnMap::dynamicStructureEquals(const IColumn & rhs) const +{ + if (const auto * rhs_map = typeid_cast(&rhs)) + return nested->dynamicStructureEquals(*rhs_map->nested); + return false; +} + ColumnPtr ColumnMap::compress() const { auto compressed = nested->compress(); diff --git 
a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h index 39d15a586b9..8cb0b1680a7 100644 --- a/src/Columns/ColumnMap.h +++ b/src/Columns/ColumnMap.h @@ -120,6 +120,7 @@ public: ColumnPtr compress() const override; bool hasDynamicStructure() const override { return nested->hasDynamicStructure(); } + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; }; diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 3577ab1ec82..8e0182c7276 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1299,6 +1299,31 @@ void ColumnObject::prepareForSquashing(const std::vector & source_col } } +bool ColumnObject::dynamicStructureEquals(const IColumn & rhs) const +{ + const auto * rhs_object = typeid_cast(&rhs); + if (!rhs_object || typed_paths.size() != rhs_object->typed_paths.size() + || global_max_dynamic_paths != rhs_object->global_max_dynamic_paths || max_dynamic_types != rhs_object->max_dynamic_types + || dynamic_paths.size() != rhs_object->dynamic_paths.size()) + return false; + + for (const auto & [path, column] : typed_paths) + { + auto it = rhs_object->typed_paths.find(path); + if (it == rhs_object->typed_paths.end() || !it->second->dynamicStructureEquals(*column)) + return false; + } + + for (const auto & [path, column] : dynamic_paths) + { + auto it = rhs_object->dynamic_paths.find(path); + if (it == rhs_object->dynamic_paths.end() || !it->second->dynamicStructureEquals(*column)) + return false; + } + + return true; +} + void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & source_columns) { if (!empty()) diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index c7f282d9079..d5370625115 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -172,6 +172,7 @@ public: bool isFinalized() const override; bool hasDynamicStructure() const override { return true; } + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; const PathToColumnMap & getTypedPaths() const { return typed_paths; } @@ -221,6 +222,7 @@ public: void setDynamicPaths(const std::vector & paths); void setMaxDynamicPaths(size_t max_dynamic_paths_); + void setGlobalMaxDynamicPaths(size_t global_max_dynamic_paths_); void setStatistics(const StatisticsPtr & statistics_) { statistics = statistics_; } void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, std::string_view path, const IColumn & column, size_t n); diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index e741eb51c68..42acfdc85be 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -727,6 +727,26 @@ bool ColumnTuple::hasDynamicStructure() const return false; } +bool ColumnTuple::dynamicStructureEquals(const IColumn & rhs) const +{ + if (const auto * rhs_tuple = typeid_cast(&rhs)) + { + const size_t tuple_size = columns.size(); + if (tuple_size != rhs_tuple->columns.size()) + return false; + + for (size_t i = 0; i < tuple_size; ++i) + if (!columns[i]->dynamicStructureEquals(*rhs_tuple->columns[i])) + return false; + + return true; + } + else + { + return false; + } +} + void ColumnTuple::takeDynamicStructureFromSourceColumns(const Columns & source_columns) { std::vector nested_source_columns; diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h index 
6968294aef9..2539c27c441 100644 --- a/src/Columns/ColumnTuple.h +++ b/src/Columns/ColumnTuple.h @@ -138,6 +138,7 @@ public: ColumnPtr & getColumnPtr(size_t idx) { return columns[idx]; } bool hasDynamicStructure() const override; + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; /// Empty tuple needs a public method to manage its size. diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index c6511695f5c..a18dffd8360 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -1376,6 +1376,23 @@ bool ColumnVariant::structureEquals(const IColumn & rhs) const return true; } +bool ColumnVariant::dynamicStructureEquals(const IColumn & rhs) const +{ + const auto * rhs_variant = typeid_cast(&rhs); + if (!rhs_variant) + return false; + + const size_t num_variants = variants.size(); + if (num_variants != rhs_variant->variants.size()) + return false; + + for (size_t i = 0; i < num_variants; ++i) + if (!variants[i]->dynamicStructureEquals(rhs_variant->getVariantByGlobalDiscriminator(globalDiscriminatorByLocal(i)))) + return false; + + return true; +} + ColumnPtr ColumnVariant::compress() const { ColumnPtr local_discriminators_compressed = local_discriminators->compress(); diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index 925eab74af8..2084de4fae7 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -327,6 +327,7 @@ public: void extend(const std::vector & old_to_new_global_discriminators, std::vector> && new_variants_and_discriminators); bool hasDynamicStructure() const override; + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; private: diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index e4fe233ffdf..7131765f99c 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -590,6 +590,9 @@ public: /// Checks if column has dynamic subcolumns. virtual bool hasDynamicStructure() const { return false; } + + /// For columns with dynamic subcolumns checks if columns have equal dynamic structure. + [[nodiscard]] virtual bool dynamicStructureEquals(const IColumn & rhs) const { return structureEquals(rhs); } /// For columns with dynamic subcolumns this method takes dynamic structure from source columns /// and creates proper resulting dynamic structure in advance for merge of these source columns. 
virtual void takeDynamicStructureFromSourceColumns(const std::vector & /*source_columns*/) {} diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 18a75918499..b00668fa8a4 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -26,8 +26,8 @@ namespace ErrorCodes struct SerializeBinaryBulkStateDynamic : public ISerialization::SerializeBinaryBulkState { - SerializationDynamic::DynamicStructureSerializationVersion structure_version; - size_t max_dynamic_types; + SerializationDynamic::DynamicSerializationVersion structure_version; + size_t num_dynamic_types; DataTypePtr variant_type; Names variant_names; SerializationPtr variant_serialization; @@ -81,14 +81,14 @@ void SerializationDynamic::enumerateStreams( settings.path.pop_back(); } -SerializationDynamic::DynamicStructureSerializationVersion::DynamicStructureSerializationVersion(UInt64 version) : value(static_cast(version)) +SerializationDynamic::DynamicSerializationVersion::DynamicSerializationVersion(UInt64 version) : value(static_cast(version)) { checkVersion(version); } -void SerializationDynamic::DynamicStructureSerializationVersion::checkVersion(UInt64 version) +void SerializationDynamic::DynamicSerializationVersion::checkVersion(UInt64 version) { - if (version != VariantTypeName) + if (version != V1 && version != V2) throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Dynamic structure serialization."); } @@ -108,22 +108,17 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Dynamic column structure during serialization of binary bulk state prefix"); /// Write structure serialization version. - UInt64 structure_version = DynamicStructureSerializationVersion::Value::VariantTypeName; + UInt64 structure_version = DynamicSerializationVersion::Value::V2; writeBinaryLittleEndian(structure_version, *stream); auto dynamic_state = std::make_shared(structure_version); - dynamic_state->max_dynamic_types = column_dynamic.getMaxDynamicTypes(); - /// Write max_dynamic_types parameter, because it can differ from the max_dynamic_types - /// that is specified in the Dynamic type (we could decrease it before merge). - writeVarUInt(dynamic_state->max_dynamic_types, *stream); - dynamic_state->variant_type = variant_info.variant_type; dynamic_state->variant_names = variant_info.variant_names; const auto & variant_column = column_dynamic.getVariantColumn(); - /// Write information about variants. - size_t num_variants = dynamic_state->variant_names.size() - 1; /// Don't write shared variant, Dynamic column should always have it. - writeVarUInt(num_variants, *stream); + /// Write information about dynamic types. + dynamic_state->num_dynamic_types = dynamic_state->variant_names.size() - 1; /// -1 for SharedVariant + writeVarUInt(dynamic_state->num_dynamic_types, *stream); if (settings.data_types_binary_encoding) { const auto & variants = assert_cast(*dynamic_state->variant_type).getVariants(); @@ -251,22 +246,25 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD UInt64 structure_version; readBinaryLittleEndian(structure_version, *structure_stream); auto structure_state = std::make_shared(structure_version); - /// Read max_dynamic_types parameter. 
- readVarUInt(structure_state->max_dynamic_types, *structure_stream); + if (structure_state->structure_version.value == DynamicSerializationVersion::Value::V1) + { + /// Skip max_dynamic_types parameter in V1 serialization version. + size_t max_dynamic_types; + readVarUInt(max_dynamic_types, *structure_stream); + } /// Read information about variants. DataTypes variants; - size_t num_variants; - readVarUInt(num_variants, *structure_stream); - variants.reserve(num_variants + 1); /// +1 for shared variant. + readVarUInt(structure_state->num_dynamic_types, *structure_stream); + variants.reserve(structure_state->num_dynamic_types + 1); /// +1 for shared variant. if (settings.data_types_binary_encoding) { - for (size_t i = 0; i != num_variants; ++i) + for (size_t i = 0; i != structure_state->num_dynamic_types; ++i) variants.push_back(decodeDataType(*structure_stream)); } else { String data_type_name; - for (size_t i = 0; i != num_variants; ++i) + for (size_t i = 0; i != structure_state->num_dynamic_types; ++i) { readStringBinary(data_type_name, *structure_stream); variants.push_back(DataTypeFactory::instance().get(data_type_name)); @@ -364,9 +362,6 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreamsAndCountTotalSi if (!variant_info.variant_type->equals(*dynamic_state->variant_type)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of internal columns of Dynamic. Expected: {}, Got: {}", dynamic_state->variant_type->getName(), variant_info.variant_type->getName()); - if (column_dynamic.getMaxDynamicTypes() != dynamic_state->max_dynamic_types) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_types parameter of Dynamic. Expected: {}, Got: {}", dynamic_state->max_dynamic_types, column_dynamic.getMaxDynamicTypes()); - settings.path.push_back(Substream::DynamicData); assert_cast(*dynamic_state->variant_serialization) .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics( @@ -424,7 +419,7 @@ void SerializationDynamic::deserializeBinaryBulkWithMultipleStreams( if (mutable_column->empty()) { - column_dynamic.setMaxDynamicPaths(structure_state->max_dynamic_types); + column_dynamic.setMaxDynamicPaths(structure_state->num_dynamic_types); column_dynamic.setVariantType(structure_state->variant_type); column_dynamic.setStatistics(structure_state->statistics); } diff --git a/src/DataTypes/Serializations/SerializationDynamic.h b/src/DataTypes/Serializations/SerializationDynamic.h index f34b5d0e770..ac98bbbc8b5 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.h +++ b/src/DataTypes/Serializations/SerializationDynamic.h @@ -16,18 +16,28 @@ public: { } - struct DynamicStructureSerializationVersion + struct DynamicSerializationVersion { enum Value { - VariantTypeName = 1, + /// V1 serialization: + /// - DynamicStructure stream: + /// + /// + /// + /// (only in MergeTree serialization) + /// (only in MergeTree serialization) + /// - DynamicData stream: contains the data of nested Variant column. + V1 = 1, + /// V2 serialization: the same as V1 but without max_dynamic_types parameter in DynamicStructure stream. 
+ V2 = 2, }; Value value; static void checkVersion(UInt64 version); - explicit DynamicStructureSerializationVersion(UInt64 version); + explicit DynamicSerializationVersion(UInt64 version); }; void enumerateStreams( @@ -113,9 +123,9 @@ private: struct DeserializeBinaryBulkStateDynamicStructure : public ISerialization::DeserializeBinaryBulkState { - DynamicStructureSerializationVersion structure_version; + DynamicSerializationVersion structure_version; DataTypePtr variant_type; - size_t max_dynamic_types; + size_t num_dynamic_types; ColumnDynamic::StatisticsPtr statistics; explicit DeserializeBinaryBulkStateDynamicStructure(UInt64 structure_version_) diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 760f6ce750d..b3ac2c52d70 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -68,14 +68,13 @@ SerializationObject::ObjectSerializationVersion::ObjectSerializationVersion(UInt void SerializationObject::ObjectSerializationVersion::checkVersion(UInt64 version) { - if (version != BASIC) + if (version != V1 && version != V2) throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Object structure serialization."); } struct SerializeBinaryBulkStateObject: public ISerialization::SerializeBinaryBulkState { SerializationObject::ObjectSerializationVersion serialization_version; - size_t max_dynamic_paths; std::vector sorted_dynamic_paths; std::unordered_map typed_path_states; std::unordered_map dynamic_path_states; @@ -193,13 +192,10 @@ void SerializationObject::serializeBinaryBulkStatePrefix( throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Object column structure during serialization of binary bulk state prefix"); /// Write serialization version. - UInt64 serialization_version = ObjectSerializationVersion::Value::BASIC; + UInt64 serialization_version = ObjectSerializationVersion::Value::V2; writeBinaryLittleEndian(serialization_version, *stream); auto object_state = std::make_shared(serialization_version); - object_state->max_dynamic_paths = column_object.getMaxDynamicPaths(); - /// Write max_dynamic_paths parameter. - writeVarUInt(object_state->max_dynamic_paths, *stream); /// Write all dynamic paths in sorted order. object_state->sorted_dynamic_paths.reserve(dynamic_paths.size()); for (const auto & [path, _] : dynamic_paths) @@ -353,8 +349,13 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationObject::deserializeOb UInt64 serialization_version; readBinaryLittleEndian(serialization_version, *structure_stream); auto structure_state = std::make_shared(serialization_version); - /// Read max_dynamic_paths parameter. - readVarUInt(structure_state->max_dynamic_paths, *structure_stream); + if (structure_state->structure_version.value == ObjectSerializationVersion::Value::V1) + { + /// Skip max_dynamic_paths parameter in V1 serialization version. + size_t max_dynamic_paths; + readVarUInt(max_dynamic_paths, *structure_stream); + } + /// Read the sorted list of dynamic paths. size_t dynamic_paths_size; readVarUInt(dynamic_paths_size, *structure_stream); @@ -411,9 +412,6 @@ void SerializationObject::serializeBinaryBulkWithMultipleStreams( const auto & shared_data = column_object.getSharedDataPtr(); auto * object_state = checkAndGetState(state); - if (column_object.getMaxDynamicPaths() != object_state->max_dynamic_paths) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_paths parameter of Object. 
Expected: {}, Got: {}", object_state->max_dynamic_paths, column_object.getMaxDynamicPaths()); - if (column_object.getDynamicPaths().size() != object_state->sorted_dynamic_paths.size()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of number of dynamic paths in Object. Expected: {}, Got: {}", object_state->sorted_dynamic_paths.size(), column_object.getDynamicPaths().size()); @@ -538,7 +536,7 @@ void SerializationObject::deserializeBinaryBulkWithMultipleStreams( /// If it's a new object column, set dynamic paths and statistics. if (column_object.empty()) { - column_object.setMaxDynamicPaths(structure_state->max_dynamic_paths); + column_object.setMaxDynamicPaths(structure_state->sorted_dynamic_paths.size()); column_object.setDynamicPaths(structure_state->sorted_dynamic_paths); column_object.setStatistics(structure_state->statistics); } diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h index 62ff9849f45..ba66dd6470e 100644 --- a/src/DataTypes/Serializations/SerializationObject.h +++ b/src/DataTypes/Serializations/SerializationObject.h @@ -19,7 +19,20 @@ public: { enum Value { - BASIC = 0, + /// V1 serialization: + /// - ObjectStructure stream: + /// + /// + /// + /// (only in MergeTree serialization) + /// (only in MergeTree serialization) + /// - ObjectData stream: + /// - ObjectTypedPath stream for each column in typed paths + /// - ObjectDynamicPath stream for each column in dynamic paths + /// - ObjectSharedData stream shared data column. + V1 = 0, + /// V2 serialization: the same as V1 but without max_dynamic_paths parameter in ObjectStructure stream. + V2 = 2, }; Value value; @@ -82,7 +95,6 @@ private: struct DeserializeBinaryBulkStateObjectStructure : public ISerialization::DeserializeBinaryBulkState { ObjectSerializationVersion structure_version; - size_t max_dynamic_paths; std::vector sorted_dynamic_paths; std::unordered_set dynamic_paths; /// Paths statistics. Map (dynamic path) -> (number of non-null values in this path). diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index ed13e581759..a7098e85ea0 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -83,6 +83,7 @@ namespace Setting extern const SettingsBool input_format_ipv4_default_on_conversion_error; extern const SettingsBool input_format_ipv6_default_on_conversion_error; extern const SettingsBool precise_float_parsing; + extern const SettingsBool cast_to_json_disable_dynamic_subcolumns; } namespace ErrorCodes @@ -4056,9 +4057,7 @@ private: { return [this](ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) { - auto res = ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count, context)->assumeMutable(); - res->finalize(); - return res; + return ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count, context)->assumeMutable(); }; } diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 460d74e68bf..0d7d3295e0a 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1466,13 +1466,13 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const "The change of data type {} of column {} to {} is not allowed. 
It has known bugs", old_data_type->getName(), backQuote(column_name), command.data_type->getName()); - bool has_object_type = isObject(command.data_type); - command.data_type->forEachChild([&](const IDataType & type){ has_object_type |= isObject(type); }); - if (has_object_type) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "The change of data type {} of column {} to {} is not supported.", - old_data_type->getName(), backQuote(column_name), command.data_type->getName()); +// bool has_object_type = isObject(command.data_type); +// command.data_type->forEachChild([&](const IDataType & type){ has_object_type |= isObject(type); }); +// if (has_object_type) +// throw Exception( +// ErrorCodes::BAD_ARGUMENTS, +// "The change of data type {} of column {} to {} is not supported.", +// old_data_type->getName(), backQuote(column_name), command.data_type->getName()); } if (command.isRemovingProperty()) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index a859172023f..96623307c8f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -61,22 +61,6 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( } } -void MergeTreeDataPartWriterCompact::initDynamicStreamsIfNeeded(const Block & block) -{ - if (is_dynamic_streams_initialized) - return; - - is_dynamic_streams_initialized = true; - for (const auto & column : columns_list) - { - if (column.type->hasDynamicSubcolumns()) - { - auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, block.getByName(column.name).column, compression); - } - } -} - void MergeTreeDataPartWriterCompact::addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc) { ISerialization::StreamCallback callback = [&](const auto & substream_path) @@ -175,20 +159,25 @@ void writeColumnSingleGranule( void MergeTreeDataPartWriterCompact::write(const Block & block, const IColumn::Permutation * permutation) { - /// On first block of data initialize streams for dynamic subcolumns. - initDynamicStreamsIfNeeded(block); + Block result_block = block; + + /// During serialization columns with dynamic subcolumns (like JSON/Dynamic) must have the same dynamic structure. + /// But it may happen that they don't (for example during ALTER MODIFY COLUMN from some type to JSON/Dynamic). + /// In this case we use dynamic structure of the column from the first written block and adjust columns from + /// the next blocks so they match this dynamic structure. 
+ initOrAdjustDynamicStructureIfNeeded(result_block); /// Fill index granularity for this block /// if it's unknown (in case of insert data or horizontal merge, /// but not in case of vertical merge) if (compute_granularity) { - size_t index_granularity_for_block = computeIndexGranularity(block); + size_t index_granularity_for_block = computeIndexGranularity(result_block); assert(index_granularity_for_block >= 1); - fillIndexGranularity(index_granularity_for_block, block.rows()); + fillIndexGranularity(index_granularity_for_block, result_block.rows()); } - Block result_block = permuteBlockIfNeeded(block, permutation); + result_block = permuteBlockIfNeeded(result_block, permutation); if (!header) header = result_block.cloneEmpty(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h index b440a37222d..03da9c5f754 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h @@ -48,9 +48,7 @@ private: void addToChecksums(MergeTreeDataPartChecksums & checksums); - void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc); - - void initDynamicStreamsIfNeeded(const Block & block); + void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc) override; Block header; @@ -104,8 +102,6 @@ private: /// then finally to 'marks_file'. std::unique_ptr marks_compressor; std::unique_ptr marks_source_hashing; - - bool is_dynamic_streams_initialized = false; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 35914d8c50a..fbf6ac769a0 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -557,6 +557,45 @@ Names MergeTreeDataPartWriterOnDisk::getSkipIndicesColumns() const return Names(skip_indexes_column_names_set.begin(), skip_indexes_column_names_set.end()); } +void MergeTreeDataPartWriterOnDisk::initOrAdjustDynamicStructureIfNeeded(Block & block) +{ + if (!is_dynamic_streams_initialized) + { + for (const auto & column : columns_list) + { + if (column.type->hasDynamicSubcolumns()) + { + /// Create all streams for dynamic subcolumns using dynamic structure from block. + auto compression = getCodecDescOrDefault(column.name, default_codec); + addStreams(column, block.getByName(column.name).column, compression); + } + } + is_dynamic_streams_initialized = true; + block_sample = block.cloneEmpty(); + } + else + { + size_t size = block.columns(); + for (size_t i = 0; i != size; ++i) + { + auto & column = block.getByPosition(i); + const auto & sample_column = block_sample.getByPosition(i); + /// Check if the dynamic structure of this column is different from the sample column. + if (column.type->hasDynamicSubcolumns() && !column.column->dynamicStructureEquals(*sample_column.column)) + { + /// We need to change the dynamic structure of the column so it matches the sample column. + /// To do it, we create empty column of this type, take dynamic structure from sample column + /// and insert data into it. Resulting column will have required dynamic structure and the content + /// of the column in current block. 
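+                /// Note: only columns whose dynamic structure differs from the sample are rebuilt here;
+                /// columns that already match are written as is, so this extra copy is paid only when needed.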
+ auto new_column = sample_column.type->createColumn(); + new_column->takeDynamicStructureFromSourceColumns({sample_column.column}); + new_column->insertRangeFrom(*column.column, 0, column.column->size()); + column.column = std::move(new_column); + } + } + } +} + template struct MergeTreeDataPartWriterOnDisk::Stream; template struct MergeTreeDataPartWriterOnDisk::Stream; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 8d84442981e..69a089eda1b 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -153,6 +153,14 @@ protected: /// Get unique non ordered skip indices column. Names getSkipIndicesColumns() const; + virtual void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc) = 0; + + /// On first block create all required streams for columns with dynamic subcolumns and remember the block sample. + /// On each next block check if dynamic structure of the columns equals to the dynamic structure of the same + /// columns in the sample block. If for some column dynamic structure is different, adjust it so it matches + /// the structure from the sample. + void initOrAdjustDynamicStructureIfNeeded(Block & block); + const MergeTreeIndices skip_indices; const ColumnsStatistics stats; @@ -187,6 +195,10 @@ protected: size_t current_mark = 0; GinIndexStoreFactory::GinIndexStores gin_index_stores; + + bool is_dynamic_streams_initialized = false; + Block block_sample; + private: void initSkipIndices(); void initPrimaryIndex(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 04e07a0588a..ba9d82fd097 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -106,23 +106,6 @@ MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( } } -void MergeTreeDataPartWriterWide::initDynamicStreamsIfNeeded(const DB::Block & block) -{ - if (is_dynamic_streams_initialized) - return; - - is_dynamic_streams_initialized = true; - block_sample = block.cloneEmpty(); - for (const auto & column : columns_list) - { - if (column.type->hasDynamicSubcolumns()) - { - auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, block_sample.getByName(column.name).column, compression); - } - } -} - void MergeTreeDataPartWriterWide::addStreams( const NameAndTypePair & name_and_type, const ColumnPtr & column, @@ -260,15 +243,20 @@ void MergeTreeDataPartWriterWide::shiftCurrentMark(const Granules & granules_wri void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Permutation * permutation) { - /// On first block of data initialize streams for dynamic subcolumns. - initDynamicStreamsIfNeeded(block); + Block block_to_write = block; + + /// During serialization columns with dynamic subcolumns (like JSON/Dynamic) must have the same dynamic structure. + /// But it may happen that they don't (for example during ALTER MODIFY COLUMN from some type to JSON/Dynamic). + /// In this case we use dynamic structure of the column from the first written block and adjust columns from + /// the next blocks so they match this dynamic structure. 
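+    /// The sample block and the dynamic streams are now kept by the common base class
+    /// (MergeTreeDataPartWriterOnDisk::initOrAdjustDynamicStructureIfNeeded), so the Wide and Compact
+    /// writers share one initialize-or-adjust path instead of each keeping its own
+    /// is_dynamic_streams_initialized flag and block_sample.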
+ initOrAdjustDynamicStructureIfNeeded(block_to_write); /// Fill index granularity for this block /// if it's unknown (in case of insert data or horizontal merge, /// but not in case of vertical part of vertical merge) if (compute_granularity) { - size_t index_granularity_for_block = computeIndexGranularity(block); + size_t index_granularity_for_block = computeIndexGranularity(block_to_write); if (rows_written_in_last_mark > 0) { size_t rows_left_in_last_mark = index_granularity.getMarkRows(getCurrentMark()) - rows_written_in_last_mark; @@ -286,11 +274,9 @@ void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Perm } } - fillIndexGranularity(index_granularity_for_block, block.rows()); + fillIndexGranularity(index_granularity_for_block, block_to_write.rows()); } - Block block_to_write = block; - auto granules_to_write = getGranulesToWrite(index_granularity, block_to_write.rows(), getCurrentMark(), rows_written_in_last_mark); auto offset_columns = written_offset_columns ? *written_offset_columns : WrittenOffsetColumns{}; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h index ab86ed27c7e..78dfc93c4d2 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h @@ -91,9 +91,7 @@ private: void addStreams( const NameAndTypePair & name_and_type, const ColumnPtr & column, - const ASTPtr & effective_codec_desc); - - void initDynamicStreamsIfNeeded(const Block & block); + const ASTPtr & effective_codec_desc) override; /// Method for self check (used in debug-build only). Checks that written /// data and corresponding marks are consistent. Otherwise throws logical @@ -139,10 +137,6 @@ private: /// How many rows we have already written in the current mark. /// More than zero when incoming blocks are smaller then their granularity. 
size_t rows_written_in_last_mark = 0; - - Block block_sample; - - bool is_dynamic_streams_initialized = false; }; } diff --git a/tests/queries/0_stateless/03246_alter_from_string_to_json.reference b/tests/queries/0_stateless/03246_alter_from_string_to_json.reference new file mode 100644 index 00000000000..a2d3a799fff --- /dev/null +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.reference @@ -0,0 +1,134 @@ +All paths: +['key0','key1','key2','key3','key4','key5'] +Shared data paths: +key2 +key3 +key4 +key5 +{"key0":"value0"} +{"key1":"value1"} +{"key0":"value2"} +{"key1":"value3"} +{"key0":"value4"} +{"key1":"value5"} +{"key0":"value6"} +{"key1":"value7"} +{"key0":"value8"} +{"key1":"value9"} +{"key2":"value300000"} +{"key3":"value300001"} +{"key2":"value300002"} +{"key3":"value300003"} +{"key2":"value300004"} +{"key3":"value300005"} +{"key2":"value300006"} +{"key3":"value300007"} +{"key2":"value300008"} +{"key3":"value300009"} +{"key4":"value600000"} +{"key5":"value600001"} +{"key4":"value600002"} +{"key5":"value600003"} +{"key4":"value600004"} +{"key5":"value600005"} +{"key4":"value600006"} +{"key5":"value600007"} +{"key4":"value600008"} +{"key5":"value600009"} +value0 \N \N \N \N \N +\N value1 \N \N \N \N +value2 \N \N \N \N \N +\N value3 \N \N \N \N +value4 \N \N \N \N \N +\N value5 \N \N \N \N +value6 \N \N \N \N \N +\N value7 \N \N \N \N +value8 \N \N \N \N \N +\N value9 \N \N \N \N +\N \N value300000 \N \N \N +\N \N \N value300001 \N \N +\N \N value300002 \N \N \N +\N \N \N value300003 \N \N +\N \N value300004 \N \N \N +\N \N \N value300005 \N \N +\N \N value300006 \N \N \N +\N \N \N value300007 \N \N +\N \N value300008 \N \N \N +\N \N \N value300009 \N \N +\N \N \N \N value600000 \N +\N \N \N \N \N value600001 +\N \N \N \N value600002 \N +\N \N \N \N \N value600003 +\N \N \N \N value600004 \N +\N \N \N \N \N value600005 +\N \N \N \N value600006 \N +\N \N \N \N \N value600007 +\N \N \N \N value600008 \N +\N \N \N \N \N value600009 +All paths: +['key0','key1','key2','key3','key4','key5'] +Shared data paths: +key2 +key3 +key4 +key5 +{"key0":"value0"} +{"key1":"value1"} +{"key0":"value2"} +{"key1":"value3"} +{"key0":"value4"} +{"key1":"value5"} +{"key0":"value6"} +{"key1":"value7"} +{"key0":"value8"} +{"key1":"value9"} +{"key2":"value300000"} +{"key3":"value300001"} +{"key2":"value300002"} +{"key3":"value300003"} +{"key2":"value300004"} +{"key3":"value300005"} +{"key2":"value300006"} +{"key3":"value300007"} +{"key2":"value300008"} +{"key3":"value300009"} +{"key4":"value600000"} +{"key5":"value600001"} +{"key4":"value600002"} +{"key5":"value600003"} +{"key4":"value600004"} +{"key5":"value600005"} +{"key4":"value600006"} +{"key5":"value600007"} +{"key4":"value600008"} +{"key5":"value600009"} +value0 \N \N \N \N \N +\N value1 \N \N \N \N +value2 \N \N \N \N \N +\N value3 \N \N \N \N +value4 \N \N \N \N \N +\N value5 \N \N \N \N +value6 \N \N \N \N \N +\N value7 \N \N \N \N +value8 \N \N \N \N \N +\N value9 \N \N \N \N +\N \N value300000 \N \N \N +\N \N \N value300001 \N \N +\N \N value300002 \N \N \N +\N \N \N value300003 \N \N +\N \N value300004 \N \N \N +\N \N \N value300005 \N \N +\N \N value300006 \N \N \N +\N \N \N value300007 \N \N +\N \N value300008 \N \N \N +\N \N \N value300009 \N \N +\N \N \N \N value600000 \N +\N \N \N \N \N value600001 +\N \N \N \N value600002 \N +\N \N \N \N \N value600003 +\N \N \N \N value600004 \N +\N \N \N \N \N value600005 +\N \N \N \N value600006 \N +\N \N \N \N \N value600007 +\N \N \N \N value600008 \N +\N \N \N \N \N value600009 diff 
--git a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 new file mode 100644 index 00000000000..a13867b145d --- /dev/null +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 @@ -0,0 +1,32 @@ +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (x UInt64, json String) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;', + 'create table test (x UInt64, json String) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;'] -%} + +{{ create_command }} + +insert into test select number, toJSONString(map('key' || multiIf(number < 300000, number % 2, number < 600000, number % 2 + 2, number % 2 + 4), 'value' || number)) from numbers(1000000); + +alter table test modify column json JSON settings mutations_sync=1; + +select 'All paths:'; +select distinctJSONPaths(json) from test; +select 'Shared data paths:'; +select distinct (arrayJoin(JSONSharedDataPaths(json))) as path from test order by path; +select json from test order by x limit 10; +select json from test order by x limit 10 offset 300000; +select json from test order by x limit 10 offset 600000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 300000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 600000; + +select json from test format Null; +select json from test order by x format Null; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test format Null; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x format Null; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference new file mode 100644 index 00000000000..ca2fb7e8ff9 --- /dev/null +++ b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference @@ -0,0 +1,12 @@ +5000 +leonardomso/33-js-concepts 3 +ytdl-org/youtube-dl 3 +Bogdanp/neko 2 +bminossi/AllVideoPocsFromHackerOne 2 +disclose/diodata 2 +Commit 182 +chipeo345 119 +phanwi346 114 +Nicholas Piggin 95 +direwolf-github 49 +2 diff --git a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh new file mode 100755 index 00000000000..931d106120c --- /dev/null +++ b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-s3-storage, long +# ^ no-s3-storage: too memory hungry + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data String) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" + +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} \ + --max_memory_usage 10G --query "INSERT INTO ghdata FORMAT JSONAsString" + +${CLICKHOUSE_CLIENT} -q "ALTER TABLE ghdata MODIFY column data JSON SETTINGS mutations_sync=1" --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" + +${CLICKHOUSE_CLIENT} -q \ +"SELECT data.repo.name, count() AS stars FROM ghdata \ + WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" + +${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ +"SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ + ARRAY JOIN data.payload.commits[].author.name \ + GROUP BY name ORDER BY c DESC, name LIMIT 5" + +${CLICKHOUSE_CLIENT} -q "SELECT max(data.payload.pull_request.assignees[].size0) FROM ghdata" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata" diff --git a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql new file mode 100644 index 00000000000..87e10df9cc8 --- /dev/null +++ b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql @@ -0,0 +1,17 @@ +set allow_experimental_json_type=1; +set max_insert_block_size=10000; +set max_block_size=10000; + +drop table if exists test; +drop named collection if exists json_alter_fuzzer; + +create table test (json String) engine=MergeTree order by tuple(); +create named collection json_alter_fuzzer AS json_str='{}'; +insert into test select * from fuzzJSON(json_alter_fuzzer, reuse_output=true, max_output_length=128) limit 200000; +alter table test modify column json JSON settings mutations_sync=1; +select json from test format Null; +optimize table test final; +select json from test format Null; +drop named collection json_alter_fuzzer; +drop table test; + From a9fc07d9af728f56b7b43c53403e278ae69e8096 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 7 Oct 2024 07:06:10 +0000 Subject: [PATCH 077/566] Remove unneded changes --- src/Storages/AlterCommands.cpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 0d7d3295e0a..9972b34ecc4 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1465,14 +1465,6 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const ErrorCodes::BAD_ARGUMENTS, "The change of data type {} of column {} to {} is not allowed. 
It has known bugs", old_data_type->getName(), backQuote(column_name), command.data_type->getName()); - -// bool has_object_type = isObject(command.data_type); -// command.data_type->forEachChild([&](const IDataType & type){ has_object_type |= isObject(type); }); -// if (has_object_type) -// throw Exception( -// ErrorCodes::BAD_ARGUMENTS, -// "The change of data type {} of column {} to {} is not supported.", -// old_data_type->getName(), backQuote(column_name), command.data_type->getName()); } if (command.isRemovingProperty()) From a10c2674fe15c977a51c1ae7054f9f8e9bc4f7a3 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 7 Oct 2024 07:20:10 +0000 Subject: [PATCH 078/566] Add example in docs --- docs/en/sql-reference/data-types/newjson.md | 22 +++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/docs/en/sql-reference/data-types/newjson.md b/docs/en/sql-reference/data-types/newjson.md index 68952590eb9..f799072a02f 100644 --- a/docs/en/sql-reference/data-types/newjson.md +++ b/docs/en/sql-reference/data-types/newjson.md @@ -630,6 +630,28 @@ SELECT arrayJoin(distinctJSONPathsAndTypes(json)) FROM s3('s3://clickhouse-publi └─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┘ ``` +## ALTER MODIFY COLUMN to JSON type + +It's possible to alter an existing table and change the type of the column to the new `JSON` type. Right now only alter from `String` type is supported. + +**Example** + +```sql +CREATE TABLE test (json String) ENGINE=MergeTree ORDeR BY tuple(); +INSERT INTO test VALUES ('{"a" : 42}'), ('{"a" : 43, "b" : "Hello"}'), ('{"a" : 44, "b" : [1, 2, 3]}')), ('{"c" : "2020-01-01"}'); +ALTER TABLE test MODIFY COLUMN json JSON; +SELECT json, json.a, json.b, json.c FROM test; +``` + +```text + ┌─json─────────────────────────┬─json.a─┬─json.b──┬─json.c─────┐ +1. │ {"a":"42"} │ 42 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ +2. │ {"a":"43","b":"Hello"} │ 43 │ Hello │ ᴺᵁᴸᴸ │ +3. │ {"a":"44","b":["1","2","3"]} │ 44 │ [1,2,3] │ ᴺᵁᴸᴸ │ +4. 
│ {"c":"2020-01-01"} │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-01 │ + └──────────────────────────────┴────────┴─────────┴────────────┘ +``` + ## Tips for better usage of the JSON type Before creating `JSON` column and loading data into it, consider the following tips: From 07da0c99b8318cd368c52a0d573e598599207196 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 8 Oct 2024 05:52:25 +0000 Subject: [PATCH 079/566] Fix tests --- .../03225_alter_to_json_not_supported.reference | 0 .../03225_alter_to_json_not_supported.sql | 15 --------------- .../03248_string_to_json_alter_fuzz.sql | 4 ++-- 3 files changed, 2 insertions(+), 17 deletions(-) delete mode 100644 tests/queries/0_stateless/03225_alter_to_json_not_supported.reference delete mode 100644 tests/queries/0_stateless/03225_alter_to_json_not_supported.sql diff --git a/tests/queries/0_stateless/03225_alter_to_json_not_supported.reference b/tests/queries/0_stateless/03225_alter_to_json_not_supported.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql b/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql deleted file mode 100644 index 398494d56de..00000000000 --- a/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql +++ /dev/null @@ -1,15 +0,0 @@ -set allow_experimental_json_type = 1; - -drop table if exists test; -create table test (s String) engine=MergeTree order by tuple(); -alter table test modify column s JSON; -- { serverError BAD_ARGUMENTS } -drop table test; - -create table test (s Array(String)) engine=MergeTree order by tuple(); -alter table test modify column s Array(JSON); -- { serverError BAD_ARGUMENTS } -drop table test; - -create table test (s Tuple(String, String)) engine=MergeTree order by tuple(); -alter table test modify column s Tuple(JSON, String); -- { serverError BAD_ARGUMENTS } -drop table test; - diff --git a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql index 87e10df9cc8..d4d775732e8 100644 --- a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql +++ b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql @@ -7,8 +7,8 @@ drop named collection if exists json_alter_fuzzer; create table test (json String) engine=MergeTree order by tuple(); create named collection json_alter_fuzzer AS json_str='{}'; -insert into test select * from fuzzJSON(json_alter_fuzzer, reuse_output=true, max_output_length=128) limit 200000; -alter table test modify column json JSON settings mutations_sync=1; +insert into test select * from fuzzJSON(json_alter_fuzzer, reuse_output=true, max_output_length=64) limit 200000; +alter table test modify column json JSON(max_dynamic_paths=100) settings mutations_sync=1; select json from test format Null; optimize table test final; select json from test format Null; From c6b58f4db2461bcdc09929b67a84b9d061ddefd5 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 8 Oct 2024 08:01:45 +0000 Subject: [PATCH 080/566] Better docs --- docs/en/sql-reference/data-types/newjson.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/en/sql-reference/data-types/newjson.md b/docs/en/sql-reference/data-types/newjson.md index f799072a02f..8e9eeb43c72 100644 --- a/docs/en/sql-reference/data-types/newjson.md +++ b/docs/en/sql-reference/data-types/newjson.md @@ -644,12 +644,12 @@ SELECT json, json.a, json.b, json.c FROM test; ``` ```text - ┌─json─────────────────────────┬─json.a─┬─json.b──┬─json.c─────┐ 
-1. │ {"a":"42"} │ 42 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -2. │ {"a":"43","b":"Hello"} │ 43 │ Hello │ ᴺᵁᴸᴸ │ -3. │ {"a":"44","b":["1","2","3"]} │ 44 │ [1,2,3] │ ᴺᵁᴸᴸ │ -4. │ {"c":"2020-01-01"} │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-01 │ - └──────────────────────────────┴────────┴─────────┴────────────┘ +┌─json─────────────────────────┬─json.a─┬─json.b──┬─json.c─────┐ +│ {"a":"42"} │ 42 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ +│ {"a":"43","b":"Hello"} │ 43 │ Hello │ ᴺᵁᴸᴸ │ +│ {"a":"44","b":["1","2","3"]} │ 44 │ [1,2,3] │ ᴺᵁᴸᴸ │ +│ {"c":"2020-01-01"} │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-01 │ +└──────────────────────────────┴────────┴─────────┴────────────┘ ``` ## Tips for better usage of the JSON type From 41588b05cf1c8104a1e2e344b043a4eec5db5f10 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 8 Oct 2024 08:10:21 +0000 Subject: [PATCH 081/566] Fix test --- ...mic_variant_in_order_by_group_by.reference | 188 +++++++++--------- ...1_dynamic_variant_in_order_by_group_by.sql | 32 +-- 2 files changed, 110 insertions(+), 110 deletions(-) diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference index 5c7b4cb0bea..5983dd15f5b 100644 --- a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference @@ -20,98 +20,6 @@ 4 0 1 -4 -3 -2 -0 -1 -4 -3 -2 -[4] -[3] -[2] -[0] -[1] -{'str':0} -{'str':1} -{'str':4} -{'str':3} -{'str':2} -0 -1 -2 -3 -4 -\N -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 -4 -3 -2 -0 -1 -4 -3 -2 -[4] -[3] -[2] -[0] -[1] -{'str':0} -{'str':1} -{'str':4} -{'str':3} -{'str':2} -0 -1 -2 -3 -4 -\N -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 2 3 4 @@ -120,11 +28,11 @@ 2 3 4 -[4] [0] [1] [2] [3] +[4] {'str':0} {'str':1} {'str':2} @@ -166,11 +74,103 @@ 2 3 4 -[4] [0] [1] [2] [3] +[4] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[0] +[1] +[2] +[3] +[4] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[0] +[1] +[2] +[3] +[4] {'str':0} {'str':1} {'str':2} diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql index 6e4a39c7234..a53b02e8e41 100644 --- a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql @@ -53,10 +53,10 @@ select * from test order by tuple(d); select * from test order by array(d); select * from test order by map('str', d); -select * from test group by d; -select * from test group by tuple(d); -select array(d) from test group by array(d); -select map('str', d) from test group by map('str', d); +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; select * from test group by grouping sets ((d), ('str')) order by all; set allow_experimental_analyzer=0; @@ -86,10 +86,10 @@ select * from test order by tuple(d); select * from test order by array(d); select * from test order by map('str', d); -select * from test group by d; -select * from test group by tuple(d); -select array(d) from test group 
by array(d); -select map('str', d) from test group by map('str', d); +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; select * from test group by grouping sets ((d), ('str')) order by all; drop table test; @@ -124,10 +124,10 @@ select * from test order by tuple(d); select * from test order by array(d); select * from test order by map('str', d); -select * from test group by d; -select * from test group by tuple(d); -select array(d) from test group by array(d); -select map('str', d) from test group by map('str', d); +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; select * from test group by grouping sets ((d), ('str')) order by all; set allow_experimental_analyzer=0; @@ -157,10 +157,10 @@ select * from test order by tuple(d); select * from test order by array(d); select * from test order by map('str', d); -select * from test group by d; -select * from test group by tuple(d); -select array(d) from test group by array(d); -select map('str', d) from test group by map('str', d); +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; select * from test group by grouping sets ((d), ('str')) order by all; drop table test; From c4cc4cca91ee5191cdc37ef3de14ea3cd70514d6 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 9 Oct 2024 03:14:48 +0000 Subject: [PATCH 082/566] Fix tests and builds --- .../MergeTreeDataPartWriterCompact.cpp | 2 +- .../MergeTree/MergeTreeDataPartWriterWide.cpp | 2 +- .../03246_alter_from_string_to_json.reference | 160 +++++++++--------- .../03246_alter_from_string_to_json.sql.j2 | 11 +- .../03248_string_to_json_alter_fuzz.reference | 0 .../03248_string_to_json_alter_fuzz.sql | 17 -- 6 files changed, 88 insertions(+), 104 deletions(-) delete mode 100644 tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference delete mode 100644 tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 96623307c8f..377677c5244 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -57,7 +57,7 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( for (const auto & column : columns_list) { auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, nullptr, compression); + MergeTreeDataPartWriterCompact::addStreams(column, nullptr, compression); } } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index ba9d82fd097..f015fcb0d10 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -102,7 +102,7 @@ MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( for (const auto & column : columns_list) { auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, nullptr, compression); + MergeTreeDataPartWriterWide::addStreams(column, 
nullptr, compression); } } diff --git a/tests/queries/0_stateless/03246_alter_from_string_to_json.reference b/tests/queries/0_stateless/03246_alter_from_string_to_json.reference index a2d3a799fff..8253c4fef48 100644 --- a/tests/queries/0_stateless/03246_alter_from_string_to_json.reference +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.reference @@ -15,26 +15,26 @@ key5 {"key1":"value7"} {"key0":"value8"} {"key1":"value9"} -{"key2":"value300000"} -{"key3":"value300001"} -{"key2":"value300002"} -{"key3":"value300003"} -{"key2":"value300004"} -{"key3":"value300005"} -{"key2":"value300006"} -{"key3":"value300007"} -{"key2":"value300008"} -{"key3":"value300009"} -{"key4":"value600000"} -{"key5":"value600001"} -{"key4":"value600002"} -{"key5":"value600003"} -{"key4":"value600004"} -{"key5":"value600005"} -{"key4":"value600006"} -{"key5":"value600007"} -{"key4":"value600008"} -{"key5":"value600009"} +{"key2":"value60000"} +{"key3":"value60001"} +{"key2":"value60002"} +{"key3":"value60003"} +{"key2":"value60004"} +{"key3":"value60005"} +{"key2":"value60006"} +{"key3":"value60007"} +{"key2":"value60008"} +{"key3":"value60009"} +{"key4":"value120000"} +{"key5":"value120001"} +{"key4":"value120002"} +{"key5":"value120003"} +{"key4":"value120004"} +{"key5":"value120005"} +{"key4":"value120006"} +{"key5":"value120007"} +{"key4":"value120008"} +{"key5":"value120009"} value0 \N \N \N \N \N \N value1 \N \N \N \N value2 \N \N \N \N \N @@ -45,26 +45,26 @@ value6 \N \N \N \N \N \N value7 \N \N \N \N value8 \N \N \N \N \N \N value9 \N \N \N \N -\N \N value300000 \N \N \N -\N \N \N value300001 \N \N -\N \N value300002 \N \N \N -\N \N \N value300003 \N \N -\N \N value300004 \N \N \N -\N \N \N value300005 \N \N -\N \N value300006 \N \N \N -\N \N \N value300007 \N \N -\N \N value300008 \N \N \N -\N \N \N value300009 \N \N -\N \N \N \N value600000 \N -\N \N \N \N \N value600001 -\N \N \N \N value600002 \N -\N \N \N \N \N value600003 -\N \N \N \N value600004 \N -\N \N \N \N \N value600005 -\N \N \N \N value600006 \N -\N \N \N \N \N value600007 -\N \N \N \N value600008 \N -\N \N \N \N \N value600009 +\N \N value60000 \N \N \N +\N \N \N value60001 \N \N +\N \N value60002 \N \N \N +\N \N \N value60003 \N \N +\N \N value60004 \N \N \N +\N \N \N value60005 \N \N +\N \N value60006 \N \N \N +\N \N \N value60007 \N \N +\N \N value60008 \N \N \N +\N \N \N value60009 \N \N +\N \N \N \N value120000 \N +\N \N \N \N \N value120001 +\N \N \N \N value120002 \N +\N \N \N \N \N value120003 +\N \N \N \N value120004 \N +\N \N \N \N \N value120005 +\N \N \N \N value120006 \N +\N \N \N \N \N value120007 +\N \N \N \N value120008 \N +\N \N \N \N \N value120009 All paths: ['key0','key1','key2','key3','key4','key5'] Shared data paths: @@ -82,26 +82,26 @@ key5 {"key1":"value7"} {"key0":"value8"} {"key1":"value9"} -{"key2":"value300000"} -{"key3":"value300001"} -{"key2":"value300002"} -{"key3":"value300003"} -{"key2":"value300004"} -{"key3":"value300005"} -{"key2":"value300006"} -{"key3":"value300007"} -{"key2":"value300008"} -{"key3":"value300009"} -{"key4":"value600000"} -{"key5":"value600001"} -{"key4":"value600002"} -{"key5":"value600003"} -{"key4":"value600004"} -{"key5":"value600005"} -{"key4":"value600006"} -{"key5":"value600007"} -{"key4":"value600008"} -{"key5":"value600009"} +{"key2":"value60000"} +{"key3":"value60001"} +{"key2":"value60002"} +{"key3":"value60003"} +{"key2":"value60004"} +{"key3":"value60005"} +{"key2":"value60006"} +{"key3":"value60007"} +{"key2":"value60008"} +{"key3":"value60009"} 
+{"key4":"value120000"} +{"key5":"value120001"} +{"key4":"value120002"} +{"key5":"value120003"} +{"key4":"value120004"} +{"key5":"value120005"} +{"key4":"value120006"} +{"key5":"value120007"} +{"key4":"value120008"} +{"key5":"value120009"} value0 \N \N \N \N \N \N value1 \N \N \N \N value2 \N \N \N \N \N @@ -112,23 +112,23 @@ value6 \N \N \N \N \N \N value7 \N \N \N \N value8 \N \N \N \N \N \N value9 \N \N \N \N -\N \N value300000 \N \N \N -\N \N \N value300001 \N \N -\N \N value300002 \N \N \N -\N \N \N value300003 \N \N -\N \N value300004 \N \N \N -\N \N \N value300005 \N \N -\N \N value300006 \N \N \N -\N \N \N value300007 \N \N -\N \N value300008 \N \N \N -\N \N \N value300009 \N \N -\N \N \N \N value600000 \N -\N \N \N \N \N value600001 -\N \N \N \N value600002 \N -\N \N \N \N \N value600003 -\N \N \N \N value600004 \N -\N \N \N \N \N value600005 -\N \N \N \N value600006 \N -\N \N \N \N \N value600007 -\N \N \N \N value600008 \N -\N \N \N \N \N value600009 +\N \N value60000 \N \N \N +\N \N \N value60001 \N \N +\N \N value60002 \N \N \N +\N \N \N value60003 \N \N +\N \N value60004 \N \N \N +\N \N \N value60005 \N \N +\N \N value60006 \N \N \N +\N \N \N value60007 \N \N +\N \N value60008 \N \N \N +\N \N \N value60009 \N \N +\N \N \N \N value120000 \N +\N \N \N \N \N value120001 +\N \N \N \N value120002 \N +\N \N \N \N \N value120003 +\N \N \N \N value120004 \N +\N \N \N \N \N value120005 +\N \N \N \N value120006 \N +\N \N \N \N \N value120007 +\N \N \N \N value120008 \N +\N \N \N \N \N value120009 diff --git a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 index a13867b145d..e8760b659dc 100644 --- a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 @@ -1,4 +1,5 @@ set allow_experimental_json_type = 1; +set max_block_size = 20000; drop table if exists test; @@ -7,7 +8,7 @@ drop table if exists test; {{ create_command }} -insert into test select number, toJSONString(map('key' || multiIf(number < 300000, number % 2, number < 600000, number % 2 + 2, number % 2 + 4), 'value' || number)) from numbers(1000000); +insert into test select number, toJSONString(map('key' || multiIf(number < 60000, number % 2, number < 120000, number % 2 + 2, number % 2 + 4), 'value' || number)) from numbers(200000); alter table test modify column json JSON settings mutations_sync=1; @@ -16,11 +17,11 @@ select distinctJSONPaths(json) from test; select 'Shared data paths:'; select distinct (arrayJoin(JSONSharedDataPaths(json))) as path from test order by path; select json from test order by x limit 10; -select json from test order by x limit 10 offset 300000; -select json from test order by x limit 10 offset 600000; +select json from test order by x limit 10 offset 60000; +select json from test order by x limit 10 offset 120000; select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10; -select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 300000; -select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 600000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 60000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 120000; select json from test format Null; select json from test 
order by x format Null; diff --git a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql deleted file mode 100644 index d4d775732e8..00000000000 --- a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql +++ /dev/null @@ -1,17 +0,0 @@ -set allow_experimental_json_type=1; -set max_insert_block_size=10000; -set max_block_size=10000; - -drop table if exists test; -drop named collection if exists json_alter_fuzzer; - -create table test (json String) engine=MergeTree order by tuple(); -create named collection json_alter_fuzzer AS json_str='{}'; -insert into test select * from fuzzJSON(json_alter_fuzzer, reuse_output=true, max_output_length=64) limit 200000; -alter table test modify column json JSON(max_dynamic_paths=100) settings mutations_sync=1; -select json from test format Null; -optimize table test final; -select json from test format Null; -drop named collection json_alter_fuzzer; -drop table test; - From df77c6f120beddfe97ff4c8c247473db56c587d7 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:24:47 +0800 Subject: [PATCH 083/566] Print invalid version in exception message --- src/DataTypes/Serializations/SerializationDynamic.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index b00668fa8a4..0e6e866e454 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -89,7 +89,7 @@ SerializationDynamic::DynamicSerializationVersion::DynamicSerializationVersion(U void SerializationDynamic::DynamicSerializationVersion::checkVersion(UInt64 version) { if (version != V1 && version != V2) - throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Dynamic structure serialization."); + throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Dynamic structure serialization: {}", version); } void SerializationDynamic::serializeBinaryBulkStatePrefix( From d10b79020edda1f35f9f3637447cd57d90352bae Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 14 Oct 2024 13:39:03 +0000 Subject: [PATCH 084/566] Refactoring TempDataOnDisk --- src/Common/CurrentMetrics.cpp | 1 + .../gtest_cascade_and_memory_write_buffer.cpp | 2 +- src/Interpreters/Aggregator.cpp | 12 +- src/Interpreters/Aggregator.h | 9 +- src/Interpreters/Context.cpp | 20 +- src/Interpreters/GraceHashJoin.cpp | 60 +- src/Interpreters/GraceHashJoin.h | 2 +- src/Interpreters/HashJoin/HashJoin.cpp | 29 +- src/Interpreters/HashJoin/HashJoin.h | 5 +- src/Interpreters/TableJoin.h | 7 +- src/Interpreters/TemporaryDataOnDisk.cpp | 647 ++++++++---------- src/Interpreters/TemporaryDataOnDisk.h | 233 ++++--- src/Interpreters/tests/gtest_filecache.cpp | 85 ++- .../Algorithms/CollapsingSortedAlgorithm.cpp | 6 +- .../Algorithms/CollapsingSortedAlgorithm.h | 6 +- .../Algorithms/MergingSortedAlgorithm.cpp | 3 +- .../Algorithms/MergingSortedAlgorithm.h | 6 +- .../Algorithms/ReplacingSortedAlgorithm.cpp | 6 +- .../Algorithms/ReplacingSortedAlgorithm.h | 6 +- .../VersionedCollapsingAlgorithm.cpp | 6 +- .../Algorithms/VersionedCollapsingAlgorithm.h | 6 +- .../Merges/CollapsingSortedTransform.h | 2 +- 
.../Merges/MergingSortedTransform.cpp | 2 +- .../Merges/MergingSortedTransform.h | 2 +- .../Merges/ReplacingSortedTransform.h | 4 +- .../Merges/VersionedCollapsingTransform.h | 4 +- .../QueryPlan/BuildQueryPipelineSettings.h | 1 - src/Processors/QueryPlan/SortingStep.cpp | 6 +- .../Transforms/AggregatingTransform.cpp | 27 +- .../Transforms/MergeSortingTransform.cpp | 44 +- .../Transforms/MergeSortingTransform.h | 5 +- src/QueryPipeline/QueryPipelineBuilder.h | 6 + src/QueryPipeline/QueryPlanResourceHolder.h | 2 + src/Server/HTTPHandler.cpp | 39 +- src/Storages/MergeTree/MergeTask.cpp | 124 +--- src/Storages/MergeTree/MergeTask.h | 5 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 5 +- 37 files changed, 646 insertions(+), 789 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index bd62e7e8aae..8d232e11df3 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -45,6 +45,7 @@ M(TemporaryFilesForSort, "Number of temporary files created for external sorting") \ M(TemporaryFilesForAggregation, "Number of temporary files created for external aggregation") \ M(TemporaryFilesForJoin, "Number of temporary files created for JOIN") \ + M(TemporaryFilesForMerge, "Number of temporary files for vertical merge") \ M(TemporaryFilesUnknown, "Number of temporary files created without known purpose") \ M(Read, "Number of read (read, pread, io_getevents, etc.) syscalls in fly") \ M(RemoteRead, "Number of read with remote reader in fly") \ diff --git a/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp b/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp index 23b783173c8..6fd7570c4eb 100644 --- a/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp +++ b/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp @@ -69,7 +69,7 @@ static void testCascadeBufferRedability( auto rbuf = wbuf_readable.tryGetReadBuffer(); ASSERT_FALSE(!rbuf); - concat.appendBuffer(wrapReadBufferPointer(std::move(rbuf))); + concat.appendBuffer(std::move(rbuf)); } std::string decoded_data; diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 0fe1c74ed17..e6fecc37cfa 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -335,7 +335,7 @@ Aggregator::Aggregator(const Block & header_, const Params & params_) : header(header_) , keys_positions(calculateKeysPositions(header, params_)) , params(params_) - , tmp_data(params.tmp_data_scope ? std::make_unique(params.tmp_data_scope, CurrentMetrics::TemporaryFilesForAggregation) : nullptr) + , tmp_data(params.tmp_data_scope ? params.tmp_data_scope->childScope(CurrentMetrics::TemporaryFilesForAggregation) : nullptr) , min_bytes_for_prefetch(getMinBytesForPrefetch()) { /// Use query-level memory tracker @@ -1519,10 +1519,10 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si Stopwatch watch; size_t rows = data_variants.size(); - auto & out_stream = tmp_data->createStream(getHeader(false), max_temp_file_size); + auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart); - LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getPath()); + LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getHolder()->describeFilePath()); /// Flush only two-level data and possibly overflow data. 
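The hunks above and below switch the Aggregator's spilling from `tmp_data->createStream(...)` to per-spill `TemporaryBlockStreamHolder` objects collected in `tmp_files`. A minimal sketch of the new usage pattern, assembled from the call sites changed in this series; the function `spillAndRestore`, its arguments, and the include set are illustrative and not part of the patch:

```cpp
#include <Core/Block.h>
#include <Interpreters/TemporaryDataOnDisk.h>
#include <Common/CurrentMetrics.h>

namespace CurrentMetrics { extern const Metric TemporaryFilesForAggregation; }

using namespace DB;

/// Spill a set of blocks to a temporary file and read them back (illustrative driver).
void spillAndRestore(TemporaryDataOnDiskScopePtr root_scope, const Block & header, const Blocks & blocks)
{
    /// A per-purpose child scope replaces the old per-purpose TemporaryDataOnDisk wrapper object.
    auto scope = root_scope->childScope(CurrentMetrics::TemporaryFilesForAggregation);

    /// The holder owns the temporary file and the block output stream written to it.
    TemporaryBlockStreamHolder out(header, scope.get());
    for (const auto & block : blocks)
        out->write(block);

    /// A separate reader holder is obtained from the write-side holder; the explicit
    /// finishWriting() calls required by the old API are gone from the call sites in this diff.
    auto reader = out.getReadStream();
    while (auto block = reader->read())
    {
        /// ... consume the spilled block ...
    }
}
```

Compared with the old `TemporaryFileStream &` returned by `createStream()`, the holder ties the lifetime of the temporary file to the call site that created it, which is what lets the Aggregator, GraceHashJoin, and HashJoin changes below own their spill files directly.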
@@ -1643,7 +1643,7 @@ template void Aggregator::writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, Method & method, - TemporaryFileStream & out) const + TemporaryBlockStreamHolder & out) const { size_t max_temporary_block_size_rows = 0; size_t max_temporary_block_size_bytes = 0; @@ -1660,14 +1660,14 @@ void Aggregator::writeToTemporaryFileImpl( for (UInt32 bucket = 0; bucket < Method::Data::NUM_BUCKETS; ++bucket) { Block block = convertOneBucketToBlock(data_variants, method, data_variants.aggregates_pool, false, bucket); - out.write(block); + out->write(block); update_max_sizes(block); } if (params.overflow_row) { Block block = prepareBlockAndFillWithoutKey(data_variants, false, true); - out.write(block); + out->write(block); update_max_sizes(block); } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 4de0a640219..bc28d3dccb8 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -309,9 +309,9 @@ public: /// For external aggregation. void writeToTemporaryFile(AggregatedDataVariants & data_variants, size_t max_temp_file_size = 0) const; - bool hasTemporaryData() const { return tmp_data && !tmp_data->empty(); } + bool hasTemporaryData() const { return !tmp_files.empty(); } - const TemporaryDataOnDisk & getTemporaryData() const { return *tmp_data; } + std::vector & getTemporaryData() { return tmp_files; } /// Get data structure of the result. Block getHeader(bool final) const; @@ -355,7 +355,8 @@ private: LoggerPtr log = getLogger("Aggregator"); /// For external aggregation. - TemporaryDataOnDiskPtr tmp_data; + TemporaryDataOnDiskScopePtr tmp_data; + mutable std::vector tmp_files; size_t min_bytes_for_prefetch = 0; @@ -456,7 +457,7 @@ private: void writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, Method & method, - TemporaryFileStream & out) const; + TemporaryBlockStreamHolder & out) const; /// Merge NULL key data from hash table `src` into `dst`. template diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 85cde959b66..6ada12e63f9 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -353,6 +353,8 @@ struct ContextSharedPart : boost::noncopyable /// Child scopes for more fine-grained accounting are created per user/query/etc. /// Initialized once during server startup. TemporaryDataOnDiskScopePtr root_temp_data_on_disk TSA_GUARDED_BY(mutex); + /// TODO: remove, use only root_temp_data_on_disk + VolumePtr temporary_volume_legacy; mutable OnceFlag async_loader_initialized; mutable std::unique_ptr async_loader; /// Thread pool for asynchronous initialization of arbitrary DAG of `LoadJob`s (used for tables loading) @@ -783,10 +785,9 @@ struct ContextSharedPart : boost::noncopyable } /// Special volumes might also use disks that require shutdown. - auto & tmp_data = root_temp_data_on_disk; - if (tmp_data && tmp_data->getVolume()) + if (temporary_volume_legacy) { - auto & disks = tmp_data->getVolume()->getDisks(); + auto & disks = temporary_volume_legacy->getDisks(); for (auto & disk : disks) disk->shutdown(); } @@ -1166,8 +1167,8 @@ VolumePtr Context::getGlobalTemporaryVolume() const SharedLockGuard lock(shared->mutex); /// Calling this method we just bypass the `temp_data_on_disk` and write to the file on the volume directly. /// Volume is the same for `root_temp_data_on_disk` (always set) and `temp_data_on_disk` (if it's set). 
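+    /// After this refactoring the volume is remembered separately in `temporary_volume_legacy`,
+    /// which is set together with `root_temp_data_on_disk` in setTemporaryStoragePath/Policy/InCache
+    /// (see the TODO on that member), instead of being taken from the temporary data scope.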
- if (shared->root_temp_data_on_disk) - return shared->root_temp_data_on_disk->getVolume(); + if (shared->temporary_volume_legacy) + return shared->temporary_volume_legacy; return nullptr; } @@ -1288,7 +1289,8 @@ void Context::setTemporaryStoragePath(const String & path, size_t max_size) TemporaryDataOnDiskSettings temporary_data_on_disk_settings; temporary_data_on_disk_settings.max_size_on_disk = max_size; - shared->root_temp_data_on_disk = std::make_shared(std::move(volume), std::move(temporary_data_on_disk_settings)); + shared->root_temp_data_on_disk = std::make_shared(volume, std::move(temporary_data_on_disk_settings)); + shared->temporary_volume_legacy = volume; } void Context::setTemporaryStoragePolicy(const String & policy_name, size_t max_size) @@ -1336,7 +1338,8 @@ void Context::setTemporaryStoragePolicy(const String & policy_name, size_t max_s TemporaryDataOnDiskSettings temporary_data_on_disk_settings; temporary_data_on_disk_settings.max_size_on_disk = max_size; - shared->root_temp_data_on_disk = std::make_shared(std::move(volume), std::move(temporary_data_on_disk_settings)); + shared->root_temp_data_on_disk = std::make_shared(volume, std::move(temporary_data_on_disk_settings)); + shared->temporary_volume_legacy = volume; } void Context::setTemporaryStorageInCache(const String & cache_disk_name, size_t max_size) @@ -1360,7 +1363,8 @@ void Context::setTemporaryStorageInCache(const String & cache_disk_name, size_t TemporaryDataOnDiskSettings temporary_data_on_disk_settings; temporary_data_on_disk_settings.max_size_on_disk = max_size; - shared->root_temp_data_on_disk = std::make_shared(std::move(volume), file_cache.get(), std::move(temporary_data_on_disk_settings)); + shared->root_temp_data_on_disk = std::make_shared(file_cache.get(), std::move(temporary_data_on_disk_settings)); + shared->temporary_volume_legacy = volume; } void Context::setFlagsPath(const String & path) diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index 978782c851f..a2010b7d94b 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -41,15 +41,15 @@ namespace class AccumulatedBlockReader { public: - AccumulatedBlockReader(TemporaryFileStream & reader_, + AccumulatedBlockReader(TemporaryBlockStreamReaderHolder reader_, std::mutex & mutex_, size_t result_block_size_ = 0) - : reader(reader_) + : reader(std::move(reader_)) , mutex(mutex_) , result_block_size(result_block_size_) { - if (!reader.isWriteFinished()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Reading not finished file"); + if (!reader) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Reader is nullptr"); } Block read() @@ -63,7 +63,7 @@ namespace size_t rows_read = 0; do { - Block block = reader.read(); + Block block = reader->read(); rows_read += block.rows(); if (!block) { @@ -81,7 +81,7 @@ namespace } private: - TemporaryFileStream & reader; + TemporaryBlockStreamReaderHolder reader; std::mutex & mutex; const size_t result_block_size; @@ -124,12 +124,12 @@ class GraceHashJoin::FileBucket : boost::noncopyable public: using BucketLock = std::unique_lock; - explicit FileBucket(size_t bucket_index_, TemporaryFileStream & left_file_, TemporaryFileStream & right_file_, LoggerPtr log_) - : idx{bucket_index_} - , left_file{left_file_} - , right_file{right_file_} - , state{State::WRITING_BLOCKS} - , log{log_} + explicit FileBucket(size_t bucket_index_, TemporaryBlockStreamHolder left_file_, TemporaryBlockStreamHolder right_file_, LoggerPtr log_) + : idx(bucket_index_) + , 
left_file(std::move(left_file_)) + , right_file(std::move(right_file_)) + , state(State::WRITING_BLOCKS) + , log(log_) { } @@ -157,12 +157,6 @@ public: return addBlockImpl(block, right_file, lock); } - bool finished() const - { - std::unique_lock left_lock(left_file_mutex); - return left_file.isEof(); - } - bool empty() const { return is_empty.load(); } AccumulatedBlockReader startJoining() @@ -172,24 +166,21 @@ public: std::unique_lock left_lock(left_file_mutex); std::unique_lock right_lock(right_file_mutex); - left_file.finishWriting(); - right_file.finishWriting(); - state = State::JOINING_BLOCKS; } - return AccumulatedBlockReader(right_file, right_file_mutex); + return AccumulatedBlockReader(right_file.getReadStream(), right_file_mutex); } AccumulatedBlockReader getLeftTableReader() { ensureState(State::JOINING_BLOCKS); - return AccumulatedBlockReader(left_file, left_file_mutex); + return AccumulatedBlockReader(left_file.getReadStream(), left_file_mutex); } const size_t idx; private: - bool addBlockImpl(const Block & block, TemporaryFileStream & writer, std::unique_lock & lock) + bool addBlockImpl(const Block & block, TemporaryBlockStreamHolder & writer, std::unique_lock & lock) { ensureState(State::WRITING_BLOCKS); @@ -199,7 +190,7 @@ private: if (block.rows()) is_empty = false; - writer.write(block); + writer->write(block); return true; } @@ -217,8 +208,8 @@ private: throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid state transition, expected {}, got {}", expected, state.load()); } - TemporaryFileStream & left_file; - TemporaryFileStream & right_file; + TemporaryBlockStreamHolder left_file; + TemporaryBlockStreamHolder right_file; mutable std::mutex left_file_mutex; mutable std::mutex right_file_mutex; @@ -274,7 +265,7 @@ GraceHashJoin::GraceHashJoin( , max_num_buckets{context->getSettingsRef()[Setting::grace_hash_join_max_buckets]} , left_key_names(table_join->getOnlyClause().key_names_left) , right_key_names(table_join->getOnlyClause().key_names_right) - , tmp_data(std::make_unique(tmp_data_, CurrentMetrics::TemporaryFilesForJoin)) + , tmp_data(tmp_data_->childScope(CurrentMetrics::TemporaryFilesForJoin)) , hash_join(makeInMemoryJoin("grace0")) , hash_join_sample_block(hash_join->savedBlockSample()) { @@ -398,10 +389,10 @@ void GraceHashJoin::addBuckets(const size_t bucket_count) for (size_t i = 0; i < bucket_count; ++i) try { - auto & left_file = tmp_data->createStream(left_sample_block); - auto & right_file = tmp_data->createStream(prepareRightBlock(right_sample_block)); + TemporaryBlockStreamHolder left_file = TemporaryBlockStreamHolder(left_sample_block, tmp_data.get()); + TemporaryBlockStreamHolder right_file = TemporaryBlockStreamHolder(prepareRightBlock(right_sample_block), tmp_data.get()); - BucketPtr new_bucket = std::make_shared(current_size + i, left_file, right_file, log); + BucketPtr new_bucket = std::make_shared(current_size + i, std::move(left_file), std::move(right_file), log); tmp_buckets.emplace_back(std::move(new_bucket)); } catch (...) @@ -632,12 +623,9 @@ IBlocksStreamPtr GraceHashJoin::getDelayedBlocks() for (bucket_idx = bucket_idx + 1; bucket_idx < buckets.size(); ++bucket_idx) { current_bucket = buckets[bucket_idx].get(); - if (current_bucket->finished() || current_bucket->empty()) + if (current_bucket->empty()) { - LOG_TRACE(log, "Skipping {} {} bucket {}", - current_bucket->finished() ? "finished" : "", - current_bucket->empty() ? 
"empty" : "", - bucket_idx); + LOG_TRACE(log, "Skipping empty bucket {}", bucket_idx); continue; } diff --git a/src/Interpreters/GraceHashJoin.h b/src/Interpreters/GraceHashJoin.h index d31d6886af7..938c9b1facf 100644 --- a/src/Interpreters/GraceHashJoin.h +++ b/src/Interpreters/GraceHashJoin.h @@ -132,7 +132,7 @@ private: Names left_key_names; Names right_key_names; - TemporaryDataOnDiskPtr tmp_data; + TemporaryDataOnDiskScopePtr tmp_data; Buckets buckets; mutable SharedMutex rehash_mutex; diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 3e7f3deea8b..af23b520abb 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -35,11 +35,6 @@ #include #include -namespace CurrentMetrics -{ - extern const Metric TemporaryFilesForJoin; -} - namespace DB { @@ -64,7 +59,7 @@ struct NotProcessedCrossJoin : public ExtraBlock { size_t left_position; size_t right_block; - std::unique_ptr reader; + TemporaryBlockStreamReaderHolder reader; }; @@ -106,10 +101,7 @@ HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_s , instance_id(instance_id_) , asof_inequality(table_join->getAsofInequality()) , data(std::make_shared()) - , tmp_data( - table_join_->getTempDataOnDisk() - ? std::make_unique(table_join_->getTempDataOnDisk(), CurrentMetrics::TemporaryFilesForJoin) - : nullptr) + , tmp_data(table_join_->getTempDataOnDisk()) , right_sample_block(right_sample_block_) , max_joined_block_rows(table_join->maxJoinedBlockRows()) , instance_log_id(!instance_id_.empty() ? "(" + instance_id_ + ") " : "") @@ -520,10 +512,9 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) && (tmp_stream || (max_bytes_in_join && getTotalByteCount() + block_to_save.allocatedBytes() >= max_bytes_in_join) || (max_rows_in_join && getTotalRowCount() + block_to_save.rows() >= max_rows_in_join))) { - if (tmp_stream == nullptr) - { - tmp_stream = &tmp_data->createStream(right_sample_block); - } + if (!tmp_stream) + tmp_stream = TemporaryBlockStreamHolder(right_sample_block, tmp_data.get()); + tmp_stream->write(block_to_save); return true; } @@ -730,7 +721,7 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) { size_t start_left_row = 0; size_t start_right_block = 0; - std::unique_ptr reader = nullptr; + TemporaryBlockStreamReaderHolder reader; if (not_processed) { auto & continuation = static_cast(*not_processed); @@ -804,11 +795,9 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) if (tmp_stream && rows_added <= max_joined_block_rows) { - if (reader == nullptr) - { - tmp_stream->finishWritingAsyncSafe(); - reader = tmp_stream->getReadStream(); - } + if (!reader) + reader = tmp_stream.getReadStream(); + while (auto block_right = reader->read()) { ++block_number; diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 4c1ebbcdc66..0f50e110db9 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -423,8 +423,9 @@ private: std::vector key_sizes; /// Needed to do external cross join - TemporaryDataOnDiskPtr tmp_data; - TemporaryFileStream* tmp_stream{nullptr}; + TemporaryDataOnDiskScopePtr tmp_data; + TemporaryBlockStreamHolder tmp_stream; + mutable std::once_flag finish_writing; /// Block with columns from the right-side table. 
Block right_sample_block; diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index e1bae55a4ed..4ecbc9eb960 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -20,6 +20,11 @@ #include #include +namespace CurrentMetrics +{ + extern const Metric TemporaryFilesForJoin; +} + namespace DB { @@ -265,7 +270,7 @@ public: VolumePtr getGlobalTemporaryVolume() { return tmp_volume; } - TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data; } + TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data ? tmp_data->childScope(CurrentMetrics::TemporaryFilesForJoin) : nullptr; } ActionsDAG createJoinedBlockActions(ContextPtr context) const; diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 81796678f24..c3b24fb783b 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -27,11 +27,266 @@ namespace DB namespace ErrorCodes { - extern const int TOO_MANY_ROWS_OR_BYTES; + extern const int INVALID_STATE; extern const int LOGICAL_ERROR; extern const int NOT_ENOUGH_SPACE; + extern const int TOO_MANY_ROWS_OR_BYTES; } +namespace +{ + +inline CompressionCodecPtr getCodec(const TemporaryDataOnDiskSettings & settings) +{ + if (settings.compression_codec.empty()) + return CompressionCodecFactory::instance().get("NONE"); + + return CompressionCodecFactory::instance().get(settings.compression_codec); +} + +} + +TemporaryFileHolder::TemporaryFileHolder() +{ + ProfileEvents::increment(ProfileEvents::ExternalProcessingFilesTotal); +} + + +class TemporaryFileInLocalCache : public TemporaryFileHolder +{ +public: + explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t max_file_size = 0) + { + const auto key = FileSegment::Key::random(); + segment_holder = file_cache.set( + key, 0, std::max(10_MiB, max_file_size), + CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); + + chassert(segment_holder->size() == 1); + segment_holder->front().getKeyMetadata()->createBaseDirectory(/* throw_if_failed */true); + } + + std::unique_ptr write() override + { + return std::make_unique(&segment_holder->front()); + } + + std::unique_ptr read(size_t buffer_size) const override + { + return std::make_unique(segment_holder->front().getPath(), /* buf_size = */ buffer_size); + } + + String describeFilePath() const override + { + return fmt::format("fscache://{}", segment_holder->front().getPath()); + } + +private: + FileSegmentsHolderPtr segment_holder; +}; + +class TemporaryFileOnLocalDisk : public TemporaryFileHolder +{ +public: + explicit TemporaryFileOnLocalDisk(VolumePtr volume, size_t max_file_size = 0) + : path_to_file("tmp" + toString(UUIDHelpers::generateV4())) + { + if (max_file_size > 0) + { + auto reservation = volume->reserve(max_file_size); + if (!reservation) + throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk"); + disk = reservation->getDisk(); + } + else + { + disk = volume->getDisk(); + } + chassert(disk); + } + + std::unique_ptr write() override + { + return disk->writeFile(path_to_file); + } + + std::unique_ptr read(size_t buffer_size) const override + { + ReadSettings settings; + settings.local_fs_buffer_size = buffer_size; + settings.remote_fs_buffer_size = buffer_size; + settings.prefetch_buffer_size = buffer_size; + + return disk->readFile(path_to_file, settings); + } + + String describeFilePath() const override + { + return fmt::format("disk({})://{}/{}", disk->getName(), 
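// Editor's sketch, not part of the patch: consumers no longer wrap the parent scope in a
// TemporaryDataOnDisk object; they derive a child scope that shares the parent's file
// provider and limits but tags the files it creates with a purpose-specific metric, matching
// the getTempDataOnDisk() change above. Only childScope() and the metric mirror the patch;
// the helper itself is illustrative.
static TemporaryDataOnDiskScopePtr sketchJoinScope(const TemporaryDataOnDiskScopePtr & query_scope)
{
    if (!query_scope)
        return nullptr;

    /// Sizes written through the child are still accounted up the parent chain, so per-query
    /// and global limits keep applying; only the metric label changes.
    return query_scope->childScope(CurrentMetrics::TemporaryFilesForJoin);
}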
disk->getPath(), path_to_file); + } + + ~TemporaryFileOnLocalDisk() override + try + { + if (disk->exists(path_to_file)) + disk->removeRecursive(path_to_file); + else + LOG_WARNING(getLogger("TemporaryFileOnLocalDisk"), "Temporary path '{}' does not exist in '{}' on disk {}", path_to_file, disk->getPath(), disk->getName()); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + +private: + DiskPtr disk; + String path_to_file; +}; + +TemporaryFileProvider createTemporaryFileProvider(VolumePtr volume) +{ + if (!volume) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Volume is not initialized"); + return [volume](size_t max_size) -> std::unique_ptr + { + return std::make_unique(volume, max_size); + }; +} + +TemporaryFileProvider createTemporaryFileProvider(FileCache * file_cache) +{ + if (!file_cache || !file_cache->isInitialized()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "File cache is not initialized"); + return [file_cache](size_t max_size) -> std::unique_ptr + { + return std::make_unique(*file_cache, max_size); + }; +} + +TemporaryDataOnDiskScopePtr TemporaryDataOnDiskScope::childScope(CurrentMetrics::Metric current_metric) +{ + TemporaryDataOnDiskSettings child_settings = settings; + child_settings.current_metric = current_metric; + return std::make_shared(shared_from_this(), child_settings); +} + +TemporaryDataReadBuffer::TemporaryDataReadBuffer(std::unique_ptr in_) + : ReadBuffer(nullptr, 0) + , compressed_buf(std::move(in_)) +{ + BufferBase::set(compressed_buf->buffer().begin(), compressed_buf->buffer().size(), compressed_buf->offset()); +} + +bool TemporaryDataReadBuffer::nextImpl() +{ + compressed_buf->position() = position(); + if (!compressed_buf->next()) + { + set(compressed_buf->position(), 0); + return false; + } + BufferBase::set(compressed_buf->buffer().begin(), compressed_buf->buffer().size(), compressed_buf->offset()); + return true; +} + +TemporaryDataBuffer::TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t max_file_size) + : WriteBuffer(nullptr, 0) + , parent(parent_) + , file_holder(parent->file_provider(max_file_size == 0 ? 
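// Editor's sketch, not part of the patch: both createTemporaryFileProvider() overloads above
// follow the same factory-closure shape: validate the backend once, capture it, and return a
// std::function that creates one temporary-file holder per call. A reduced standalone
// analogue, with placeholder types and only the standard library:
#include <functional>
#include <memory>
#include <stdexcept>

struct FileHolderLike { virtual ~FileHolderLike() = default; };
struct VolumeLike { };
struct VolumeFile : FileHolderLike
{
    VolumeFile(std::shared_ptr<VolumeLike>, size_t /*max_file_size*/) { }
};

using FileProviderLike = std::function<std::unique_ptr<FileHolderLike>(size_t /*max_file_size*/)>;

inline FileProviderLike makeVolumeProvider(std::shared_ptr<VolumeLike> volume)
{
    if (!volume)
        throw std::logic_error("Volume is not initialized");    /// validated once, at scope creation
    return [volume](size_t max_size) -> std::unique_ptr<FileHolderLike>
    {
        return std::make_unique<VolumeFile>(volume, max_size);  /// one holder per spill file
    };
}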
parent->getSettings().max_size_on_disk : max_file_size)) + , out_compressed_buf(file_holder->write(), getCodec(parent->getSettings())) +{ + WriteBuffer::set(out_compressed_buf->buffer().begin(), out_compressed_buf->buffer().size()); +} + +void TemporaryDataBuffer::nextImpl() +{ + if (!out_compressed_buf) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file buffer writing has been finished"); + + out_compressed_buf->position() = position(); + out_compressed_buf->next(); + BufferBase::set(out_compressed_buf->buffer().begin(), out_compressed_buf->buffer().size(), out_compressed_buf->offset()); + updateAllocAndCheck(); +} + +String TemporaryDataBuffer::describeFilePath() const +{ + return file_holder->describeFilePath(); +} + +TemporaryDataBuffer::~TemporaryDataBuffer() +{ + if (out_compressed_buf) + // read() nor finishWriting() was called + cancel(); +} + +void TemporaryDataBuffer::cancelImpl() noexcept +{ + if (out_compressed_buf) + { + /// CompressedWriteBuffer doesn't call cancel/finalize for wrapped buffer + out_compressed_buf->cancel(); + out_compressed_buf.getHolder()->cancel(); + out_compressed_buf.reset(); + } +} + +void TemporaryDataBuffer::finalizeImpl() +{ + if (!out_compressed_buf) + return; + + /// CompressedWriteBuffer doesn't call cancel/finalize for wrapped buffer + out_compressed_buf->finalize(); + out_compressed_buf.getHolder()->finalize(); + + updateAllocAndCheck(); + out_compressed_buf.reset(); +} + +TemporaryDataBuffer::Stat TemporaryDataBuffer::finishWriting() +{ + /// TemporaryDataBuffer::read can be called from multiple threads + std::call_once(write_finished, [this] + { + if (canceled) + throw Exception(ErrorCodes::INVALID_STATE, "Writing to temporary file buffer was not successful"); + next(); + finalize(); + }); + return stat; +} + +std::unique_ptr TemporaryDataBuffer::read() +{ + finishWriting(); + + /// Keep buffer size less that file size, to avoid memory overhead for large amounts of small files + size_t buffer_size = std::min(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE); + return std::make_unique(file_holder->read(buffer_size)); +} + +void TemporaryDataBuffer::updateAllocAndCheck() +{ + if (!out_compressed_buf) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file buffer writing has been finished"); + + size_t new_compressed_size = out_compressed_buf->getCompressedBytes(); + size_t new_uncompressed_size = out_compressed_buf->getUncompressedBytes(); + + if (unlikely(new_compressed_size < stat.compressed_size || new_uncompressed_size < stat.uncompressed_size)) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Temporary file {} size decreased after write: compressed: {} -> {}, uncompressed: {} -> {}", + file_holder ? 
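// Editor's sketch, not part of the patch: finishWriting() above is made safe to call from
// several places (readers reach it implicitly through read()) by funnelling the final
// next() + finalize() through std::call_once, and the read buffer is capped at the file's
// compressed size so that workloads producing very many small spill files do not pay a full
// DBMS_DEFAULT_BUFFER_SIZE per file. A reduced standalone analogue of both ideas:
#include <algorithm>
#include <cstddef>
#include <mutex>

struct SpillFile
{
    static constexpr size_t default_buffer_size = 1 << 20;   /// stands in for DBMS_DEFAULT_BUFFER_SIZE

    std::once_flag write_finished;
    size_t compressed_size = 0;

    void flushAndFinalize() { /* flush pending data, finalize the on-disk file, update compressed_size */ }

    void finishWriting()
    {
        std::call_once(write_finished, [this] { flushAndFinalize(); });   /// idempotent, thread-safe
    }

    size_t readBufferSize()
    {
        finishWriting();
        return std::min(compressed_size, default_buffer_size);   /// never larger than the file itself
    }
};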
file_holder->describeFilePath() : "NULL", + new_compressed_size, stat.compressed_size, new_uncompressed_size, stat.uncompressed_size); + } + + parent->deltaAllocAndCheck(new_compressed_size - stat.compressed_size, new_uncompressed_size - stat.uncompressed_size); + stat.compressed_size = new_compressed_size; + stat.uncompressed_size = new_uncompressed_size; +} void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta) { @@ -54,391 +309,25 @@ void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssiz stat.uncompressed_size += uncompressed_delta; } -TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_) - : TemporaryDataOnDiskScope(parent_, parent_->getSettings()) +TemporaryBlockStreamHolder::TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size) + : WrapperGuard(std::make_unique(parent_, max_file_size), DBMS_TCP_PROTOCOL_VERSION, header_) + , header(header_) {} -TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Metric metric_scope) - : TemporaryDataOnDiskScope(parent_, parent_->getSettings()) - , current_metric_scope(metric_scope) -{} - -std::unique_ptr TemporaryDataOnDisk::createRawStream(size_t max_file_size) +TemporaryDataBuffer::Stat TemporaryBlockStreamHolder::finishWriting() const { - if (file_cache && file_cache->isInitialized()) - { - auto holder = createCacheFile(max_file_size); - return std::make_unique(std::move(holder)); - } - if (volume) - { - auto tmp_file = createRegularFile(max_file_size); - return std::make_unique(std::move(tmp_file)); - } + if (!holder) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary block stream is not initialized"); - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache and no volume"); + impl->flush(); + return holder->finishWriting(); } -TemporaryFileStream & TemporaryDataOnDisk::createStream(const Block & header, size_t max_file_size) +TemporaryBlockStreamReaderHolder TemporaryBlockStreamHolder::getReadStream() const { - if (file_cache && file_cache->isInitialized()) - { - auto holder = createCacheFile(max_file_size); - - std::lock_guard lock(mutex); - TemporaryFileStreamPtr & tmp_stream = streams.emplace_back(std::make_unique(std::move(holder), header, this)); - return *tmp_stream; - } - if (volume) - { - auto tmp_file = createRegularFile(max_file_size); - std::lock_guard lock(mutex); - TemporaryFileStreamPtr & tmp_stream - = streams.emplace_back(std::make_unique(std::move(tmp_file), header, this)); - return *tmp_stream; - } - - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache and no volume"); -} - -FileSegmentsHolderPtr TemporaryDataOnDisk::createCacheFile(size_t max_file_size) -{ - if (!file_cache) - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache"); - - ProfileEvents::increment(ProfileEvents::ExternalProcessingFilesTotal); - - const auto key = FileSegment::Key::random(); - auto holder = file_cache->set( - key, 0, std::max(10_MiB, max_file_size), - CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); - - chassert(holder->size() == 1); - holder->back().getKeyMetadata()->createBaseDirectory(/* throw_if_failed */true); - - return holder; -} - -TemporaryFileOnDiskHolder TemporaryDataOnDisk::createRegularFile(size_t max_file_size) -{ - if (!volume) - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no volume"); - - 
DiskPtr disk; - if (max_file_size > 0) - { - auto reservation = volume->reserve(max_file_size); - if (!reservation) - throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk"); - disk = reservation->getDisk(); - } - else - { - disk = volume->getDisk(); - } - /// We do not increment ProfileEvents::ExternalProcessingFilesTotal here because it is incremented in TemporaryFileOnDisk constructor. - return std::make_unique(disk, current_metric_scope); -} - -std::vector TemporaryDataOnDisk::getStreams() const -{ - std::vector res; - std::lock_guard lock(mutex); - res.reserve(streams.size()); - for (const auto & stream : streams) - res.push_back(stream.get()); - return res; -} - -bool TemporaryDataOnDisk::empty() const -{ - std::lock_guard lock(mutex); - return streams.empty(); -} - -static inline CompressionCodecPtr getCodec(const TemporaryDataOnDiskSettings & settings) -{ - if (settings.compression_codec.empty()) - return CompressionCodecFactory::instance().get("NONE"); - - return CompressionCodecFactory::instance().get(settings.compression_codec); -} - -struct TemporaryFileStream::OutputWriter -{ - OutputWriter(std::unique_ptr out_buf_, const Block & header_, const TemporaryDataOnDiskSettings & settings) - : out_buf(std::move(out_buf_)) - , out_compressed_buf(*out_buf, getCodec(settings)) - , out_writer(out_compressed_buf, DBMS_TCP_PROTOCOL_VERSION, header_) - { - } - - size_t write(const Block & block) - { - if (finalized) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot write to finalized stream"); - size_t written_bytes = out_writer.write(block); - num_rows += block.rows(); - return written_bytes; - } - - void flush() - { - if (finalized) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot flush finalized stream"); - - out_compressed_buf.next(); - out_buf->next(); - out_writer.flush(); - } - - void finalize() - { - if (finalized) - return; - - /// if we called finalize() explicitly, and got an exception, - /// we don't want to get it again in the destructor, so set finalized flag first - finalized = true; - - out_writer.flush(); - out_compressed_buf.finalize(); - out_buf->finalize(); - } - - ~OutputWriter() - { - try - { - finalize(); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } - } - - std::unique_ptr out_buf; - CompressedWriteBuffer out_compressed_buf; - NativeWriter out_writer; - - std::atomic_size_t num_rows = 0; - - bool finalized = false; -}; - -TemporaryFileStream::Reader::Reader(const String & path_, const Block & header_, size_t size_) - : path(path_) - , size(size_ ? std::min(size_, DBMS_DEFAULT_BUFFER_SIZE) : DBMS_DEFAULT_BUFFER_SIZE) - , header(header_) -{ - LOG_TEST(getLogger("TemporaryFileStream"), "Reading {} from {}", header_.dumpStructure(), path); -} - -TemporaryFileStream::Reader::Reader(const String & path_, size_t size_) - : path(path_) - , size(size_ ? 
std::min(size_, DBMS_DEFAULT_BUFFER_SIZE) : DBMS_DEFAULT_BUFFER_SIZE) -{ - LOG_TEST(getLogger("TemporaryFileStream"), "Reading from {}", path); -} - -Block TemporaryFileStream::Reader::read() -{ - if (!in_reader) - { - if (fs::exists(path)) - in_file_buf = std::make_unique(path, size); - else - in_file_buf = std::make_unique(); - - in_compressed_buf = std::make_unique(*in_file_buf); - if (header.has_value()) - in_reader = std::make_unique(*in_compressed_buf, header.value(), DBMS_TCP_PROTOCOL_VERSION); - else - in_reader = std::make_unique(*in_compressed_buf, DBMS_TCP_PROTOCOL_VERSION); - } - return in_reader->read(); -} - -TemporaryFileStream::TemporaryFileStream(TemporaryFileOnDiskHolder file_, const Block & header_, TemporaryDataOnDisk * parent_) - : parent(parent_) - , header(header_) - , file(std::move(file_)) - , out_writer(std::make_unique(std::make_unique(file->getAbsolutePath()), header, parent->settings)) -{ - LOG_TEST(getLogger("TemporaryFileStream"), "Writing to temporary file {}", file->getAbsolutePath()); -} - -TemporaryFileStream::TemporaryFileStream(FileSegmentsHolderPtr segments_, const Block & header_, TemporaryDataOnDisk * parent_) - : parent(parent_) - , header(header_) - , segment_holder(std::move(segments_)) -{ - if (segment_holder->size() != 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream can be created only from single segment"); - auto out_buf = std::make_unique(&segment_holder->front()); - - LOG_TEST(getLogger("TemporaryFileStream"), "Writing to temporary file {}", out_buf->getFileName()); - out_writer = std::make_unique(std::move(out_buf), header, parent_->settings); -} - -size_t TemporaryFileStream::write(const Block & block) -{ - if (!out_writer) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been finished"); - - updateAllocAndCheck(); - size_t bytes_written = out_writer->write(block); - return bytes_written; -} - -void TemporaryFileStream::flush() -{ - if (!out_writer) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been finished"); - - out_writer->flush(); -} - -TemporaryFileStream::Stat TemporaryFileStream::finishWriting() -{ - if (isWriteFinished()) - return stat; - - if (out_writer) - { - out_writer->finalize(); - /// The amount of written data can be changed after finalization, some buffers can be flushed - /// Need to update the stat - updateAllocAndCheck(); - out_writer.reset(); - - /// reader will be created at the first read call, not to consume memory before it is needed - } - return stat; -} - -TemporaryFileStream::Stat TemporaryFileStream::finishWritingAsyncSafe() -{ - std::call_once(finish_writing, [this]{ finishWriting(); }); - return stat; -} - -bool TemporaryFileStream::isWriteFinished() const -{ - assert(in_reader == nullptr || out_writer == nullptr); - return out_writer == nullptr; -} - -Block TemporaryFileStream::read() -{ - if (!isWriteFinished()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been not finished"); - - if (isEof()) - return {}; - - if (!in_reader) - { - in_reader = std::make_unique(getPath(), header, getSize()); - } - - Block block = in_reader->read(); - if (!block) - { - /// finalize earlier to release resources, do not wait for the destructor - this->release(); - } - return block; -} - -std::unique_ptr TemporaryFileStream::getReadStream() -{ - if (!isWriteFinished()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been not finished"); - - if (isEof()) - return nullptr; - - return std::make_unique(getPath(), header, getSize()); -} - -void 
TemporaryFileStream::updateAllocAndCheck() -{ - assert(out_writer); - size_t new_compressed_size = out_writer->out_compressed_buf.getCompressedBytes(); - size_t new_uncompressed_size = out_writer->out_compressed_buf.getUncompressedBytes(); - - if (unlikely(new_compressed_size < stat.compressed_size || new_uncompressed_size < stat.uncompressed_size)) - { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Temporary file {} size decreased after write: compressed: {} -> {}, uncompressed: {} -> {}", - getPath(), new_compressed_size, stat.compressed_size, new_uncompressed_size, stat.uncompressed_size); - } - - parent->deltaAllocAndCheck(new_compressed_size - stat.compressed_size, new_uncompressed_size - stat.uncompressed_size); - stat.compressed_size = new_compressed_size; - stat.uncompressed_size = new_uncompressed_size; - stat.num_rows = out_writer->num_rows; -} - -bool TemporaryFileStream::isEof() const -{ - return file == nullptr && !segment_holder; -} - -void TemporaryFileStream::release() -{ - if (in_reader) - in_reader.reset(); - - if (out_writer) - { - out_writer->finalize(); - out_writer.reset(); - } - - if (file) - { - file.reset(); - parent->deltaAllocAndCheck(-stat.compressed_size, -stat.uncompressed_size); - } - - if (segment_holder) - segment_holder.reset(); -} - -String TemporaryFileStream::getPath() const -{ - if (file) - return file->getAbsolutePath(); - if (segment_holder && !segment_holder->empty()) - return segment_holder->front().getPath(); - - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream has no file"); -} - -size_t TemporaryFileStream::getSize() const -{ - if (file) - return file->getDisk()->getFileSize(file->getRelativePath()); - if (segment_holder && !segment_holder->empty()) - return segment_holder->front().getReservedSize(); - - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream has no file"); -} - -TemporaryFileStream::~TemporaryFileStream() -{ - try - { - release(); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - assert(false); /// deltaAllocAndCheck with negative can't throw exception - } + if (!holder) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary block stream is not initialized"); + return TemporaryBlockStreamReaderHolder(holder->read(), header, DBMS_TCP_PROTOCOL_VERSION); } } diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index d541c93e031..f8d14b00ac5 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -4,15 +4,21 @@ #include #include -#include +#include #include -#include -#include +#include + #include #include -#include -#include +#include +#include + +#include + +#include + +class FileCacheTest_TemporaryDataReadBufferSize_Test; namespace CurrentMetrics { @@ -25,11 +31,10 @@ namespace DB class TemporaryDataOnDiskScope; using TemporaryDataOnDiskScopePtr = std::shared_ptr; -class TemporaryDataOnDisk; -using TemporaryDataOnDiskPtr = std::unique_ptr; +class TemporaryDataBuffer; +using TemporaryDataBufferPtr = std::unique_ptr; -class TemporaryFileStream; -using TemporaryFileStreamPtr = std::unique_ptr; +class TemporaryFileHolder; class FileCache; @@ -40,15 +45,26 @@ struct TemporaryDataOnDiskSettings /// Compression codec for temporary data, if empty no compression will be used. 
LZ4 by default String compression_codec = "LZ4"; + + /// Read/Write internal buffer size + size_t buffer_size = DBMS_DEFAULT_BUFFER_SIZE; + + /// Metrics counter to increment when temporary file in current scope are created + CurrentMetrics::Metric current_metric = CurrentMetrics::TemporaryFilesUnknown; }; +/// Creates temporary files located on specified resource (disk, fs_cache, etc.) +using TemporaryFileProvider = std::function(size_t)>; +TemporaryFileProvider createTemporaryFileProvider(VolumePtr volume); +TemporaryFileProvider createTemporaryFileProvider(FileCache * file_cache); + /* * Used to account amount of temporary data written to disk. * If limit is set, throws exception if limit is exceeded. * Data can be nested, so parent scope accounts all data written by children. * Scopes are: global -> per-user -> per-query -> per-purpose (sorting, aggregation, etc). */ -class TemporaryDataOnDiskScope : boost::noncopyable +class TemporaryDataOnDiskScope : boost::noncopyable, public std::enable_shared_from_this { public: struct StatAtomic @@ -57,164 +73,155 @@ public: std::atomic uncompressed_size; }; - explicit TemporaryDataOnDiskScope(VolumePtr volume_, TemporaryDataOnDiskSettings settings_) - : volume(std::move(volume_)) + /// Root scope + template + TemporaryDataOnDiskScope(T && storage, TemporaryDataOnDiskSettings settings_) + : file_provider(createTemporaryFileProvider(std::forward(storage))) , settings(std::move(settings_)) {} - explicit TemporaryDataOnDiskScope(VolumePtr volume_, FileCache * file_cache_, TemporaryDataOnDiskSettings settings_) - : volume(std::move(volume_)) - , file_cache(file_cache_) - , settings(std::move(settings_)) - {} - explicit TemporaryDataOnDiskScope(TemporaryDataOnDiskScopePtr parent_, TemporaryDataOnDiskSettings settings_) + TemporaryDataOnDiskScope(TemporaryDataOnDiskScopePtr parent_, TemporaryDataOnDiskSettings settings_) : parent(std::move(parent_)) - , volume(parent->volume) - , file_cache(parent->file_cache) + , file_provider(parent->file_provider) , settings(std::move(settings_)) {} - /// TODO: remove - /// Refactor all code that uses volume directly to use TemporaryDataOnDisk. - VolumePtr getVolume() const { return volume; } + TemporaryDataOnDiskScopePtr childScope(CurrentMetrics::Metric current_metric); const TemporaryDataOnDiskSettings & getSettings() const { return settings; } - protected: + friend class TemporaryDataBuffer; + void deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta); TemporaryDataOnDiskScopePtr parent = nullptr; - VolumePtr volume = nullptr; - FileCache * file_cache = nullptr; + TemporaryFileProvider file_provider; StatAtomic stat; const TemporaryDataOnDiskSettings settings; }; -/* - * Holds the set of temporary files. - * New file stream is created with `createStream`. - * Streams are owned by this object and will be deleted when it is deleted. - * It's a leaf node in temporary data scope tree. - */ -class TemporaryDataOnDisk : private TemporaryDataOnDiskScope +/** Used to hold the wrapper and wrapped object together. + * This class provides a convenient way to manage the lifetime of both the wrapper and the wrapped object. + * The wrapper class (Impl) stores a reference to the wrapped object (Holder), and both objects are owned by this class. + * The lifetime of the wrapper and the wrapped object should be the same. + * This pattern is commonly used when the caller only needs to interact with the wrapper and doesn't need to be aware of the wrapped object. 
+ * Examples: CompressedWriteBuffer and WriteBuffer, and NativeReader and ReadBuffer. + */ +template +class WrapperGuard { - friend class TemporaryFileStream; /// to allow it to call `deltaAllocAndCheck` to account data - public: - using TemporaryDataOnDiskScope::StatAtomic; + WrapperGuard() = default; - explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_); + template + WrapperGuard(std::unique_ptr holder_, Args && ... args) + : holder(std::move(holder_)) + , impl(std::make_unique(*holder, std::forward(args)...)) + {} - explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Metric metric_scope); + Impl * operator->() { return impl.get(); } + const Impl * operator->() const { return impl.get(); } + Impl & operator*() { return *impl; } + const Impl & operator*() const { return *impl; } + operator bool() const { return impl != nullptr; } - /// If max_file_size > 0, then check that there's enough space on the disk and throw an exception in case of lack of free space - TemporaryFileStream & createStream(const Block & header, size_t max_file_size = 0); + const Holder * getHolder() const { return holder.get(); } + Holder * getHolder() { return holder.get(); } - /// Write raw data directly into buffer. - /// Differences from `createStream`: - /// 1) it doesn't account data in parent scope - /// 2) returned buffer owns resources (instead of TemporaryDataOnDisk itself) - /// If max_file_size > 0, then check that there's enough space on the disk and throw an exception in case of lack of free space - std::unique_ptr createRawStream(size_t max_file_size = 0); + void reset() + { + impl.reset(); + holder.reset(); + } - std::vector getStreams() const; - bool empty() const; - - const StatAtomic & getStat() const { return stat; } - -private: - FileSegmentsHolderPtr createCacheFile(size_t max_file_size); - TemporaryFileOnDiskHolder createRegularFile(size_t max_file_size); - - mutable std::mutex mutex; - std::vector streams TSA_GUARDED_BY(mutex); - - typename CurrentMetrics::Metric current_metric_scope = CurrentMetrics::TemporaryFilesUnknown; +protected: + std::unique_ptr holder; + std::unique_ptr impl; }; -/* - * Data can be written into this stream and then read. - * After finish writing, call `finishWriting` and then either call `read` or 'getReadStream'(only one of the two) to read the data. - * Account amount of data written to disk in parent scope. - */ -class TemporaryFileStream : boost::noncopyable +/// Owns temporary file and provides access to it. +/// On destruction, file is removed and all resources are freed. +/// Lifetime of read/write buffers should be less than lifetime of TemporaryFileHolder. 
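// Editor's sketch, not part of the patch: the essence of WrapperGuard above is that the
// wrapped object (Holder, e.g. a file-backed WriteBuffer) and the wrapper built on top of it
// (Impl, e.g. a CompressedWriteBuffer or NativeWriter) live side by side, so they are created
// and destroyed together and in the right order: impl before holder, because members are
// destroyed in reverse declaration order. A standalone analogue:
#include <memory>
#include <utility>

template <typename Impl, typename Holder>
class Guard
{
public:
    Guard() = default;

    template <typename... Args>
    explicit Guard(std::unique_ptr<Holder> holder_, Args &&... args)
        : holder(std::move(holder_))
        , impl(std::make_unique<Impl>(*holder, std::forward<Args>(args)...))   /// Impl wraps *holder
    {
    }

    Impl * operator->() { return impl.get(); }
    explicit operator bool() const { return impl != nullptr; }

private:
    std::unique_ptr<Holder> holder;   /// destroyed last
    std::unique_ptr<Impl> impl;       /// destroyed first, while holder is still alive
};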
+class TemporaryFileHolder { public: - struct Reader - { - Reader(const String & path, const Block & header_, size_t size = 0); + TemporaryFileHolder(); - explicit Reader(const String & path, size_t size = 0); + virtual std::unique_ptr write() = 0; + virtual std::unique_ptr read(size_t buffer_size) const = 0; - Block read(); + /// Get location for logging purposes + virtual String describeFilePath() const = 0; - const std::string path; - const size_t size; - const std::optional header; + virtual ~TemporaryFileHolder() = default; +}; - std::unique_ptr in_file_buf; - std::unique_ptr in_compressed_buf; - std::unique_ptr in_reader; - }; +class TemporaryDataReadBuffer : public ReadBuffer +{ +public: + explicit TemporaryDataReadBuffer(std::unique_ptr in_); + +private: + friend class ::FileCacheTest_TemporaryDataReadBufferSize_Test; + + bool nextImpl() override; + + WrapperGuard compressed_buf; +}; + +/// Writes data to buffer provided by file_holder, and accounts amount of written data in parent scope. +class TemporaryDataBuffer : public WriteBuffer +{ +public: struct Stat { - /// Statistics for file - /// Non-atomic because we don't allow to `read` or `write` into single file from multiple threads size_t compressed_size = 0; size_t uncompressed_size = 0; - size_t num_rows = 0; }; - TemporaryFileStream(TemporaryFileOnDiskHolder file_, const Block & header_, TemporaryDataOnDisk * parent_); - TemporaryFileStream(FileSegmentsHolderPtr segments_, const Block & header_, TemporaryDataOnDisk * parent_); - - size_t write(const Block & block); - void flush(); + explicit TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); + void nextImpl() override; + void finalizeImpl() override; + void cancelImpl() noexcept override; + std::unique_ptr read(); Stat finishWriting(); - Stat finishWritingAsyncSafe(); - bool isWriteFinished() const; - std::unique_ptr getReadStream(); + String describeFilePath() const; - Block read(); - - String getPath() const; - size_t getSize() const; - - Block getHeader() const { return header; } - - /// Read finished and file released - bool isEof() const; - - ~TemporaryFileStream(); + ~TemporaryDataBuffer() override; private: void updateAllocAndCheck(); - /// Release everything, close reader and writer, delete file - void release(); - - TemporaryDataOnDisk * parent; - - Block header; - - /// Data can be stored in file directly or in the cache - TemporaryFileOnDiskHolder file; - FileSegmentsHolderPtr segment_holder; + TemporaryDataOnDiskScope * parent; + std::unique_ptr file_holder; + WrapperGuard out_compressed_buf; + std::once_flag write_finished; Stat stat; +}; - std::once_flag finish_writing; +using TemporaryBlockStreamReaderHolder = WrapperGuard; - struct OutputWriter; - std::unique_ptr out_writer; +class TemporaryBlockStreamHolder : public WrapperGuard +{ +public: + TemporaryBlockStreamHolder() = default; - std::unique_ptr in_reader; + TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); + + TemporaryBlockStreamReaderHolder getReadStream() const; + + TemporaryDataBuffer::Stat finishWriting() const; + const Block & getHeader() const { return header; } + +private: + Block header; }; } diff --git a/src/Interpreters/tests/gtest_filecache.cpp b/src/Interpreters/tests/gtest_filecache.cpp index 007b31d9fdc..ae45443d4bd 100644 --- a/src/Interpreters/tests/gtest_filecache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -934,7 +934,7 @@ static Block generateBlock(size_t size = 0) return block; 
} -static size_t readAllTemporaryData(TemporaryFileStream & stream) +static size_t readAllTemporaryData(NativeReader & stream) { Block block; size_t read_rows = 0; @@ -947,6 +947,7 @@ static size_t readAllTemporaryData(TemporaryFileStream & stream) } TEST_F(FileCacheTest, temporaryData) +try { ServerUUID::setRandomForUnitTests(); DB::FileCacheSettings settings; @@ -959,7 +960,7 @@ TEST_F(FileCacheTest, temporaryData) file_cache.initialize(); const auto user = FileCache::getCommonUser(); - auto tmp_data_scope = std::make_shared(nullptr, &file_cache, TemporaryDataOnDiskSettings{}); + auto tmp_data_scope = std::make_shared(&file_cache, TemporaryDataOnDiskSettings{}); auto some_data_holder = file_cache.getOrSet(FileCacheKey::fromPath("some_data"), 0, 5_KiB, 5_KiB, CreateFileSegmentSettings{}, 0, user); @@ -982,12 +983,17 @@ TEST_F(FileCacheTest, temporaryData) size_t size_used_with_temporary_data; size_t segments_used_with_temporary_data; + + { - auto tmp_data = std::make_unique(tmp_data_scope); + TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get()); + ASSERT_TRUE(stream); + /// Do nothitng with stream, just create it and destroy. + } - auto & stream = tmp_data->createStream(generateBlock()); - - ASSERT_GT(stream.write(generateBlock(100)), 0); + { + TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get()); + ASSERT_GT(stream->write(generateBlock(100)), 0); ASSERT_GT(file_cache.getUsedCacheSize(), 0); ASSERT_GT(file_cache.getFileSegmentsNum(), 0); @@ -995,22 +1001,22 @@ TEST_F(FileCacheTest, temporaryData) size_t used_size_before_attempt = file_cache.getUsedCacheSize(); /// data can't be evicted because it is still held by `some_data_holder` ASSERT_THROW({ - stream.write(generateBlock(2000)); - stream.flush(); + stream->write(generateBlock(2000)); + stream.finishWriting(); }, DB::Exception); + ASSERT_THROW(stream.finishWriting(), DB::Exception); + ASSERT_EQ(file_cache.getUsedCacheSize(), used_size_before_attempt); } { size_t before_used_size = file_cache.getUsedCacheSize(); - auto tmp_data = std::make_unique(tmp_data_scope); - - auto write_buf_stream = tmp_data->createRawStream(); + auto write_buf_stream = std::make_unique(tmp_data_scope.get()); write_buf_stream->write("1234567890", 10); write_buf_stream->write("abcde", 5); - auto read_buf = dynamic_cast(write_buf_stream.get())->tryGetReadBuffer(); + auto read_buf = write_buf_stream->read(); ASSERT_GT(file_cache.getUsedCacheSize(), before_used_size + 10); @@ -1023,22 +1029,22 @@ TEST_F(FileCacheTest, temporaryData) } { - auto tmp_data = std::make_unique(tmp_data_scope); - auto & stream = tmp_data->createStream(generateBlock()); + TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get()); - ASSERT_GT(stream.write(generateBlock(100)), 0); + ASSERT_GT(stream->write(generateBlock(100)), 0); some_data_holder.reset(); - stream.write(generateBlock(2000)); + stream->write(generateBlock(2000)); - auto stat = stream.finishWriting(); + stream.finishWriting(); - ASSERT_TRUE(fs::exists(stream.getPath())); - ASSERT_GT(fs::file_size(stream.getPath()), 100); + String file_path = stream.getHolder()->describeFilePath().substr(strlen("fscache://")); - ASSERT_EQ(stat.num_rows, 2100); - ASSERT_EQ(readAllTemporaryData(stream), 2100); + ASSERT_TRUE(fs::exists(file_path)) << "File " << file_path << " should exist"; + ASSERT_GT(fs::file_size(file_path), 100) << "File " << file_path << " should be larger than 100 bytes"; + + ASSERT_EQ(readAllTemporaryData(*stream.getReadStream()), 2100); size_used_with_temporary_data = 
file_cache.getUsedCacheSize(); segments_used_with_temporary_data = file_cache.getFileSegmentsNum(); @@ -1054,6 +1060,11 @@ TEST_F(FileCacheTest, temporaryData) ASSERT_LE(file_cache.getUsedCacheSize(), size_used_before_temporary_data); ASSERT_LE(file_cache.getFileSegmentsNum(), segments_used_before_temporary_data); } +catch (...) +{ + std::cerr << getCurrentExceptionMessage(true) << std::endl; + throw; +} TEST_F(FileCacheTest, CachedReadBuffer) { @@ -1148,18 +1159,22 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize) DB::FileCache file_cache("cache", settings); file_cache.initialize(); - auto tmp_data_scope = std::make_shared(/*volume=*/nullptr, &file_cache, /*settings=*/TemporaryDataOnDiskSettings{}); - - auto tmp_data = std::make_unique(tmp_data_scope); + auto tmp_data_scope = std::make_shared(&file_cache, TemporaryDataOnDiskSettings{}); auto block = generateBlock(/*size=*/3); - auto & stream = tmp_data->createStream(block); - stream.write(block); - stream.finishWriting(); + TemporaryBlockStreamHolder stream(block, tmp_data_scope.get()); - /// We allocate buffer of size min(getSize(), DBMS_DEFAULT_BUFFER_SIZE) + stream->write(block); + auto stat = stream.finishWriting(); + + /// We allocate buffer of size min(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE) /// We do care about buffer size because realistic external group by could generate 10^5 temporary files - ASSERT_EQ(stream.getSize(), 62); + ASSERT_EQ(stat.compressed_size, 62); + + auto reader = stream.getReadStream(); + auto * read_buf = reader.getHolder(); + const auto & internal_buffer = static_cast(read_buf)->compressed_buf.getHolder()->internalBuffer(); + ASSERT_EQ(internal_buffer.size(), 62); } /// Temporary data stored on disk @@ -1170,16 +1185,14 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize) disk = createDisk("temporary_data_read_buffer_size_test_dir"); VolumePtr volume = std::make_shared("volume", disk); - auto tmp_data_scope = std::make_shared(/*volume=*/volume, /*cache=*/nullptr, /*settings=*/TemporaryDataOnDiskSettings{}); - - auto tmp_data = std::make_unique(tmp_data_scope); + auto tmp_data_scope = std::make_shared(volume, TemporaryDataOnDiskSettings{}); auto block = generateBlock(/*size=*/3); - auto & stream = tmp_data->createStream(block); - stream.write(block); - stream.finishWriting(); + TemporaryBlockStreamHolder stream(block, tmp_data_scope.get()); + stream->write(block); + auto stat = stream.finishWriting(); - ASSERT_EQ(stream.getSize(), 62); + ASSERT_EQ(stat.compressed_size, 62); } } diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp index 07ee8f4ddef..1560e88ffef 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -29,17 +30,18 @@ CollapsingSortedAlgorithm::CollapsingSortedAlgorithm( size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr temp_data_buffer_, bool use_average_block_sizes) : IMergingAlgorithmWithSharedChunks( header_, num_inputs, std::move(description_), - out_row_sources_buf_, + temp_data_buffer_.get(), max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) , sign_column_number(header_.getPositionByName(sign_column)) , only_positive_sign(only_positive_sign_) + , temp_data_buffer(temp_data_buffer_) , log(log_) { } diff --git 
a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h index 99fd95d82d9..b7bb9914cf8 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h @@ -11,6 +11,8 @@ namespace Poco namespace DB { +class TemporaryDataBuffer; + /** Merges several sorted inputs to one. * For each group of consecutive identical values of the primary key (the columns by which the data is sorted), * keeps no more than one row with the value of the column `sign_column = -1` ("negative row") @@ -35,7 +37,7 @@ public: size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "CollapsingSortedAlgorithm"; } @@ -62,6 +64,8 @@ private: PODArray current_row_sources; /// Sources of rows with the current primary key size_t count_incorrect_data = 0; /// To prevent too many error messages from writing to the log. + std::shared_ptr temp_data_buffer = nullptr; + LoggerPtr log; void reportIncorrectData(); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 3a9cf7ee141..d4e4ba6aa5f 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB { @@ -15,7 +16,7 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr out_row_sources_buf_, bool use_average_block_sizes) : header(std::move(header_)) , merged_data(use_average_block_sizes, max_block_size_, max_block_size_bytes_) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index c889668a38e..fc300e41026 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -9,6 +9,8 @@ namespace DB { +class TemporaryDataBuffer; + /// Merges several sorted inputs into one sorted output. class MergingSortedAlgorithm final : public IMergingAlgorithm { @@ -21,7 +23,7 @@ public: size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_ = 0, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); void addInput(); @@ -45,7 +47,7 @@ private: /// Used in Vertical merge algorithm to gather non-PK/non-index columns (on next step) /// If it is not nullptr then it should be populated during execution - WriteBuffer * out_row_sources_buf = nullptr; + std::shared_ptr out_row_sources_buf = nullptr; /// Chunks currently being merged. 
Inputs current_inputs; diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp index cd347d371d9..a3a33080f52 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { @@ -37,12 +38,13 @@ ReplacingSortedAlgorithm::ReplacingSortedAlgorithm( const String & version_column, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr temp_data_buffer_, bool use_average_block_sizes, bool cleanup_, bool enable_vertical_final_) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) , cleanup(cleanup_), enable_vertical_final(enable_vertical_final_) + , temp_data_buffer(temp_data_buffer_) { if (!is_deleted_column.empty()) is_deleted_column_number = header_.getPositionByName(is_deleted_column); diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h index 2f23f2a5c4d..d3b9837a253 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h @@ -24,6 +24,8 @@ struct ChunkSelectFinalIndices : public ChunkInfoCloneable temp_data_buffer_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final_ = false); @@ -59,6 +61,8 @@ private: RowRef selected_row; /// Last row with maximum version for current primary key, may extend lifetime of chunk in input source size_t max_pos = 0; /// The position (into current_row_sources) of the row with the highest version. + std::shared_ptr temp_data_buffer = nullptr; + /// Sources of rows with the current primary key. PODArray current_row_sources; diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp index 9f124c6ba18..1ceb1f46234 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB { @@ -14,12 +15,13 @@ VersionedCollapsingAlgorithm::VersionedCollapsingAlgorithm( const String & sign_column_, size_t max_block_size_rows_, size_t max_block_size_bytes_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr temp_data_buffer_, bool use_average_block_sizes) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) /// -1 for +1 in FixedSizeDequeWithGaps's internal buffer. 3 is a reasonable minimum size to collapse anything. 
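// Editor's sketch, not part of the patch: the merging algorithms now receive the row-sources
// spill buffer as std::shared_ptr<TemporaryDataBuffer>. The base class still only needs a raw
// WriteBuffer pointer, so the derived algorithm hands .get() to the base and keeps the
// shared_ptr as a member solely to pin the buffer's lifetime to its own. A reduced standalone
// analogue of that ownership split:
#include <memory>
#include <utility>

struct SinkBuffer { };                       /// stands in for WriteBuffer / TemporaryDataBuffer

struct BaseMergingAlgorithm
{
    explicit BaseMergingAlgorithm(SinkBuffer * out_) : out(out_) { }
    SinkBuffer * out = nullptr;              /// non-owning view used while merging
};

struct DerivedMergingAlgorithm : BaseMergingAlgorithm
{
    explicit DerivedMergingAlgorithm(std::shared_ptr<SinkBuffer> buffer)
        : BaseMergingAlgorithm(buffer.get())      /// base keeps seeing a plain pointer, as before
        , temp_data_buffer(std::move(buffer))     /// derived co-owns it, so it outlives the merge
    {
    }

    std::shared_ptr<SinkBuffer> temp_data_buffer;
};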
, max_rows_in_queue(std::min(std::max(3, max_block_size_rows_), MAX_ROWS_IN_MULTIVERSION_QUEUE) - 1) , current_keys(max_rows_in_queue) + , temp_data_buffer(temp_data_buffer_) { sign_column_number = header_.getPositionByName(sign_column_); } diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h index e6d20ddac75..6f877459147 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h @@ -8,6 +8,8 @@ namespace DB { +class TemporaryDataBuffer; + /** Merges several sorted inputs to one. * For each group of consecutive identical values of the sorting key * (the columns by which the data is sorted, including specially specified version column), @@ -22,7 +24,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "VersionedCollapsingAlgorithm"; } @@ -37,6 +39,8 @@ private: FixedSizeDequeWithGaps current_keys; Int8 sign_in_queue = 0; + std::shared_ptr temp_data_buffer = nullptr; + std::queue current_row_sources; /// Sources of rows with the current primary key void insertGap(size_t gap_size); diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index 99fb700abf1..9b09c802783 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -23,7 +23,7 @@ public: bool only_positive_sign, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index d2895a2a2e9..13330dcff6d 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -20,7 +20,7 @@ MergingSortedTransform::MergingSortedTransform( SortingQueueStrategy sorting_queue_strategy, UInt64 limit_, bool always_read_till_end_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr out_row_sources_buf_, bool use_average_block_sizes, bool have_all_inputs_) : IMergingTransform( diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index 6e52450efa7..fb8e5ce74e3 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -20,7 +20,7 @@ public: SortingQueueStrategy sorting_queue_strategy, UInt64 limit_ = 0, bool always_read_till_end_ = false, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool have_all_inputs_ = true); diff --git a/src/Processors/Merges/ReplacingSortedTransform.h b/src/Processors/Merges/ReplacingSortedTransform.h index dc262aab9ee..a9d9f4fb619 100644 --- a/src/Processors/Merges/ReplacingSortedTransform.h +++ b/src/Processors/Merges/ReplacingSortedTransform.h @@ -21,7 +21,7 @@ public: const String & is_deleted_column, const String & version_column, size_t 
max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final = false) @@ -34,7 +34,7 @@ public: version_column, max_block_size_rows, max_block_size_bytes, - out_row_sources_buf_, + temp_data_buffer_, use_average_block_sizes, cleanup, enable_vertical_final) diff --git a/src/Processors/Merges/VersionedCollapsingTransform.h b/src/Processors/Merges/VersionedCollapsingTransform.h index 32b5d7bf343..0bdccd4795d 100644 --- a/src/Processors/Merges/VersionedCollapsingTransform.h +++ b/src/Processors/Merges/VersionedCollapsingTransform.h @@ -21,7 +21,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, @@ -31,7 +31,7 @@ public: sign_column_, max_block_size_rows, max_block_size_bytes, - out_row_sources_buf_, + temp_data_buffer_, use_average_block_sizes) { } diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h index d99f9a7d1f1..1c03a4d74cd 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h @@ -20,7 +20,6 @@ struct BuildQueryPipelineSettings ExpressionActionsSettings actions_settings; QueryStatusPtr process_list_element; ProgressCallback progress_callback = nullptr; - TemporaryFileLookupPtr temporary_file_lookup; const ExpressionActionsSettings & getActionsSettings() const { return actions_settings; } static BuildQueryPipelineSettings fromContext(ContextPtr from); diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 5ad2f1f62d5..4fde246f764 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -280,9 +280,9 @@ void SortingStep::mergeSorting( if (increase_sort_description_compile_attempts) increase_sort_description_compile_attempts = false; - auto tmp_data_on_disk = sort_settings.tmp_data - ? 
std::make_unique(sort_settings.tmp_data, CurrentMetrics::TemporaryFilesForSort) - : std::unique_ptr(); + TemporaryDataOnDiskScopePtr tmp_data_on_disk = nullptr; + if (sort_settings.tmp_data) + tmp_data_on_disk = sort_settings.tmp_data->childScope(CurrentMetrics::TemporaryFilesForSort); return std::make_shared( header, diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index 68f23898018..2c54788b995 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -54,9 +54,9 @@ namespace class SourceFromNativeStream : public ISource { public: - explicit SourceFromNativeStream(TemporaryFileStream * tmp_stream_) - : ISource(tmp_stream_->getHeader()) - , tmp_stream(tmp_stream_) + explicit SourceFromNativeStream(const Block & header, TemporaryBlockStreamReaderHolder tmp_stream_) + : ISource(header) + , tmp_stream(std::move(tmp_stream_)) {} String getName() const override { return "SourceFromNativeStream"; } @@ -69,7 +69,7 @@ namespace auto block = tmp_stream->read(); if (!block) { - tmp_stream = nullptr; + tmp_stream.reset(); return {}; } return convertToChunk(block); @@ -78,7 +78,7 @@ namespace std::optional getReadProgress() override { return std::nullopt; } private: - TemporaryFileStream * tmp_stream; + TemporaryBlockStreamReaderHolder tmp_stream; }; } @@ -811,15 +811,18 @@ void AggregatingTransform::initGenerate() Pipes pipes; /// Merge external data from all aggregators used in query. - for (const auto & aggregator : *params->aggregator_list_ptr) + for (auto & aggregator : *params->aggregator_list_ptr) { - const auto & tmp_data = aggregator.getTemporaryData(); - for (auto * tmp_stream : tmp_data.getStreams()) - pipes.emplace_back(Pipe(std::make_unique(tmp_stream))); + auto & tmp_data = aggregator.getTemporaryData(); + num_streams += tmp_data.size(); - num_streams += tmp_data.getStreams().size(); - compressed_size += tmp_data.getStat().compressed_size; - uncompressed_size += tmp_data.getStat().uncompressed_size; + for (auto & tmp_stream : tmp_data) + { + auto stat = tmp_stream.finishWriting(); + compressed_size += stat.compressed_size; + uncompressed_size += stat.uncompressed_size; + pipes.emplace_back(Pipe(std::make_unique(tmp_stream.getHeader(), tmp_stream.getReadStream()))); + } } LOG_DEBUG( diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index c45192e7118..ba157dabffb 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -27,15 +27,20 @@ namespace ProfileEvents namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + class BufferingToFileTransform : public IAccumulatingTransform { public: - BufferingToFileTransform(const Block & header, TemporaryFileStream & tmp_stream_, LoggerPtr log_) + BufferingToFileTransform(const Block & header, TemporaryBlockStreamHolder tmp_stream_, LoggerPtr log_) : IAccumulatingTransform(header, header) - , tmp_stream(tmp_stream_) + , tmp_stream(std::move(tmp_stream_)) , log(log_) { - LOG_INFO(log, "Sorting and writing part of data into temporary file {}", tmp_stream.getPath()); + LOG_INFO(log, "Sorting and writing part of data into temporary file {}", tmp_stream.getHolder()->describeFilePath()); ProfileEvents::increment(ProfileEvents::ExternalSortWritePart); } @@ -44,14 +49,15 @@ public: void consume(Chunk chunk) override { Block block = 
getInputPort().getHeader().cloneWithColumns(chunk.detachColumns()); - tmp_stream.write(block); + tmp_stream->write(block); } Chunk generate() override { - if (!tmp_stream.isWriteFinished()) + if (!tmp_read_stream) { auto stat = tmp_stream.finishWriting(); + tmp_read_stream = tmp_stream.getReadStream(); ProfileEvents::increment(ProfileEvents::ExternalProcessingCompressedBytesTotal, stat.compressed_size); ProfileEvents::increment(ProfileEvents::ExternalProcessingUncompressedBytesTotal, stat.uncompressed_size); @@ -59,10 +65,11 @@ public: ProfileEvents::increment(ProfileEvents::ExternalSortUncompressedBytes, stat.uncompressed_size); LOG_INFO(log, "Done writing part of data into temporary file {}, compressed {}, uncompressed {} ", - tmp_stream.getPath(), ReadableSize(static_cast(stat.compressed_size)), ReadableSize(static_cast(stat.uncompressed_size))); + tmp_stream.getHolder()->describeFilePath(), + ReadableSize(static_cast(stat.compressed_size)), ReadableSize(static_cast(stat.uncompressed_size))); } - Block block = tmp_stream.read(); + Block block = tmp_read_stream->read(); if (!block) return {}; @@ -71,7 +78,8 @@ public: } private: - TemporaryFileStream & tmp_stream; + TemporaryBlockStreamHolder tmp_stream; + TemporaryBlockStreamReaderHolder tmp_read_stream; LoggerPtr log; }; @@ -86,7 +94,7 @@ MergeSortingTransform::MergeSortingTransform( size_t max_bytes_before_remerge_, double remerge_lowered_memory_bytes_ratio_, size_t max_bytes_before_external_sort_, - TemporaryDataOnDiskPtr tmp_data_, + TemporaryDataOnDiskScopePtr tmp_data_, size_t min_free_disk_space_) : SortingTransform(header, description_, max_merged_block_size_, limit_, increase_sort_description_compile_attempts) , max_bytes_before_remerge(max_bytes_before_remerge_) @@ -168,9 +176,13 @@ void MergeSortingTransform::consume(Chunk chunk) */ if (max_bytes_before_external_sort && sum_bytes_in_blocks > max_bytes_before_external_sort) { + if (!tmp_data) + throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDisk is not set for MergeSortingTransform"); + temporary_files_num++; + /// If there's less free disk space than reserve_size, an exception will be thrown size_t reserve_size = sum_bytes_in_blocks + min_free_disk_space; - auto & tmp_stream = tmp_data->createStream(header_without_constants, reserve_size); + TemporaryBlockStreamHolder tmp_stream(header_without_constants, tmp_data.get(), reserve_size); size_t max_merged_block_size = this->max_merged_block_size; if (max_block_bytes > 0 && sum_rows_in_blocks > 0 && sum_bytes_in_blocks > 0) { @@ -179,7 +191,7 @@ void MergeSortingTransform::consume(Chunk chunk) max_merged_block_size = std::max(std::min(max_merged_block_size, max_block_bytes / avg_row_bytes), 128UL); } merge_sorter = std::make_unique(header_without_constants, std::move(chunks), description, max_merged_block_size, limit); - auto current_processor = std::make_shared(header_without_constants, tmp_stream, log); + auto current_processor = std::make_shared(header_without_constants, std::move(tmp_stream), log); processors.emplace_back(current_processor); @@ -221,14 +233,14 @@ void MergeSortingTransform::generate() { if (!generated_prefix) { - size_t num_tmp_files = tmp_data ? 
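// Editor's note, not part of the patch: BufferingToFileTransform now owns the write-side
// holder and uses "read stream not yet created" as the state that isWriteFinished() used to
// signal. Condensed outline of the generate() flow after this change, mirroring the hunk
// above:
//
//   if (!tmp_read_stream)                            // first generate() after the consume() phase
//   {
//       auto stat = tmp_stream.finishWriting();      // flush + finalize, sizes go to ProfileEvents
//       tmp_read_stream = tmp_stream.getReadStream();
//   }
//   Block block = tmp_read_stream->read();           // empty block => this spill file is exhausted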
tmp_data->getStreams().size() : 0; - if (num_tmp_files == 0) - merge_sorter - = std::make_unique(header_without_constants, std::move(chunks), description, max_merged_block_size, limit); + if (temporary_files_num == 0) + { + merge_sorter = std::make_unique(header_without_constants, std::move(chunks), description, max_merged_block_size, limit); + } else { ProfileEvents::increment(ProfileEvents::ExternalSortMerge); - LOG_INFO(log, "There are {} temporary sorted parts to merge", num_tmp_files); + LOG_INFO(log, "There are {} temporary sorted parts to merge", temporary_files_num); processors.emplace_back(std::make_shared( header_without_constants, std::move(chunks), description, max_merged_block_size, limit)); diff --git a/src/Processors/Transforms/MergeSortingTransform.h b/src/Processors/Transforms/MergeSortingTransform.h index a39dd66caa0..f7cb63d518b 100644 --- a/src/Processors/Transforms/MergeSortingTransform.h +++ b/src/Processors/Transforms/MergeSortingTransform.h @@ -29,7 +29,7 @@ public: size_t max_bytes_before_remerge_, double remerge_lowered_memory_bytes_ratio_, size_t max_bytes_before_external_sort_, - TemporaryDataOnDiskPtr tmp_data_, + TemporaryDataOnDiskScopePtr tmp_data_, size_t min_free_disk_space_); String getName() const override { return "MergeSortingTransform"; } @@ -45,7 +45,8 @@ private: size_t max_bytes_before_remerge; double remerge_lowered_memory_bytes_ratio; size_t max_bytes_before_external_sort; - TemporaryDataOnDiskPtr tmp_data; + TemporaryDataOnDiskScopePtr tmp_data; + size_t temporary_files_num = 0; size_t min_free_disk_space; size_t max_block_bytes; diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index a9e5b1535c0..1e274a97a08 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -197,6 +197,12 @@ public: void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } + template + void addResource(Resource resource, std::vector QueryPlanResourceHolder::*field) + { + (resources.*field).push_back(std::move(resource)); + } + /// Convert query pipeline to pipe. 
static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); static QueryPipeline getPipeline(QueryPipelineBuilder builder); diff --git a/src/QueryPipeline/QueryPlanResourceHolder.h b/src/QueryPipeline/QueryPlanResourceHolder.h index 10f7f39ab09..ee2ecc25cd5 100644 --- a/src/QueryPipeline/QueryPlanResourceHolder.h +++ b/src/QueryPipeline/QueryPlanResourceHolder.h @@ -13,6 +13,7 @@ class QueryPlan; class Context; struct QueryIdHolder; +class TemporaryDataBuffer; struct QueryPlanResourceHolder { @@ -33,6 +34,7 @@ struct QueryPlanResourceHolder std::vector storage_holders; std::vector table_locks; std::vector> query_id_holders; + std::vector> rows_sources_temporary_file; }; } diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index 8a9ae05b355..52b56860543 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -162,15 +162,16 @@ void HTTPHandler::pushDelayedResults(Output & used_output) for (auto & write_buf : write_buffers) { - if (!write_buf) - continue; - - IReadableWriteBuffer * write_buf_concrete = dynamic_cast(write_buf.get()); - if (write_buf_concrete) + if (auto * write_buf_concrete = dynamic_cast(write_buf.get())) { - ReadBufferPtr reread_buf = write_buf_concrete->tryGetReadBuffer(); - if (reread_buf) - read_buffers.emplace_back(wrapReadBufferPointer(reread_buf)); + if (auto reread_buf = write_buf_concrete->read()) + read_buffers.emplace_back(std::move(reread_buf)); + } + + if (auto * write_buf_concrete = dynamic_cast(write_buf.get())) + { + if (auto reread_buf = write_buf_concrete->tryGetReadBuffer()) + read_buffers.emplace_back(std::move(reread_buf)); } } @@ -312,21 +313,19 @@ void HTTPHandler::processQuery( if (buffer_size_memory > 0 || buffer_until_eof) { - CascadeWriteBuffer::WriteBufferPtrs cascade_buffer1; - CascadeWriteBuffer::WriteBufferConstructors cascade_buffer2; + CascadeWriteBuffer::WriteBufferPtrs cascade_buffers; + CascadeWriteBuffer::WriteBufferConstructors cascade_buffers_lazy; if (buffer_size_memory > 0) - cascade_buffer1.emplace_back(std::make_shared(buffer_size_memory)); + cascade_buffers.emplace_back(std::make_shared(buffer_size_memory)); if (buffer_until_eof) { - auto tmp_data = std::make_shared(server.context()->getTempDataOnDisk()); - - auto create_tmp_disk_buffer = [tmp_data] (const WriteBufferPtr &) -> WriteBufferPtr { - return tmp_data->createRawStream(); - }; - - cascade_buffer2.emplace_back(std::move(create_tmp_disk_buffer)); + auto tmp_data = server.context()->getTempDataOnDisk(); + cascade_buffers_lazy.emplace_back([tmp_data](const WriteBufferPtr &) -> WriteBufferPtr + { + return std::make_unique(tmp_data.get()); + }); } else { @@ -342,10 +341,10 @@ void HTTPHandler::processQuery( return next_buffer; }; - cascade_buffer2.emplace_back(push_memory_buffer_and_continue); + cascade_buffers_lazy.emplace_back(push_memory_buffer_and_continue); } - used_output.out_delayed_and_compressed_holder = std::make_unique(std::move(cascade_buffer1), std::move(cascade_buffer2)); + used_output.out_delayed_and_compressed_holder = std::make_unique(std::move(cascade_buffers), std::move(cascade_buffers_lazy)); used_output.out_maybe_delayed_and_compressed = used_output.out_delayed_and_compressed_holder.get(); } else diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 74d6d60ba1b..5c9d4ea61a2 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -118,68 +118,6 @@ static ColumnsStatistics getStatisticsForColumns( return 
all_statistics; } -/// Manages the "rows_sources" temporary file that is used during vertical merge. -class RowsSourcesTemporaryFile : public ITemporaryFileLookup -{ -public: - /// A logical name of the temporary file under which it will be known to the plan steps that use it. - static constexpr auto FILE_ID = "rows_sources"; - - explicit RowsSourcesTemporaryFile(TemporaryDataOnDiskScopePtr temporary_data_on_disk_) - : tmp_disk(std::make_unique(temporary_data_on_disk_)) - , uncompressed_write_buffer(tmp_disk->createRawStream()) - , tmp_file_name_on_disk(uncompressed_write_buffer->getFileName()) - { - } - - WriteBuffer & getTemporaryFileForWriting(const String & name) override - { - if (name != FILE_ID) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name); - - if (write_buffer) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was already requested for writing, there musto be only one writer"); - - write_buffer = (std::make_unique(*uncompressed_write_buffer)); - return *write_buffer; - } - - std::unique_ptr getTemporaryFileForReading(const String & name) override - { - if (name != FILE_ID) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name); - - if (!finalized) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file is not finalized yet"); - - /// tmp_disk might not create real file if no data was written to it. - if (final_size == 0) - return std::make_unique(); - - /// Reopen the file for each read so that multiple reads can be performed in parallel and there is no need to seek to the beginning. - auto raw_file_read_buffer = std::make_unique(tmp_file_name_on_disk); - return std::make_unique(std::move(raw_file_read_buffer)); - } - - /// Returns written data size in bytes - size_t finalizeWriting() - { - write_buffer->finalize(); - uncompressed_write_buffer->finalize(); - finalized = true; - final_size = write_buffer->count(); - return final_size; - } - -private: - std::unique_ptr tmp_disk; - std::unique_ptr uncompressed_write_buffer; - std::unique_ptr write_buffer; - const String tmp_file_name_on_disk; - bool finalized = false; - size_t final_size = 0; -}; - static void addMissedColumnsToSerializationInfos( size_t num_rows_in_parts, const Names & part_columns, @@ -480,7 +418,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const } case MergeAlgorithm::Vertical: { - ctx->rows_sources_temporary_file = std::make_shared(global_ctx->context->getTempDataOnDisk()); + ctx->rows_sources_temporary_file = std::make_unique(global_ctx->context->getTempDataOnDisk().get()); std::map local_merged_column_to_size; for (const auto & part : global_ctx->future_part->parts) @@ -854,22 +792,11 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const if (global_ctx->chosen_merge_algorithm != MergeAlgorithm::Vertical) return false; - size_t sum_input_rows_exact = global_ctx->merge_list_element_ptr->rows_read; - size_t input_rows_filtered = *global_ctx->input_rows_filtered; global_ctx->merge_list_element_ptr->columns_written = global_ctx->merging_columns.size(); global_ctx->merge_list_element_ptr->progress.store(ctx->column_sizes->keyColumnsWeight(), std::memory_order_relaxed); /// Ensure data has written to disk. - size_t rows_sources_count = ctx->rows_sources_temporary_file->finalizeWriting(); - /// In special case, when there is only one source part, and no rows were skipped, we may have - /// skipped writing rows_sources file. 
Otherwise rows_sources_count must be equal to the total - /// number of input rows. - if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count + input_rows_filtered) - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Number of rows in source parts ({}) excluding filtered rows ({}) differs from number " - "of bytes written to rows_sources file ({}). It is a bug.", - sum_input_rows_exact, input_rows_filtered, rows_sources_count); + ctx->rows_sources_temporary_file->finishWriting(); ctx->it_name_and_type = global_ctx->gathering_columns.cbegin(); @@ -901,12 +828,12 @@ class ColumnGathererStep : public ITransformingStep public: ColumnGathererStep( const Header & input_header_, - const String & rows_sources_temporary_file_name_, + std::unique_ptr rows_sources_read_buf_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool is_result_sparse_) : ITransformingStep(input_header_, input_header_, getTraits()) - , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) + , rows_sources_read_buf(std::move(rows_sources_read_buf_)) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , is_result_sparse(is_result_sparse_) @@ -914,15 +841,13 @@ public: String getName() const override { return "ColumnGatherer"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override { - const auto &header = pipeline.getHeader(); + const auto & header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); - if (!pipeline_settings.temporary_file_lookup) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); - - auto rows_sources_read_buf = pipeline_settings.temporary_file_lookup->getTemporaryFileForReading(rows_sources_temporary_file_name); + if (!rows_sources_read_buf) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary data buffer for rows sources is not set"); auto transform = std::make_unique( header, @@ -957,7 +882,7 @@ private: } MergeTreeData::MergingParams merging_params{}; - const String rows_sources_temporary_file_name; + std::unique_ptr rows_sources_read_buf; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool is_result_sparse; @@ -1008,7 +933,7 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic const auto data_settings = global_ctx->data->getSettings(); auto merge_step = std::make_unique( merge_column_query_plan.getCurrentHeader(), - RowsSourcesTemporaryFile::FILE_ID, + ctx->rows_sources_temporary_file->read(), (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], is_result_sparse); @@ -1037,9 +962,9 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic } auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); - pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_column_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); + builder->addResource>(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); return 
{QueryPipelineBuilder::getPipeline(std::move(*builder)), std::move(indexes_to_recalc)}; } @@ -1401,7 +1326,7 @@ public: const SortDescription & sort_description_, const Names partition_key_columns_, const MergeTreeData::MergingParams & merging_params_, - const String & rows_sources_temporary_file_name_, + std::shared_ptr rows_sources_temporary_file_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool blocks_are_granules_size_, @@ -1411,7 +1336,7 @@ public: , sort_description(sort_description_) , partition_key_columns(partition_key_columns_) , merging_params(merging_params_) - , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) + , rows_sources_temporary_file(rows_sources_temporary_file_) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , blocks_are_granules_size(blocks_are_granules_size_) @@ -1421,7 +1346,7 @@ public: String getName() const override { return "MergeParts"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override { /// The order of the streams is important: when the key is matched, the elements go in the order of the source stream number. /// In the merged part, the lines with the same key must be in the ascending order of the identifier of original part, @@ -1431,14 +1356,6 @@ public: const auto &header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); - WriteBuffer * rows_sources_write_buf = nullptr; - if (!rows_sources_temporary_file_name.empty()) - { - if (!pipeline_settings.temporary_file_lookup) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); - rows_sources_write_buf = &pipeline_settings.temporary_file_lookup->getTemporaryFileForWriting(rows_sources_temporary_file_name); - } - switch (merging_params.mode) { case MergeTreeData::MergingParams::Ordinary: @@ -1451,14 +1368,14 @@ public: SortingQueueStrategy::Default, /* limit_= */0, /* always_read_till_end_= */false, - rows_sources_write_buf, + rows_sources_temporary_file, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Collapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, false, - merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Summing: @@ -1473,7 +1390,7 @@ public: case MergeTreeData::MergingParams::Replacing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.is_deleted_column, merging_params.version_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size, + merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size, cleanup); break; @@ -1486,7 +1403,7 @@ public: case MergeTreeData::MergingParams::VersionedCollapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, 
blocks_are_granules_size); break; } @@ -1528,7 +1445,7 @@ private: const SortDescription sort_description; const Names partition_key_columns; const MergeTreeData::MergingParams merging_params{}; - const String rows_sources_temporary_file_name; + std::shared_ptr rows_sources_temporary_file; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool blocks_are_granules_size; @@ -1697,7 +1614,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const sort_description, partition_key_columns, global_ctx->merging_params, - (is_vertical_merge ? RowsSourcesTemporaryFile::FILE_ID : ""), /// rows_sources temporaty file is used only for vertical merge + (is_vertical_merge ? ctx->rows_sources_temporary_file : nullptr), /// rows_sources temporaty file is used only for vertical merge (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], ctx->blocks_are_granules_size, @@ -1762,7 +1679,6 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const { auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); - pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_parts_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 5a4fb1ec0b8..a3d72127627 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -41,7 +41,6 @@ namespace DB class MergeTask; using MergeTaskPtr = std::shared_ptr; -class RowsSourcesTemporaryFile; /** * Overview of the merge algorithm @@ -235,7 +234,7 @@ private: bool force_ttl{false}; CompressionCodecPtr compression_codec{nullptr}; size_t sum_input_rows_upper_bound{0}; - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes{}; /// For projections to rebuild @@ -314,7 +313,7 @@ private: struct VerticalMergeRuntimeContext : public IStageRuntimeContext { /// Begin dependencies from previous stage - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes; CompressionCodecPtr compression_codec; std::list::const_iterator it_name_and_type; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 76bcf41d6d8..a53d4213cbd 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -113,10 +113,11 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( } if (!prewhere_actions.steps.empty()) - LOG_TRACE(log, "PREWHERE condition was split into {} steps: {}", prewhere_actions.steps.size(), prewhere_actions.dumpConditions()); + LOG_TRACE(log, "PREWHERE condition was split into {} steps", prewhere_actions.steps.size()); if (prewhere_info) - LOG_TEST(log, "Original PREWHERE DAG:\n{}\nPREWHERE actions:\n{}", + LOG_TEST(log, "Original PREWHERE DAG:{}\n{}\nPREWHERE actions:\n{}", + prewhere_actions.dumpConditions(), prewhere_info->prewhere_actions.dumpDAG(), (!prewhere_actions.steps.empty() ? 
prewhere_actions.dump() : std::string(""))); } From db2aab199db6e542c5a87c30466c358a2207c30a Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 16 Oct 2024 11:57:11 +0000 Subject: [PATCH 085/566] log --- src/Interpreters/TemporaryDataOnDisk.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index c3b24fb783b..6cc49fe83c8 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -58,6 +58,7 @@ public: explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t max_file_size = 0) { const auto key = FileSegment::Key::random(); + LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file in cache with key {}", key); segment_holder = file_cache.set( key, 0, std::max(10_MiB, max_file_size), CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); @@ -91,6 +92,7 @@ public: explicit TemporaryFileOnLocalDisk(VolumePtr volume, size_t max_file_size = 0) : path_to_file("tmp" + toString(UUIDHelpers::generateV4())) { + LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file '{}'", path_to_file); if (max_file_size > 0) { auto reservation = volume->reserve(max_file_size); @@ -129,9 +131,14 @@ public: try { if (disk->exists(path_to_file)) + { + LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Removing temporary file '{}'", path_to_file); disk->removeRecursive(path_to_file); + } else + { LOG_WARNING(getLogger("TemporaryFileOnLocalDisk"), "Temporary path '{}' does not exist in '{}' on disk {}", path_to_file, disk->getPath(), disk->getName()); + } } catch (...) { From b09d3c5479edcecaa041df6e6de7a45a2d407aa8 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 16 Oct 2024 12:01:21 +0000 Subject: [PATCH 086/566] fix --- src/Interpreters/Context.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 6ada12e63f9..f0e29dcdc41 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1256,6 +1256,10 @@ try /// We skip directories (for example, 'http_buffers' - it's used for buffering of the results) and all other file types. } } + else + { + fs::create_directories(path); + } } catch (...) 
{ From f238530cc5d222f62611214a9434138d79aabefd Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 17 Oct 2024 15:10:39 +0000 Subject: [PATCH 087/566] w --- src/Interpreters/Aggregator.cpp | 5 ++++ src/Interpreters/Aggregator.h | 2 +- src/Interpreters/GraceHashJoin.cpp | 4 +-- src/Interpreters/HashJoin/HashJoin.cpp | 15 +++++------ src/Interpreters/HashJoin/HashJoin.h | 2 +- src/Interpreters/TemporaryDataOnDisk.h | 25 ++++++++++--------- .../Transforms/MergeSortingTransform.cpp | 4 +-- src/Storages/MergeTree/MergeTask.cpp | 2 +- 8 files changed, 33 insertions(+), 26 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index e6fecc37cfa..cdc819d3a32 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1639,6 +1639,11 @@ Block Aggregator::convertOneBucketToBlock(AggregatedDataVariants & variants, Are return block; } +std::vector & Aggregator::getTemporaryData() +{ + return tmp_files; +} + template void Aggregator::writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index bc28d3dccb8..3ac5ca30ed4 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -311,7 +311,7 @@ public: bool hasTemporaryData() const { return !tmp_files.empty(); } - std::vector & getTemporaryData() { return tmp_files; } + std::vector & getTemporaryData(); /// Get data structure of the result. Block getHeader(bool final) const; diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index a2010b7d94b..3fb83c3ce47 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -389,8 +389,8 @@ void GraceHashJoin::addBuckets(const size_t bucket_count) for (size_t i = 0; i < bucket_count; ++i) try { - TemporaryBlockStreamHolder left_file = TemporaryBlockStreamHolder(left_sample_block, tmp_data.get()); - TemporaryBlockStreamHolder right_file = TemporaryBlockStreamHolder(prepareRightBlock(right_sample_block), tmp_data.get()); + TemporaryBlockStreamHolder left_file(left_sample_block, tmp_data.get()); + TemporaryBlockStreamHolder right_file(prepareRightBlock(right_sample_block), tmp_data.get()); BucketPtr new_bucket = std::make_shared(current_size + i, std::move(left_file), std::move(right_file), log); tmp_buckets.emplace_back(std::move(new_bucket)); diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index af23b520abb..a2c9f94a6ae 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -59,7 +59,7 @@ struct NotProcessedCrossJoin : public ExtraBlock { size_t left_position; size_t right_block; - TemporaryBlockStreamReaderHolder reader; + std::optional reader; }; @@ -513,9 +513,9 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) || (max_rows_in_join && getTotalRowCount() + block_to_save.rows() >= max_rows_in_join))) { if (!tmp_stream) - tmp_stream = TemporaryBlockStreamHolder(right_sample_block, tmp_data.get()); + tmp_stream.emplace(right_sample_block, tmp_data.get()); - tmp_stream->write(block_to_save); + tmp_stream.value()->write(block_to_save); return true; } @@ -721,13 +721,14 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) { size_t start_left_row = 0; size_t start_right_block = 0; - TemporaryBlockStreamReaderHolder reader; + std::optional reader; if (not_processed) { auto & continuation = static_cast(*not_processed); start_left_row = 
continuation.left_position; start_right_block = continuation.right_block; - reader = std::move(continuation.reader); + if (continuation.reader) + reader = std::move(*continuation.reader); not_processed.reset(); } @@ -796,9 +797,9 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) if (tmp_stream && rows_added <= max_joined_block_rows) { if (!reader) - reader = tmp_stream.getReadStream(); + reader = tmp_stream->getReadStream(); - while (auto block_right = reader->read()) + while (auto block_right = reader.value()->read()) { ++block_number; process_right_block(block_right); diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 0f50e110db9..8572c5df096 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -424,7 +424,7 @@ private: /// Needed to do external cross join TemporaryDataOnDiskScopePtr tmp_data; - TemporaryBlockStreamHolder tmp_stream; + std::optional tmp_stream; mutable std::once_flag finish_writing; /// Block with columns from the right-side table. diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index f8d14b00ac5..86fa9e57e81 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -114,18 +114,19 @@ template class WrapperGuard { public: - WrapperGuard() = default; - template WrapperGuard(std::unique_ptr holder_, Args && ... args) : holder(std::move(holder_)) , impl(std::make_unique(*holder, std::forward(args)...)) - {} + { + chassert(holder); + chassert(impl); + } - Impl * operator->() { return impl.get(); } - const Impl * operator->() const { return impl.get(); } - Impl & operator*() { return *impl; } - const Impl & operator*() const { return *impl; } + Impl * operator->() { chassert(impl); chassert(holder); return impl.get(); } + const Impl * operator->() const { chassert(impl); chassert(holder); return impl.get(); } + Impl & operator*() { chassert(impl); chassert(holder); return *impl; } + const Impl & operator*() const { chassert(impl); chassert(holder); return *impl; } operator bool() const { return impl != nullptr; } const Holder * getHolder() const { return holder.get(); } @@ -153,13 +154,13 @@ public: virtual std::unique_ptr write() = 0; virtual std::unique_ptr read(size_t buffer_size) const = 0; - /// Get location for logging purposes + /// Get location for logging virtual String describeFilePath() const = 0; virtual ~TemporaryFileHolder() = default; }; - +/// Reads raw data from temporary file class TemporaryDataReadBuffer : public ReadBuffer { public: @@ -173,7 +174,7 @@ private: WrapperGuard compressed_buf; }; -/// Writes data to buffer provided by file_holder, and accounts amount of written data in parent scope. +/// Writes raw data to buffer provided by file_holder, and accounts amount of written data in parent scope. class TemporaryDataBuffer : public WriteBuffer { public: @@ -206,13 +207,13 @@ private: Stat stat; }; + +/// High level interfaces for reading and writing temporary data by blocks. 
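/// A typical lifecycle, as used by the transforms in this patch (illustrative sketch;
/// `tmp_data` stands for any available TemporaryDataOnDiskScopePtr and `process` is a placeholder):
///
///     TemporaryBlockStreamHolder stream(header, tmp_data.get());
///     stream->write(block);                  /// append blocks to the temporary file
///     auto stat = stream.finishWriting();    /// flush; stat carries compressed/uncompressed sizes
///     auto reader = stream.getReadStream();  /// TemporaryBlockStreamReaderHolder
///     while (Block b = reader->read())
///         process(b);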
using TemporaryBlockStreamReaderHolder = WrapperGuard; class TemporaryBlockStreamHolder : public WrapperGuard { public: - TemporaryBlockStreamHolder() = default; - TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); TemporaryBlockStreamReaderHolder getReadStream() const; diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index ba157dabffb..d3299ea651f 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -69,7 +69,7 @@ public: ReadableSize(static_cast(stat.compressed_size)), ReadableSize(static_cast(stat.uncompressed_size))); } - Block block = tmp_read_stream->read(); + Block block = tmp_read_stream.value()->read(); if (!block) return {}; @@ -79,7 +79,7 @@ public: private: TemporaryBlockStreamHolder tmp_stream; - TemporaryBlockStreamReaderHolder tmp_read_stream; + std::optional tmp_read_stream; LoggerPtr log; }; diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 5c9d4ea61a2..1009458574e 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -964,7 +964,7 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_column_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); - builder->addResource>(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); + builder->addResource(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); return {QueryPipelineBuilder::getPipeline(std::move(*builder)), std::move(indexes_to_recalc)}; } From 017d9557c5c6d41c671d55c21fb2e8810d231dd3 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 17 Oct 2024 15:33:33 +0000 Subject: [PATCH 088/566] f --- src/Interpreters/Aggregator.cpp | 12 +++++++++++- src/Interpreters/Aggregator.h | 7 ++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index cdc819d3a32..bb9e22e5a1b 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1519,7 +1519,10 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si Stopwatch watch; size_t rows = data_variants.size(); + std::unique_lock lk(tmp_files_mutex); auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); + lk.unlock(); + ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart); LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getHolder()->describeFilePath()); @@ -1639,11 +1642,18 @@ Block Aggregator::convertOneBucketToBlock(AggregatedDataVariants & variants, Are return block; } -std::vector & Aggregator::getTemporaryData() +std::list & Aggregator::getTemporaryData() { return tmp_files; } +bool Aggregator::hasTemporaryData() const +{ + std::lock_guard lk(tmp_files_mutex); + return !tmp_files.empty(); +} + + template void Aggregator::writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 3ac5ca30ed4..451583946eb 100644 --- a/src/Interpreters/Aggregator.h +++ 
b/src/Interpreters/Aggregator.h @@ -309,9 +309,9 @@ public: /// For external aggregation. void writeToTemporaryFile(AggregatedDataVariants & data_variants, size_t max_temp_file_size = 0) const; - bool hasTemporaryData() const { return !tmp_files.empty(); } + bool hasTemporaryData() const; - std::vector & getTemporaryData(); + std::list & getTemporaryData(); /// Get data structure of the result. Block getHeader(bool final) const; @@ -356,7 +356,8 @@ private: /// For external aggregation. TemporaryDataOnDiskScopePtr tmp_data; - mutable std::vector tmp_files; + mutable std::mutex tmp_files_mutex; + mutable std::list tmp_files; size_t min_bytes_for_prefetch = 0; From a5b9083f2c2f03345f1b14630d9bae8c25996697 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 18 Oct 2024 14:40:47 +0000 Subject: [PATCH 089/566] f --- src/Interpreters/TemporaryDataOnDisk.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 6cc49fe83c8..c0c9d0a80c5 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -9,13 +9,14 @@ #include #include #include +#include #include #include #include #include #include -#include "Common/Exception.h" +#include namespace ProfileEvents { @@ -130,7 +131,7 @@ public: ~TemporaryFileOnLocalDisk() override try { - if (disk->exists(path_to_file)) + if (disk->existsFile(path_to_file)) { LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Removing temporary file '{}'", path_to_file); disk->removeRecursive(path_to_file); From 7d81ecb1835e7818020ad795e63d20372b6bf9ce Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 20 Oct 2024 02:16:50 +0200 Subject: [PATCH 090/566] Parallel compression --- src/Common/CurrentMetrics.cpp | 4 + src/Compression/CompressedWriteBuffer.cpp | 1 - src/Compression/ICompressionCodec.cpp | 11 ++ .../ParallelCompressedWriteBuffer.cpp | 118 ++++++++++++++++++ .../ParallelCompressedWriteBuffer.h | 87 +++++++++++++ 5 files changed, 220 insertions(+), 1 deletion(-) create mode 100644 src/Compression/ParallelCompressedWriteBuffer.cpp create mode 100644 src/Compression/ParallelCompressedWriteBuffer.h diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index bd62e7e8aae..da3b5557dbf 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -41,6 +41,10 @@ M(PostgreSQLConnection, "Number of client connections using PostgreSQL protocol") \ M(OpenFileForRead, "Number of files open for reading") \ M(OpenFileForWrite, "Number of files open for writing") \ + M(Compressing, "Number of compress operations using internal compression codecs") \ + M(Decompressing, "Number of decompress operations using internal compression codecs") \ + M(ParallelCompressedWriteBufferThreads, "Number of threads in all instances of ParallelCompressedWriteBuffer - these threads are doing parallel compression and writing") \ + M(ParallelCompressedWriteBufferWait, "Number of threads in all instances of ParallelCompressedWriteBuffer that are currently waiting for buffer to become available for writing") \ M(TotalTemporaryFiles, "Number of temporary files created") \ M(TemporaryFilesForSort, "Number of temporary files created for external sorting") \ M(TemporaryFilesForAggregation, "Number of temporary files created for external aggregation") \ diff --git a/src/Compression/CompressedWriteBuffer.cpp b/src/Compression/CompressedWriteBuffer.cpp index c3acfcb7da6..b6dab2a190e 100644 --- 
a/src/Compression/CompressedWriteBuffer.cpp +++ b/src/Compression/CompressedWriteBuffer.cpp @@ -2,7 +2,6 @@ #include #include -#include #include #include diff --git a/src/Compression/ICompressionCodec.cpp b/src/Compression/ICompressionCodec.cpp index 418667a3a8f..a31d0485982 100644 --- a/src/Compression/ICompressionCodec.cpp +++ b/src/Compression/ICompressionCodec.cpp @@ -5,11 +5,18 @@ #include #include #include +#include #include #include #include +namespace CurrentMetrics +{ + extern const Metric Compressing; + extern const Metric Decompressing; +} + namespace DB { @@ -80,6 +87,8 @@ UInt32 ICompressionCodec::compress(const char * source, UInt32 source_size, char { assert(source != nullptr && dest != nullptr); + CurrentMetrics::Increment metric_increment(CurrentMetrics::Compressing); + dest[0] = getMethodByte(); UInt8 header_size = getHeaderSize(); /// Write data from header_size @@ -93,6 +102,8 @@ UInt32 ICompressionCodec::decompress(const char * source, UInt32 source_size, ch { assert(source != nullptr && dest != nullptr); + CurrentMetrics::Increment metric_increment(CurrentMetrics::Decompressing); + UInt8 header_size = getHeaderSize(); if (source_size < header_size) throw Exception(decompression_error_code, diff --git a/src/Compression/ParallelCompressedWriteBuffer.cpp b/src/Compression/ParallelCompressedWriteBuffer.cpp new file mode 100644 index 00000000000..270c331e4df --- /dev/null +++ b/src/Compression/ParallelCompressedWriteBuffer.cpp @@ -0,0 +1,118 @@ +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include + + +namespace CurrentMetrics +{ + extern const Metric ParallelCompressedWriteBufferThreads; + extern const Metric ParallelCompressedWriteBufferWait; +} + +namespace DB +{ + +ParallelCompressedWriteBuffer::ParallelCompressedWriteBuffer( + WriteBuffer & out_, + CompressionCodecPtr codec_, + size_t buf_size_, + size_t num_threads_, + ThreadPool & pool_) + : WriteBuffer(nullptr, 0), out(out_), codec(codec_), buf_size(buf_size_), num_threads(num_threads_), pool(pool_) +{ + buffers.emplace_back(buf_size); + current_buffer = buffers.begin(); + BufferBase::set(current_buffer->uncompressed.data(), buf_size, 0); +} + +void ParallelCompressedWriteBuffer::nextImpl() +{ + if (!offset()) + return; + + std::unique_lock lock(mutex); + + /// The buffer will be compressed and processed in the thread. + current_buffer->busy = true; + pool.trySchedule([this, my_current_buffer = current_buffer, thread_group = CurrentThread::getGroup()] + { + SCOPE_EXIT_SAFE( + if (thread_group) + CurrentThread::detachFromGroupIfNotDetached(); + ); + + if (thread_group) + CurrentThread::attachToGroupIfDetached(thread_group); + setThreadName("ParallelCompres"); + + compress(my_current_buffer); + }); + + const BufferPair * previous_buffer = &*current_buffer; + ++current_buffer; + if (current_buffer == buffers.end()) + { + if (buffers.size() < num_threads) + { + /// If we didn't use all num_threads buffers yet, create a new one. + current_buffer = buffers.emplace(current_buffer, buf_size); + } + else + { + /// Otherwise, wrap around to the first buffer in the list. + current_buffer = buffers.begin(); + } + } + + /// Wait while the buffer becomes not busy + { + CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferWait); + cond.wait(lock, [&]{ return !current_buffer->busy; }); + } + + /// Now this buffer can be used. 
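/// Remember the predecessor so the background compression preserves the on-disk order of blocks,
/// and point the working buffer at the fresh buffer's uncompressed memory.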
+ current_buffer->previous = previous_buffer; + BufferBase::set(current_buffer->uncompressed.data(), buf_size, 0); +} + +void ParallelCompressedWriteBuffer::compress(Iterator buffer) +{ + CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferThreads); + + chassert(offset() <= INT_MAX); + UInt32 decompressed_size = static_cast(offset()); + UInt32 compressed_reserve_size = codec->getCompressedReserveSize(decompressed_size); + + buffer->compressed.resize(compressed_reserve_size); + UInt32 compressed_size = codec->compress(working_buffer.begin(), decompressed_size, buffer->compressed.data()); + + CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(buffer->compressed.data(), compressed_size); + + /// Wait while all previous buffers have been written. + { + CurrentMetrics::Increment metric_wait_increment(CurrentMetrics::ParallelCompressedWriteBufferWait); + std::unique_lock lock(mutex); + cond.wait(lock, [&]{ return !buffer->previous || !buffer->previous->busy; }); + } + + writeBinaryLittleEndian(checksum.low64, out); + writeBinaryLittleEndian(checksum.high64, out); + + out.write(buffer->compressed.data(), compressed_size); + + std::unique_lock lock(mutex); + buffer->busy = false; + cond.notify_all(); +} + +} diff --git a/src/Compression/ParallelCompressedWriteBuffer.h b/src/Compression/ParallelCompressedWriteBuffer.h new file mode 100644 index 00000000000..e824dcacb46 --- /dev/null +++ b/src/Compression/ParallelCompressedWriteBuffer.h @@ -0,0 +1,87 @@ +#pragma once + +#include +#include + +#include + +#include +#include +#include +#include +#include + + +namespace DB +{ + +/** Uses multi-buffering for parallel compression. + * When the buffer is filled, it will be compressed in the background, + * and a new buffer is created for the next input data. 
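 * Up to num_threads buffers are kept in flight; compressed blocks are written to the
 * underlying WriteBuffer in the same order the uncompressed data arrived.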
+ */ +class ParallelCompressedWriteBuffer final : public WriteBuffer +{ +public: + explicit ParallelCompressedWriteBuffer( + WriteBuffer & out_, + CompressionCodecPtr codec_, + size_t buf_size_, + size_t num_threads_, + ThreadPool & pool_); + + ~ParallelCompressedWriteBuffer() override; + + /// The amount of compressed data + size_t getCompressedBytes() + { + nextIfAtEnd(); + return out.count(); + } + + /// How many uncompressed bytes were written to the buffer + size_t getUncompressedBytes() + { + return count(); + } + + /// How many bytes are in the buffer (not yet compressed) + size_t getRemainingBytes() + { + nextIfAtEnd(); + return offset(); + } + +private: + void nextImpl() override; + void finalizeImpl() override; + + WriteBuffer & out; + CompressionCodecPtr codec; + size_t buf_size; + size_t num_threads; + ThreadPool & pool; + + struct BufferPair + { + BufferPair(size_t input_size) + : uncompressed(input_size) + { + } + + Memory<> uncompressed; + PODArray compressed; + const BufferPair * previous = nullptr; + bool busy = false; + }; + + std::mutex mutex; + std::condition_variable cond; + std::list buffers; + + using Iterator = std::list::iterator; + Iterator current_buffer; + + void compress(Iterator buffer); +}; + +} From 5e433ea537d42aca8fa1076f7c054b1b3dc83854 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 20 Oct 2024 03:11:16 +0200 Subject: [PATCH 091/566] Parallel compression: development --- programs/compressor/Compressor.cpp | 40 ++++++++++++++++--- .../ParallelCompressedWriteBuffer.cpp | 38 +++++++++++++++--- .../ParallelCompressedWriteBuffer.h | 3 ++ 3 files changed, 69 insertions(+), 12 deletions(-) diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index 050bb495024..aac56fba94a 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -17,6 +18,8 @@ #include #include #include +#include +#include #include @@ -29,6 +32,13 @@ namespace DB } } +namespace CurrentMetrics +{ + extern const Metric LocalThread; + extern const Metric LocalThreadActive; + extern const Metric LocalThreadScheduled; +} + namespace { @@ -77,12 +87,13 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) ("decompress,d", "decompress") ("offset-in-compressed-file", po::value()->default_value(0ULL), "offset to the compressed block (i.e. physical file offset)") ("offset-in-decompressed-block", po::value()->default_value(0ULL), "offset to the decompressed block (i.e. 
virtual offset)") - ("block-size,b", po::value()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size") + ("block-size,b", po::value()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size") ("hc", "use LZ4HC instead of LZ4") ("zstd", "use ZSTD instead of LZ4") ("deflate_qpl", "use deflate_qpl instead of LZ4") ("codec", po::value>()->multitoken(), "use codecs combination instead of LZ4") ("level", po::value(), "compression level for codecs specified via flags") + ("threads", po::value()->default_value(1), "number of threads for parallel compression") ("none", "use no compression instead of LZ4") ("stat", "print block statistics of compressed data") ("stacktrace", "print stacktrace of exception") @@ -111,7 +122,8 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) bool stat_mode = options.count("stat"); bool use_none = options.count("none"); print_stacktrace = options.count("stacktrace"); - unsigned block_size = options["block-size"].as(); + size_t block_size = options["block-size"].as(); + size_t num_threads = options["threads"].as(); std::vector codecs; if (options.count("codec")) codecs = options["codec"].as>(); @@ -119,6 +131,12 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) if ((use_lz4hc || use_zstd || use_deflate_qpl || use_none) && !codecs.empty()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, codec flags like --zstd and --codec options are mutually exclusive"); + if (num_threads < 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid value of `threads` parameter"); + + if (num_threads > 1 && decompress) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parallel mode is only implemented for compression (not for decompression)"); + if (!codecs.empty() && options.count("level")) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, --level is not compatible with --codec list"); @@ -149,7 +167,6 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) else codec = CompressionCodecFactory::instance().get(method_family, level); - std::unique_ptr rb; std::unique_ptr wb; @@ -190,9 +207,20 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) else { /// Compression - CompressedWriteBuffer to(*wb, codec, block_size); - copyData(*rb, to); - to.finalize(); + + if (num_threads == 1) + { + CompressedWriteBuffer to(*wb, codec, block_size); + copyData(*rb, to); + to.finalize(); + } + else + { + ThreadPool pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads); + ParallelCompressedWriteBuffer to(*wb, codec, block_size, num_threads, pool); + copyData(*rb, to); + to.finalize(); + } } } catch (...) 
diff --git a/src/Compression/ParallelCompressedWriteBuffer.cpp b/src/Compression/ParallelCompressedWriteBuffer.cpp index 270c331e4df..4ffb6056d18 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.cpp +++ b/src/Compression/ParallelCompressedWriteBuffer.cpp @@ -1,8 +1,9 @@ #include -#include +#include #include #include +#include #include #include @@ -30,6 +31,7 @@ ParallelCompressedWriteBuffer::ParallelCompressedWriteBuffer( ThreadPool & pool_) : WriteBuffer(nullptr, 0), out(out_), codec(codec_), buf_size(buf_size_), num_threads(num_threads_), pool(pool_) { + std::cerr << getThreadId() << " Create a new buffer 1\n"; buffers.emplace_back(buf_size); current_buffer = buffers.begin(); BufferBase::set(current_buffer->uncompressed.data(), buf_size, 0); @@ -44,6 +46,9 @@ void ParallelCompressedWriteBuffer::nextImpl() /// The buffer will be compressed and processed in the thread. current_buffer->busy = true; + current_buffer->sequence_num = current_sequence_num; + ++current_sequence_num; + current_buffer->uncompressed_size = offset(); pool.trySchedule([this, my_current_buffer = current_buffer, thread_group = CurrentThread::getGroup()] { SCOPE_EXIT_SAFE( @@ -65,15 +70,19 @@ void ParallelCompressedWriteBuffer::nextImpl() if (buffers.size() < num_threads) { /// If we didn't use all num_threads buffers yet, create a new one. + std::cerr << getThreadId() << " Create a new buffer " << (buffers.size() + 1) << "\n"; current_buffer = buffers.emplace(current_buffer, buf_size); } else { /// Otherwise, wrap around to the first buffer in the list. + std::cerr << getThreadId() << " Wrap around\n"; current_buffer = buffers.begin(); } } + if (current_buffer->busy) + std::cerr << getThreadId() << " Wait while the buffer " << current_buffer->sequence_num << " becomes not busy\n"; /// Wait while the buffer becomes not busy { CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferWait); @@ -85,26 +94,37 @@ void ParallelCompressedWriteBuffer::nextImpl() BufferBase::set(current_buffer->uncompressed.data(), buf_size, 0); } +void ParallelCompressedWriteBuffer::finalizeImpl() +{ + next(); + pool.wait(); +} + void ParallelCompressedWriteBuffer::compress(Iterator buffer) { + std::cerr << getThreadId() << " Compressing " << buffer->sequence_num << "...\n"; CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferThreads); - chassert(offset() <= INT_MAX); - UInt32 decompressed_size = static_cast(offset()); - UInt32 compressed_reserve_size = codec->getCompressedReserveSize(decompressed_size); + chassert(buffer->uncompressed_size <= INT_MAX); + UInt32 uncompressed_size = static_cast(buffer->uncompressed_size); + UInt32 compressed_reserve_size = codec->getCompressedReserveSize(uncompressed_size); buffer->compressed.resize(compressed_reserve_size); - UInt32 compressed_size = codec->compress(working_buffer.begin(), decompressed_size, buffer->compressed.data()); + UInt32 compressed_size = codec->compress(buffer->uncompressed.data(), uncompressed_size, buffer->compressed.data()); CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(buffer->compressed.data(), compressed_size); + if (buffer->previous && buffer->previous->busy) + std::cerr << getThreadId() << " Compressed " << buffer->sequence_num << ", waiting for prev buffer to be written...\n"; /// Wait while all previous buffers have been written. 
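/// The predicate also releases when the previous buffer has been recycled for a later chunk
/// (its sequence_num is now greater), which implies its earlier contents were already flushed.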
{ CurrentMetrics::Increment metric_wait_increment(CurrentMetrics::ParallelCompressedWriteBufferWait); std::unique_lock lock(mutex); - cond.wait(lock, [&]{ return !buffer->previous || !buffer->previous->busy; }); + cond.wait(lock, [&]{ return !buffer->previous || !buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num; }); } + std::cerr << getThreadId() << " Writing " << buffer->sequence_num << "...\n"; + writeBinaryLittleEndian(checksum.low64, out); writeBinaryLittleEndian(checksum.high64, out); @@ -115,4 +135,10 @@ void ParallelCompressedWriteBuffer::compress(Iterator buffer) cond.notify_all(); } +ParallelCompressedWriteBuffer::~ParallelCompressedWriteBuffer() +{ + if (!canceled) + finalize(); +} + } diff --git a/src/Compression/ParallelCompressedWriteBuffer.h b/src/Compression/ParallelCompressedWriteBuffer.h index e824dcacb46..ade49837f6b 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.h +++ b/src/Compression/ParallelCompressedWriteBuffer.h @@ -69,8 +69,10 @@ private: } Memory<> uncompressed; + size_t uncompressed_size = 0; PODArray compressed; const BufferPair * previous = nullptr; + size_t sequence_num = 0; bool busy = false; }; @@ -80,6 +82,7 @@ private: using Iterator = std::list::iterator; Iterator current_buffer; + size_t current_sequence_num = 0; void compress(Iterator buffer); }; From 7229ffd507db02a2b4c2a468ce1ac3dfcff26901 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 20 Oct 2024 03:15:10 +0200 Subject: [PATCH 092/566] Parallel compression: development --- src/Compression/ParallelCompressedWriteBuffer.cpp | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/src/Compression/ParallelCompressedWriteBuffer.cpp b/src/Compression/ParallelCompressedWriteBuffer.cpp index 4ffb6056d18..30eaba33570 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.cpp +++ b/src/Compression/ParallelCompressedWriteBuffer.cpp @@ -1,9 +1,7 @@ #include -#include #include #include -#include #include #include @@ -31,7 +29,6 @@ ParallelCompressedWriteBuffer::ParallelCompressedWriteBuffer( ThreadPool & pool_) : WriteBuffer(nullptr, 0), out(out_), codec(codec_), buf_size(buf_size_), num_threads(num_threads_), pool(pool_) { - std::cerr << getThreadId() << " Create a new buffer 1\n"; buffers.emplace_back(buf_size); current_buffer = buffers.begin(); BufferBase::set(current_buffer->uncompressed.data(), buf_size, 0); @@ -70,19 +67,15 @@ void ParallelCompressedWriteBuffer::nextImpl() if (buffers.size() < num_threads) { /// If we didn't use all num_threads buffers yet, create a new one. - std::cerr << getThreadId() << " Create a new buffer " << (buffers.size() + 1) << "\n"; current_buffer = buffers.emplace(current_buffer, buf_size); } else { /// Otherwise, wrap around to the first buffer in the list. 
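/// Reusing the oldest buffer bounds memory usage to num_threads buffers; if it is still
/// being compressed, the wait below blocks until it becomes free.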
- std::cerr << getThreadId() << " Wrap around\n"; current_buffer = buffers.begin(); } } - if (current_buffer->busy) - std::cerr << getThreadId() << " Wait while the buffer " << current_buffer->sequence_num << " becomes not busy\n"; /// Wait while the buffer becomes not busy { CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferWait); @@ -102,7 +95,6 @@ void ParallelCompressedWriteBuffer::finalizeImpl() void ParallelCompressedWriteBuffer::compress(Iterator buffer) { - std::cerr << getThreadId() << " Compressing " << buffer->sequence_num << "...\n"; CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferThreads); chassert(buffer->uncompressed_size <= INT_MAX); @@ -114,8 +106,6 @@ void ParallelCompressedWriteBuffer::compress(Iterator buffer) CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(buffer->compressed.data(), compressed_size); - if (buffer->previous && buffer->previous->busy) - std::cerr << getThreadId() << " Compressed " << buffer->sequence_num << ", waiting for prev buffer to be written...\n"; /// Wait while all previous buffers have been written. { CurrentMetrics::Increment metric_wait_increment(CurrentMetrics::ParallelCompressedWriteBufferWait); @@ -123,8 +113,6 @@ void ParallelCompressedWriteBuffer::compress(Iterator buffer) cond.wait(lock, [&]{ return !buffer->previous || !buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num; }); } - std::cerr << getThreadId() << " Writing " << buffer->sequence_num << "...\n"; - writeBinaryLittleEndian(checksum.low64, out); writeBinaryLittleEndian(checksum.high64, out); From 66024821cf591d790ba1017b25dc4ebd75e8ea41 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 20 Oct 2024 03:23:07 +0200 Subject: [PATCH 093/566] Parallel compression: development --- src/Compression/ParallelCompressedWriteBuffer.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Compression/ParallelCompressedWriteBuffer.cpp b/src/Compression/ParallelCompressedWriteBuffer.cpp index 30eaba33570..954fae242e4 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.cpp +++ b/src/Compression/ParallelCompressedWriteBuffer.cpp @@ -77,6 +77,7 @@ void ParallelCompressedWriteBuffer::nextImpl() } /// Wait while the buffer becomes not busy + if (current_buffer->busy) { CurrentMetrics::Increment metric_increment(CurrentMetrics::ParallelCompressedWriteBufferWait); cond.wait(lock, [&]{ return !current_buffer->busy; }); @@ -107,10 +108,11 @@ void ParallelCompressedWriteBuffer::compress(Iterator buffer) CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(buffer->compressed.data(), compressed_size); /// Wait while all previous buffers have been written. 
+ if (buffer->previous) { CurrentMetrics::Increment metric_wait_increment(CurrentMetrics::ParallelCompressedWriteBufferWait); std::unique_lock lock(mutex); - cond.wait(lock, [&]{ return !buffer->previous || !buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num; }); + cond.wait(lock, [&]{ return !buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num; }); } writeBinaryLittleEndian(checksum.low64, out); From d6e0da177744a9890ff269aad9168725f6a717c3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 20 Oct 2024 03:33:42 +0200 Subject: [PATCH 094/566] Less memcpy --- .../ParallelCompressedWriteBuffer.cpp | 54 +++++++++++++++---- 1 file changed, 43 insertions(+), 11 deletions(-) diff --git a/src/Compression/ParallelCompressedWriteBuffer.cpp b/src/Compression/ParallelCompressedWriteBuffer.cpp index 954fae242e4..1041a14979e 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.cpp +++ b/src/Compression/ParallelCompressedWriteBuffer.cpp @@ -102,23 +102,55 @@ void ParallelCompressedWriteBuffer::compress(Iterator buffer) UInt32 uncompressed_size = static_cast(buffer->uncompressed_size); UInt32 compressed_reserve_size = codec->getCompressedReserveSize(uncompressed_size); - buffer->compressed.resize(compressed_reserve_size); - UInt32 compressed_size = codec->compress(buffer->uncompressed.data(), uncompressed_size, buffer->compressed.data()); + /// If all previous buffers have been written, + /// and if the output buffer has the required capacity, + /// we can compress data directly into the output buffer. + size_t required_out_capacity = compressed_reserve_size + sizeof(CityHash_v1_0_2::uint128); + bool can_write_directly = false; - CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(buffer->compressed.data(), compressed_size); - - /// Wait while all previous buffers have been written. - if (buffer->previous) + if (!buffer->previous) + { + can_write_directly = out.available() >= required_out_capacity; + } + else { - CurrentMetrics::Increment metric_wait_increment(CurrentMetrics::ParallelCompressedWriteBufferWait); std::unique_lock lock(mutex); - cond.wait(lock, [&]{ return !buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num; }); + can_write_directly = (!buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num) + && out.available() >= required_out_capacity; } - writeBinaryLittleEndian(checksum.low64, out); - writeBinaryLittleEndian(checksum.high64, out); + if (can_write_directly) + { + char * out_compressed_ptr = out.position() + sizeof(CityHash_v1_0_2::uint128); + UInt32 compressed_size = codec->compress(working_buffer.begin(), uncompressed_size, out_compressed_ptr); - out.write(buffer->compressed.data(), compressed_size); + CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(out_compressed_ptr, compressed_size); + + writeBinaryLittleEndian(checksum.low64, out); + writeBinaryLittleEndian(checksum.high64, out); + + out.position() += compressed_size; + } + else + { + buffer->compressed.resize(compressed_reserve_size); + UInt32 compressed_size = codec->compress(buffer->uncompressed.data(), uncompressed_size, buffer->compressed.data()); + + CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(buffer->compressed.data(), compressed_size); + + /// Wait while all previous buffers have been written. 
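/// Unlike the direct path, which checked the ordering predicate under the mutex before
/// compressing straight into `out`, this fallback compressed into the scratch buffer
/// and must wait here before copying it to the output.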
+ if (buffer->previous) + { + CurrentMetrics::Increment metric_wait_increment(CurrentMetrics::ParallelCompressedWriteBufferWait); + std::unique_lock lock(mutex); + cond.wait(lock, [&]{ return !buffer->previous->busy || buffer->previous->sequence_num > buffer->sequence_num; }); + } + + writeBinaryLittleEndian(checksum.low64, out); + writeBinaryLittleEndian(checksum.high64, out); + + out.write(buffer->compressed.data(), compressed_size); + } std::unique_lock lock(mutex); buffer->busy = false; From b03d055aab13526bd25b3ea63017f02314cd8f67 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 20 Oct 2024 18:19:16 +0200 Subject: [PATCH 095/566] Fix clang-tidy --- src/Compression/ParallelCompressedWriteBuffer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Compression/ParallelCompressedWriteBuffer.h b/src/Compression/ParallelCompressedWriteBuffer.h index ade49837f6b..4d1dfc79797 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.h +++ b/src/Compression/ParallelCompressedWriteBuffer.h @@ -63,7 +63,7 @@ private: struct BufferPair { - BufferPair(size_t input_size) + explicit BufferPair(size_t input_size) : uncompressed(input_size) { } From 8f038e2e1cd3c6635f328c7bbece1573c225dfb3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 20 Oct 2024 23:08:22 +0200 Subject: [PATCH 096/566] Preparation --- .../MergeTree/MergeTreeDataPartWriterOnDisk.cpp | 10 +++++----- src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h | 5 +++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 58a67fc4ba2..a006e2da368 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -34,7 +34,7 @@ void MergeTreeDataPartWriterOnDisk::Stream::preFinalize() /// Also the order is important compressed_hashing.finalize(); - compressor.finalize(); + compressor->finalize(); plain_hashing.finalize(); if constexpr (!only_plain_file) @@ -92,8 +92,8 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( marks_file_extension{marks_file_extension_}, plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), plain_hashing(*plain_file), - compressor(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size), - compressed_hashing(compressor), + compressor(std::make_unique(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size)), + compressed_hashing(*compressor), marks_file(data_part_storage->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings)), marks_hashing(*marks_file), marks_compressor(marks_hashing, marks_compression_codec_, marks_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size), @@ -115,8 +115,8 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( data_file_extension{data_file_extension_}, plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), plain_hashing(*plain_file), - compressor(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size), - 
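The "Less memcpy" commit above adds a fast path: when no predecessor is still pending and the output buffer has room for the compressed block plus the 16-byte checksum, the codec compresses straight into the output, skipping the intermediate copy; otherwise it falls back to the scratch buffer as before. Below is a stripped-down sketch of that decision; Out, compress_bound and do_compress are placeholders standing in for the real WriteBuffer and codec, not actual ClickHouse APIs.

#include <cstddef>
#include <cstring>
#include <vector>

/// Illustrative only. `Out` models a WriteBuffer-like sink with a position and a
/// known remaining capacity; the codec is reduced to two stand-in functions.
struct Out
{
    std::vector<char> memory = std::vector<char>(1 << 20);
    size_t pos = 0;

    size_t available() const { return memory.size() - pos; }
    char * position() { return memory.data() + pos; }
    void advance(size_t n) { pos += n; }
    void write(const char * data, size_t n) { std::memcpy(position(), data, n); pos += n; }  /// simplified
};

constexpr size_t checksum_size = 16;                 /// sizeof(CityHash_v1_0_2::uint128) in the real code

size_t compress_bound(size_t uncompressed_size) { return uncompressed_size + 64; }            /// placeholder
size_t do_compress(const char * src, size_t size, char * dst) { std::memcpy(dst, src, size); return size; }

void write_block(Out & out, const char * src, size_t size, bool predecessor_pending, std::vector<char> & scratch)
{
    size_t required = compress_bound(size) + checksum_size;

    if (!predecessor_pending && out.available() >= required)
    {
        /// Fast path: compress directly after the 16 bytes reserved for the checksum.
        char * dst = out.position() + checksum_size;
        size_t compressed = do_compress(src, size, dst);
        std::memset(out.position(), 0, checksum_size);   /// the real code stores CityHash128(dst, compressed) here
        out.advance(checksum_size + compressed);
    }
    else
    {
        /// Slow path: compress into a scratch buffer, then (after the predecessor is
        /// finished) copy checksum and data into the output, as the code did before.
        scratch.resize(compress_bound(size));
        size_t compressed = do_compress(src, size, scratch.data());
        char checksum[checksum_size] = {};               /// placeholder for the real checksum
        out.write(checksum, checksum_size);
        out.write(scratch.data(), compressed);
    }
}

int main()
{
    Out out;
    std::vector<char> scratch;
    std::vector<char> block(1000, 'x');
    write_block(out, block.data(), block.size(), /*predecessor_pending=*/false, scratch);
    write_block(out, block.data(), block.size(), /*predecessor_pending=*/true, scratch);
}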
compressed_hashing(compressor), + compressor(std::make_unique(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size)), + compressed_hashing(*compressor), compress_marks(false) { } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 8d84442981e..3b6687dff99 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -44,7 +44,7 @@ public: /// Helper class, which holds chain of buffers to write data file with marks. /// It is used to write: one column, skip index or all columns (in compact format). - template + template struct Stream { Stream( @@ -76,7 +76,8 @@ public: /// compressed_hashing -> compressor -> plain_hashing -> plain_file std::unique_ptr plain_file; HashingWriteBuffer plain_hashing; - CompressedWriteBuffer compressor; + /// This could be either CompressedWriteBuffer or ParallelCompressedWriteBuffer + std::unique_ptr compressor; HashingWriteBuffer compressed_hashing; /// marks_compressed_hashing -> marks_compressor -> marks_hashing -> marks_file From 1236422559c5fd957f99e65f08e25d7a9806190e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 20 Oct 2024 23:28:23 +0200 Subject: [PATCH 097/566] Templates are shit --- .../MergeTreeDataPartWriterOnDisk.cpp | 83 +++++++++---------- .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 12 ++- .../MergeTree/MergeTreeDataPartWriterWide.cpp | 4 +- 3 files changed, 44 insertions(+), 55 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index a006e2da368..c250726aba1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -25,8 +25,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -template -void MergeTreeDataPartWriterOnDisk::Stream::preFinalize() +void MergeTreeDataPartWriterOnDisk::Stream::preFinalize() { /// Here the main goal is to do preFinalize calls for plain_file and marks_file /// Before that all hashing and compression buffers have to be finalized @@ -37,45 +36,42 @@ void MergeTreeDataPartWriterOnDisk::Stream::preFinalize() compressor->finalize(); plain_hashing.finalize(); - if constexpr (!only_plain_file) + if (marks_hashing) { if (compress_marks) { - marks_compressed_hashing.finalize(); - marks_compressor.finalize(); + marks_compressed_hashing->finalize(); + marks_compressor->finalize(); } - marks_hashing.finalize(); + marks_hashing->finalize(); } plain_file->preFinalize(); - if constexpr (!only_plain_file) + if (marks_file) marks_file->preFinalize(); is_prefinalized = true; } -template -void MergeTreeDataPartWriterOnDisk::Stream::finalize() +void MergeTreeDataPartWriterOnDisk::Stream::finalize() { if (!is_prefinalized) preFinalize(); plain_file->finalize(); - if constexpr (!only_plain_file) + if (marks_file) marks_file->finalize(); } -template -void MergeTreeDataPartWriterOnDisk::Stream::sync() const +void MergeTreeDataPartWriterOnDisk::Stream::sync() const { plain_file->sync(); - if constexpr (!only_plain_file) + if (marks_file) marks_file->sync(); } -template<> -MergeTreeDataPartWriterOnDisk::Stream::Stream( +MergeTreeDataPartWriterOnDisk::Stream::Stream( const String & escaped_column_name_, const MutableDataPartStoragePtr & data_part_storage, const String & data_path_, @@ -94,16 +90,15 
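The "Preparation" commit above turns the compressor member from a concrete CompressedWriteBuffer value into a std::unique_ptr to a common write-buffer base, while keeping the chain compressed_hashing -> compressor -> plain_hashing -> plain_file, so that a parallel implementation can be plugged in later without touching the callers. A small sketch of that pattern with made-up Sink types (not the real classes):

#include <iostream>
#include <memory>

/// Illustrative only: a member that used to be a concrete type becomes a pointer
/// to a common base, so different implementations can be chosen at run time.
struct Sink              { virtual ~Sink() = default; virtual void write(const char * s) = 0; };
struct SerialSink : Sink { void write(const char * s) override { std::cout << "[serial] "   << s << '\n'; } };
struct ParallelSink : Sink { void write(const char * s) override { std::cout << "[parallel] " << s << '\n'; } };

struct Stream
{
    /// Before: `SerialSink compressor;`  (the type is fixed at compile time).
    /// After:  a pointer to the base class, chosen by the constructor.
    std::unique_ptr<Sink> compressor;

    explicit Stream(bool parallel)
        : compressor(parallel ? std::unique_ptr<Sink>(std::make_unique<ParallelSink>())
                              : std::unique_ptr<Sink>(std::make_unique<SerialSink>()))
    {
    }
};

int main()
{
    Stream a(false), b(true);
    a.compressor->write("block");
    b.compressor->write("block");
}

The cost of this kind of change is one extra allocation and an indirection per stream, which is negligible next to the compression work itself.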
@@ MergeTreeDataPartWriterOnDisk::Stream::Stream( plain_hashing(*plain_file), compressor(std::make_unique(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size)), compressed_hashing(*compressor), - marks_file(data_part_storage->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings)), - marks_hashing(*marks_file), - marks_compressor(marks_hashing, marks_compression_codec_, marks_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size), - marks_compressed_hashing(marks_compressor), compress_marks(MarkType(marks_file_extension).compressed) { + marks_file = data_part_storage->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings); + marks_hashing.emplace(*marks_file); + marks_compressor.emplace(*marks_hashing, marks_compression_codec_, marks_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size); + marks_compressed_hashing.emplace(*marks_compressor); } -template<> -MergeTreeDataPartWriterOnDisk::Stream::Stream( +MergeTreeDataPartWriterOnDisk::Stream::Stream( const String & escaped_column_name_, const MutableDataPartStoragePtr & data_part_storage, const String & data_path_, @@ -121,8 +116,7 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( { } -template -void MergeTreeDataPartWriterOnDisk::Stream::addToChecksums(MergeTreeData::DataPart::Checksums & checksums) +void MergeTreeDataPartWriterOnDisk::Stream::addToChecksums(MergeTreeData::DataPart::Checksums & checksums) { String name = escaped_column_name; @@ -132,17 +126,17 @@ void MergeTreeDataPartWriterOnDisk::Stream::addToChecksums(Merg checksums.files[name + data_file_extension].file_size = plain_hashing.count(); checksums.files[name + data_file_extension].file_hash = plain_hashing.getHash(); - if constexpr (!only_plain_file) + if (marks_hashing) { if (compress_marks) { checksums.files[name + marks_file_extension].is_compressed = true; - checksums.files[name + marks_file_extension].uncompressed_size = marks_compressed_hashing.count(); - checksums.files[name + marks_file_extension].uncompressed_hash = marks_compressed_hashing.getHash(); + checksums.files[name + marks_file_extension].uncompressed_size = marks_compressed_hashing->count(); + checksums.files[name + marks_file_extension].uncompressed_hash = marks_compressed_hashing->getHash(); } - checksums.files[name + marks_file_extension].file_size = marks_hashing.count(); - checksums.files[name + marks_file_extension].file_hash = marks_hashing.getHash(); + checksums.files[name + marks_file_extension].file_size = marks_hashing->count(); + checksums.files[name + marks_file_extension].file_hash = marks_hashing->getHash(); } } @@ -276,12 +270,12 @@ void MergeTreeDataPartWriterOnDisk::initStatistics() for (const auto & stat_ptr : stats) { String stats_name = stat_ptr->getFileName(); - stats_streams.emplace_back(std::make_unique>( - stats_name, - data_part_storage, - stats_name, STATS_FILE_SUFFIX, - default_codec, settings.max_compress_block_size, - settings.query_write_settings)); + stats_streams.emplace_back(std::make_unique( + stats_name, + data_part_storage, + stats_name, STATS_FILE_SUFFIX, + default_codec, settings.max_compress_block_size, + settings.query_write_settings)); } } @@ -298,14 +292,14 @@ void MergeTreeDataPartWriterOnDisk::initSkipIndices() { String stream_name = skip_index->getFileName(); 
skip_indices_streams.emplace_back( - std::make_unique>( - stream_name, - data_part_storage, - stream_name, skip_index->getSerializedFileExtension(), - stream_name, marks_file_extension, - default_codec, settings.max_compress_block_size, - marks_compression_codec, settings.marks_compress_block_size, - settings.query_write_settings)); + std::make_unique( + stream_name, + data_part_storage, + stream_name, skip_index->getSerializedFileExtension(), + stream_name, marks_file_extension, + default_codec, settings.max_compress_block_size, + marks_compression_codec, settings.marks_compress_block_size, + settings.query_write_settings)); GinIndexStorePtr store = nullptr; if (typeid_cast(&*skip_index) != nullptr) @@ -381,7 +375,7 @@ void MergeTreeDataPartWriterOnDisk::calculateAndSerializeSkipIndices(const Block { const auto index_helper = skip_indices[i]; auto & stream = *skip_indices_streams[i]; - WriteBuffer & marks_out = stream.compress_marks ? stream.marks_compressed_hashing : stream.marks_hashing; + WriteBuffer & marks_out = stream.compress_marks ? *stream.marks_compressed_hashing : *stream.marks_hashing; GinIndexStorePtr store; if (typeid_cast(&*index_helper) != nullptr) @@ -564,7 +558,4 @@ Names MergeTreeDataPartWriterOnDisk::getSkipIndicesColumns() const return Names(skip_indexes_column_names_set.begin(), skip_indexes_column_names_set.end()); } -template struct MergeTreeDataPartWriterOnDisk::Stream; -template struct MergeTreeDataPartWriterOnDisk::Stream; - } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 3b6687dff99..0d80333368d 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -44,7 +44,6 @@ public: /// Helper class, which holds chain of buffers to write data file with marks. /// It is used to write: one column, skip index or all columns (in compact format). 
- template struct Stream { Stream( @@ -82,9 +81,9 @@ public: /// marks_compressed_hashing -> marks_compressor -> marks_hashing -> marks_file std::unique_ptr marks_file; - std::conditional_t marks_hashing; - std::conditional_t marks_compressor; - std::conditional_t marks_compressed_hashing; + std::optional marks_hashing; + std::optional marks_compressor; + std::optional marks_compressed_hashing; bool compress_marks; bool is_prefinalized = false; @@ -98,8 +97,7 @@ public: void addToChecksums(MergeTreeDataPartChecksums & checksums); }; - using StreamPtr = std::unique_ptr>; - using StatisticStreamPtr = std::unique_ptr>; + using StreamPtr = std::unique_ptr; MergeTreeDataPartWriterOnDisk( const String & data_part_name_, @@ -157,7 +155,7 @@ protected: const MergeTreeIndices skip_indices; const ColumnsStatistics stats; - std::vector stats_streams; + std::vector stats_streams; const String marks_file_extension; const CompressionCodecPtr default_codec; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 459ddc1ca79..d1d4aa4f5b0 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -187,7 +187,7 @@ void MergeTreeDataPartWriterWide::addStreams( query_write_settings.use_adaptive_write_buffer = settings.use_adaptive_write_buffer_for_dynamic_subcolumns && ISerialization::isDynamicSubcolumn(substream_path, substream_path.size()); query_write_settings.adaptive_write_buffer_initial_size = settings.adaptive_write_buffer_initial_size; - column_streams[stream_name] = std::make_unique>( + column_streams[stream_name] = std::make_unique( stream_name, data_part_storage, stream_name, DATA_FILE_EXTENSION, @@ -362,7 +362,7 @@ void MergeTreeDataPartWriterWide::writeSingleMark( void MergeTreeDataPartWriterWide::flushMarkToFile(const StreamNameAndMark & stream_with_mark, size_t rows_in_mark) { auto & stream = *column_streams[stream_with_mark.stream_name]; - WriteBuffer & marks_out = stream.compress_marks ? stream.marks_compressed_hashing : stream.marks_hashing; + WriteBuffer & marks_out = stream.compress_marks ? 
*stream.marks_compressed_hashing : *stream.marks_hashing; writeBinaryLittleEndian(stream_with_mark.mark.offset_in_compressed_file, marks_out); writeBinaryLittleEndian(stream_with_mark.mark.offset_in_decompressed_block, marks_out); From ab10830317d1a0d155aceec3f5285f299f661f02 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 21 Oct 2024 00:12:06 +0200 Subject: [PATCH 098/566] Preparation --- src/Common/CurrentMetrics.cpp | 3 + src/IO/WriteSettings.h | 2 + .../MergeTreeDataPartWriterOnDisk.cpp | 74 ++++++++++++++----- .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 11 +-- .../MergeTree/MergeTreeDataPartWriterWide.cpp | 13 ++-- 5 files changed, 72 insertions(+), 31 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index da3b5557dbf..c9737e2e846 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -103,6 +103,9 @@ M(IOThreads, "Number of threads in the IO thread pool.") \ M(IOThreadsActive, "Number of threads in the IO thread pool running a task.") \ M(IOThreadsScheduled, "Number of queued or active jobs in the IO thread pool.") \ + M(CompressionThread, "Number of threads in compression thread pools.") \ + M(CompressionThreadActive, "Number of threads in compression thread pools running a task.") \ + M(CompressionThreadScheduled, "Number of queued or active jobs in compression thread pools.") \ M(ThreadPoolRemoteFSReaderThreads, "Number of threads in the thread pool for remote_filesystem_read_method=threadpool.") \ M(ThreadPoolRemoteFSReaderThreadsActive, "Number of threads in the thread pool for remote_filesystem_read_method=threadpool running a task.") \ M(ThreadPoolRemoteFSReaderThreadsScheduled, "Number of queued or active jobs in the thread pool for remote_filesystem_read_method=threadpool.") \ diff --git a/src/IO/WriteSettings.h b/src/IO/WriteSettings.h index 94410f787f0..8016dede6ea 100644 --- a/src/IO/WriteSettings.h +++ b/src/IO/WriteSettings.h @@ -28,6 +28,8 @@ struct WriteSettings bool use_adaptive_write_buffer = false; size_t adaptive_write_buffer_initial_size = 16 * 1024; + size_t max_compression_threads = 1; + bool write_through_distributed_cache = false; DistributedCacheSettings distributed_cache_settings; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index c250726aba1..8047d1cf2e9 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -3,16 +3,26 @@ #include #include #include +#include #include + namespace ProfileEvents { -extern const Event MergeTreeDataWriterSkipIndicesCalculationMicroseconds; -extern const Event MergeTreeDataWriterStatisticsCalculationMicroseconds; + extern const Event MergeTreeDataWriterSkipIndicesCalculationMicroseconds; + extern const Event MergeTreeDataWriterStatisticsCalculationMicroseconds; +} + +namespace CurrentMetrics +{ + extern const Metric CompressionThread; + extern const Metric CompressionThreadActive; + extern const Metric CompressionThreadScheduled; } namespace DB { + namespace MergeTreeSetting { extern const MergeTreeSettingsUInt64 index_granularity; @@ -32,9 +42,9 @@ void MergeTreeDataPartWriterOnDisk::Stream::preFinalize() /// Otherwise some data might stuck in the buffers above plain_file and marks_file /// Also the order is important - compressed_hashing.finalize(); + compressed_hashing->finalize(); compressor->finalize(); - plain_hashing.finalize(); + plain_hashing->finalize(); if (marks_hashing) { 
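The de-templating above ("Templates are shit") replaces Stream<only_plain_file> and its std::conditional_t members with a single Stream whose marks-related members are std::optional, engaged only by the constructor that actually writes marks; call sites switch from '.' to '->' and guard on `if (marks_hashing)`. A condensed sketch of the same idea with plain std::ofstream members (illustrative only, not the real buffer types):

#include <fstream>
#include <optional>
#include <string>

/// Illustrative only: instead of a bool template parameter that adds or removes
/// members via std::conditional_t, keep one class and make the optional part
/// a std::optional that is engaged only by the constructor that needs it.
struct Stream
{
    std::ofstream data_file;
    std::optional<std::ofstream> marks_file;   /// engaged only when marks are written

    explicit Stream(const std::string & data_path)                      /// no marks
        : data_file(data_path)
    {
    }

    Stream(const std::string & data_path, const std::string & marks_path)
        : data_file(data_path)
    {
        marks_file.emplace(marks_path);
    }

    void finalize()
    {
        data_file.close();
        if (marks_file)          /// the run-time check replaces `if constexpr (!only_plain_file)`
            marks_file->close();
    }
};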
@@ -86,12 +96,36 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( escaped_column_name(escaped_column_name_), data_file_extension{data_file_extension_}, marks_file_extension{marks_file_extension_}, - plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), - plain_hashing(*plain_file), - compressor(std::make_unique(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size)), - compressed_hashing(*compressor), compress_marks(MarkType(marks_file_extension).compressed) { + plain_file = data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings); + plain_hashing.emplace(*plain_file); + + if (query_write_settings.max_compression_threads > 1) + { + compression_thread_pool.emplace( + CurrentMetrics::CompressionThread, CurrentMetrics::CompressionThreadActive, CurrentMetrics::CompressionThreadScheduled, + query_write_settings.max_compression_threads); + + compressor = std::make_unique( + *plain_hashing, + compression_codec_, + max_compress_block_size_, + query_write_settings.max_compression_threads, + *compression_thread_pool); + } + else + { + compressor = std::make_unique( + *plain_hashing, + compression_codec_, + max_compress_block_size_, + query_write_settings.use_adaptive_write_buffer, + query_write_settings.adaptive_write_buffer_initial_size); + } + + compressed_hashing.emplace(*compressor); + marks_file = data_part_storage->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings); marks_hashing.emplace(*marks_file); marks_compressor.emplace(*marks_hashing, marks_compression_codec_, marks_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size); @@ -110,7 +144,7 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( data_file_extension{data_file_extension_}, plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), plain_hashing(*plain_file), - compressor(std::make_unique(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size)), + compressor(std::make_unique(*plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size)), compressed_hashing(*compressor), compress_marks(false) { @@ -121,10 +155,10 @@ void MergeTreeDataPartWriterOnDisk::Stream::addToChecksums(MergeTreeData::DataPa String name = escaped_column_name; checksums.files[name + data_file_extension].is_compressed = true; - checksums.files[name + data_file_extension].uncompressed_size = compressed_hashing.count(); - checksums.files[name + data_file_extension].uncompressed_hash = compressed_hashing.getHash(); - checksums.files[name + data_file_extension].file_size = plain_hashing.count(); - checksums.files[name + data_file_extension].file_hash = plain_hashing.getHash(); + checksums.files[name + data_file_extension].uncompressed_size = compressed_hashing->count(); + checksums.files[name + data_file_extension].uncompressed_hash = compressed_hashing->getHash(); + checksums.files[name + data_file_extension].file_size = plain_hashing->count(); + checksums.files[name + data_file_extension].file_hash = plain_hashing->getHash(); if (marks_hashing) { @@ -391,7 +425,7 @@ 
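In the constructor above, a per-stream thread pool (tracked by the new CompressionThread* metrics) and a ParallelCompressedWriteBuffer are created only when query_write_settings.max_compression_threads > 1; otherwise the ordinary CompressedWriteBuffer is kept. A reduced sketch of that selection follows; ThreadPoolLike, SerialCompressor and ParallelCompressor are placeholders, not the real types.

#include <cstddef>
#include <memory>
#include <optional>

/// Illustrative placeholders for the real pool and buffer types.
struct ThreadPoolLike { explicit ThreadPoolLike(size_t /*threads*/) {} };
struct Compressor { virtual ~Compressor() = default; };
struct SerialCompressor : Compressor { explicit SerialCompressor(size_t /*block_size*/) {} };
struct ParallelCompressor : Compressor { ParallelCompressor(size_t /*block_size*/, size_t /*threads*/, ThreadPoolLike &) {} };

struct Stream
{
    std::optional<ThreadPoolLike> compression_thread_pool;  /// owned by the stream, created only if needed
    std::unique_ptr<Compressor> compressor;

    Stream(size_t block_size, size_t max_compression_threads)
    {
        if (max_compression_threads > 1)
        {
            compression_thread_pool.emplace(max_compression_threads);
            compressor = std::make_unique<ParallelCompressor>(block_size, max_compression_threads, *compression_thread_pool);
        }
        else
        {
            /// Default path: serial compression, exactly as before the patch.
            compressor = std::make_unique<SerialCompressor>(block_size);
        }
    }
};

int main()
{
    Stream serial(1 << 16, /*max_compression_threads=*/1);
    Stream parallel(1 << 16, /*max_compression_threads=*/8);
}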
void MergeTreeDataPartWriterOnDisk::calculateAndSerializeSkipIndices(const Block { if (skip_index_accumulated_marks[i] == index_helper->index.granularity) { - skip_indices_aggregators[i]->getGranuleAndReset()->serializeBinary(stream.compressed_hashing); + skip_indices_aggregators[i]->getGranuleAndReset()->serializeBinary(*stream.compressed_hashing); skip_index_accumulated_marks[i] = 0; } @@ -399,11 +433,11 @@ void MergeTreeDataPartWriterOnDisk::calculateAndSerializeSkipIndices(const Block { skip_indices_aggregators[i] = index_helper->createIndexAggregatorForPart(store, settings); - if (stream.compressed_hashing.offset() >= settings.min_compress_block_size) - stream.compressed_hashing.next(); + if (stream.compressed_hashing->offset() >= settings.min_compress_block_size) + stream.compressed_hashing->next(); - writeBinaryLittleEndian(stream.plain_hashing.count(), marks_out); - writeBinaryLittleEndian(stream.compressed_hashing.offset(), marks_out); + writeBinaryLittleEndian(stream.plain_hashing->count(), marks_out); + writeBinaryLittleEndian(stream.compressed_hashing->offset(), marks_out); /// Actually this numbers is redundant, but we have to store them /// to be compatible with the normal .mrk2 file format @@ -483,7 +517,7 @@ void MergeTreeDataPartWriterOnDisk::fillSkipIndicesChecksums(MergeTreeData::Data { auto & stream = *skip_indices_streams[i]; if (!skip_indices_aggregators[i]->empty()) - skip_indices_aggregators[i]->getGranuleAndReset()->serializeBinary(stream.compressed_hashing); + skip_indices_aggregators[i]->getGranuleAndReset()->serializeBinary(*stream.compressed_hashing); /// Register additional files written only by the full-text index. Required because otherwise DROP TABLE complains about unknown /// files. Note that the provided actual checksums are bogus. The problem is that at this point the file writes happened already and @@ -523,7 +557,7 @@ void MergeTreeDataPartWriterOnDisk::fillStatisticsChecksums(MergeTreeData::DataP for (size_t i = 0; i < stats.size(); i++) { auto & stream = *stats_streams[i]; - stats[i]->serialize(stream.compressed_hashing); + stats[i]->serialize(*stream.compressed_hashing); stream.preFinalize(); stream.addToChecksums(checksums); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 0d80333368d..046571cb83f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -27,7 +27,7 @@ struct Granule /// this granule can be continuation of the previous one. bool mark_on_start; /// if true: When this granule will be written to disk all rows for corresponding mark will - /// be wrtten. It doesn't mean that rows_to_write == index_granularity.getMarkRows(mark_number), + /// be written. It doesn't mean that rows_to_write == index_granularity.getMarkRows(mark_number), /// We may have a lot of small blocks between two marks and this may be the last one. 
bool is_complete; }; @@ -74,10 +74,10 @@ public: /// compressed_hashing -> compressor -> plain_hashing -> plain_file std::unique_ptr plain_file; - HashingWriteBuffer plain_hashing; + std::optional plain_hashing; /// This could be either CompressedWriteBuffer or ParallelCompressedWriteBuffer std::unique_ptr compressor; - HashingWriteBuffer compressed_hashing; + std::optional compressed_hashing; /// marks_compressed_hashing -> marks_compressor -> marks_hashing -> marks_file std::unique_ptr marks_file; @@ -88,10 +88,11 @@ public: bool is_prefinalized = false; + /// Thread pool for parallel compression. + std::optional compression_thread_pool; + void preFinalize(); - void finalize(); - void sync() const; void addToChecksums(MergeTreeDataPartChecksums & checksums); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index d1d4aa4f5b0..523e4c4a31d 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -9,6 +9,7 @@ #include #include + namespace DB { @@ -230,7 +231,7 @@ ISerialization::OutputStreamGetter MergeTreeDataPartWriterWide::createStreamGett if (is_offsets && offset_columns.contains(stream_name)) return nullptr; - return &column_streams.at(stream_name)->compressed_hashing; + return &column_streams.at(stream_name)->compressed_hashing.value(); }; } @@ -399,13 +400,13 @@ StreamsWithMarks MergeTreeDataPartWriterWide::getCurrentMarksForColumn( auto & stream = *column_streams[stream_name]; /// There could already be enough data to compress into the new block. - if (stream.compressed_hashing.offset() >= min_compress_block_size) - stream.compressed_hashing.next(); + if (stream.compressed_hashing->offset() >= min_compress_block_size) + stream.compressed_hashing->next(); StreamNameAndMark stream_with_mark; stream_with_mark.stream_name = stream_name; - stream_with_mark.mark.offset_in_compressed_file = stream.plain_hashing.count(); - stream_with_mark.mark.offset_in_decompressed_block = stream.compressed_hashing.offset(); + stream_with_mark.mark.offset_in_compressed_file = stream.plain_hashing->count(); + stream_with_mark.mark.offset_in_decompressed_block = stream.compressed_hashing->offset(); result.push_back(stream_with_mark); }, name_and_type.type, column_sample); @@ -438,7 +439,7 @@ void MergeTreeDataPartWriterWide::writeSingleGranule( if (is_offsets && offset_columns.contains(stream_name)) return; - column_streams.at(stream_name)->compressed_hashing.nextIfAtEnd(); + column_streams.at(stream_name)->compressed_hashing->nextIfAtEnd(); }, name_and_type.type, column.getPtr()); } From bb3bfa536ac54185fea01d23fa22d970a2cccf84 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 21 Oct 2024 00:35:01 +0200 Subject: [PATCH 099/566] Make it configurable --- src/IO/WriteSettings.h | 1 + .../MergeTree/IMergeTreeDataPartWriter.cpp | 22 ++++++++++++++++--- .../MergeTree/IMergedBlockOutputStream.cpp | 1 + .../MergeTree/IMergedBlockOutputStream.h | 1 + src/Storages/MergeTree/MergeTask.cpp | 9 +------- .../MergeTree/MergeTreeIOSettings.cpp | 2 ++ src/Storages/MergeTree/MergeTreeSettings.cpp | 3 +++ 7 files changed, 28 insertions(+), 11 deletions(-) diff --git a/src/IO/WriteSettings.h b/src/IO/WriteSettings.h index 8016dede6ea..4eeb01b5acc 100644 --- a/src/IO/WriteSettings.h +++ b/src/IO/WriteSettings.h @@ -4,6 +4,7 @@ #include #include + namespace DB { diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp 
b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp index 3d6366f9217..a9f188338e1 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp @@ -178,9 +178,24 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWriter( const MergeTreeIndexGranularity & computed_index_granularity) { if (part_type == MergeTreeDataPartType::Compact) - return createMergeTreeDataPartCompactWriter(data_part_name_, logger_name_, serializations_, data_part_storage_, - index_granularity_info_, storage_settings_, columns_list, column_positions, metadata_snapshot, virtual_columns, indices_to_recalc, stats_to_recalc_, - marks_file_extension_, default_codec_, writer_settings, computed_index_granularity); + return createMergeTreeDataPartCompactWriter( + data_part_name_, + logger_name_, + serializations_, + data_part_storage_, + index_granularity_info_, + storage_settings_, + columns_list, + column_positions, + metadata_snapshot, + virtual_columns, + indices_to_recalc, + stats_to_recalc_, + marks_file_extension_, + default_codec_, + writer_settings, + computed_index_granularity); + if (part_type == MergeTreeDataPartType::Wide) return createMergeTreeDataPartWideWriter( data_part_name_, @@ -198,6 +213,7 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWriter( default_codec_, writer_settings, computed_index_granularity); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown part type: {}", part_type.toString()); } diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp index eb904a8e2ef..209b274ee6a 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp @@ -4,6 +4,7 @@ #include #include + namespace DB { diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h index cfcfb177e05..f67cf66ee50 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -7,6 +7,7 @@ #include #include + namespace DB { diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 74d6d60ba1b..b03fb1b12cf 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -6,11 +6,8 @@ #include #include -#include #include #include -#include -#include #include #include #include @@ -20,10 +17,8 @@ #include #include #include -#include #include #include -#include #include #include #include @@ -34,9 +29,6 @@ #include #include #include -#include -#include -#include #include #include #include @@ -48,6 +40,7 @@ #include #include + namespace ProfileEvents { extern const Event Merge; diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.cpp b/src/Storages/MergeTree/MergeTreeIOSettings.cpp index 8b87c35b4e6..6705d75af41 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeIOSettings.cpp @@ -26,6 +26,7 @@ namespace MergeTreeSetting extern const MergeTreeSettingsString primary_key_compression_codec; extern const MergeTreeSettingsBool use_adaptive_write_buffer_for_dynamic_subcolumns; extern const MergeTreeSettingsBool use_compact_variant_discriminators_serialization; + extern const MergeTreeSettingsUInt64 max_compression_threads; } MergeTreeWriterSettings::MergeTreeWriterSettings( @@ -54,6 +55,7 @@ MergeTreeWriterSettings::MergeTreeWriterSettings( , 
use_adaptive_write_buffer_for_dynamic_subcolumns((*storage_settings)[MergeTreeSetting::use_adaptive_write_buffer_for_dynamic_subcolumns]) , adaptive_write_buffer_initial_size((*storage_settings)[MergeTreeSetting::adaptive_write_buffer_initial_size]) { + query_write_settings.max_compression_threads = (*storage_settings)[MergeTreeSetting::max_compression_threads]; } } diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 86d95aee242..4e7d0c0a721 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -53,6 +53,9 @@ namespace ErrorCodes M(Bool, load_existing_rows_count_for_old_parts, false, "Whether to load existing_rows_count for existing parts. If false, existing_rows_count will be equal to rows_count for existing parts.", 0) \ M(Bool, use_compact_variant_discriminators_serialization, true, "Use compact version of Variant discriminators serialization.", 0) \ \ + /** Merge and insert settings */ \ + M(UInt64, max_compression_threads, 1, "Maximum number of threads for writing compressed data. This is an expert-level setting, do not change it.", 0) \ + \ /** Merge selector settings. */ \ M(UInt64, merge_selector_blurry_base_scale_factor, 0, "Controls when the logic kicks in relatively to the number of parts in partition. The bigger the factor the more belated reaction will be.", 0) \ M(UInt64, merge_selector_window_size, 1000, "How many parts to look at once.", 0) \ From 0f3f15338d5c85fa159a85e929c689d69610d4ef Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 21 Oct 2024 02:09:15 +0200 Subject: [PATCH 100/566] Something --- src/Common/ThreadPool.h | 4 +-- .../ParallelCompressedWriteBuffer.cpp | 2 +- src/Dictionaries/HashedDictionary.h | 28 +++++++++++-------- .../Transforms/AggregatingTransform.h | 2 +- 4 files changed, 20 insertions(+), 16 deletions(-) diff --git a/src/Common/ThreadPool.h b/src/Common/ThreadPool.h index 7e497245acc..b52e4a60571 100644 --- a/src/Common/ThreadPool.h +++ b/src/Common/ThreadPool.h @@ -122,7 +122,7 @@ public: void scheduleOrThrowOnError(Job job, Priority priority = {}); /// Similar to scheduleOrThrowOnError(...). Wait for specified amount of time and schedule a job or return false. - bool trySchedule(Job job, Priority priority = {}, uint64_t wait_microseconds = 0) noexcept; + [[nodiscard]] bool trySchedule(Job job, Priority priority = {}, uint64_t wait_microseconds = 0) noexcept; /// Similar to scheduleOrThrowOnError(...). Wait for specified amount of time and schedule a job or throw an exception. 
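The "Make it configurable" commit above adds a max_compression_threads MergeTree setting (default 1, flagged as an expert-level knob) and copies it into WriteSettings inside MergeTreeWriterSettings, which is what eventually reaches the stream constructor shown earlier. A minimal sketch of that plumbing with simplified stand-in structs:

#include <cstddef>
#include <iostream>

/// Illustrative only: a table-level setting is copied into the per-write settings
/// object that the writer code actually consumes.
struct StorageSettings
{
    size_t max_compression_threads = 1;     /// expert-level knob, 1 means serial compression
};

struct WriteSettings
{
    size_t max_compression_threads = 1;
};

struct WriterSettings
{
    WriteSettings query_write_settings;

    explicit WriterSettings(const StorageSettings & storage_settings)
    {
        /// the counterpart of:
        /// query_write_settings.max_compression_threads = (*storage_settings)[MergeTreeSetting::max_compression_threads];
        query_write_settings.max_compression_threads = storage_settings.max_compression_threads;
    }
};

int main()
{
    StorageSettings table_settings;
    table_settings.max_compression_threads = 8;

    WriterSettings writer(table_settings);
    std::cout << writer.query_write_settings.max_compression_threads << '\n';   /// prints 8
}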
void scheduleOrThrow(Job job, Priority priority = {}, uint64_t wait_microseconds = 0, bool propagate_opentelemetry_tracing_context = true); @@ -142,7 +142,7 @@ public: /// Returns true if the pool already terminated /// (and any further scheduling will produce CANNOT_SCHEDULE_TASK exception) - bool finished() const; + [[nodiscard]] bool finished() const; void setMaxThreads(size_t value); void setMaxFreeThreads(size_t value); diff --git a/src/Compression/ParallelCompressedWriteBuffer.cpp b/src/Compression/ParallelCompressedWriteBuffer.cpp index 1041a14979e..bd8d6371501 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.cpp +++ b/src/Compression/ParallelCompressedWriteBuffer.cpp @@ -46,7 +46,7 @@ void ParallelCompressedWriteBuffer::nextImpl() current_buffer->sequence_num = current_sequence_num; ++current_sequence_num; current_buffer->uncompressed_size = offset(); - pool.trySchedule([this, my_current_buffer = current_buffer, thread_group = CurrentThread::getGroup()] + pool.scheduleOrThrowOnError([this, my_current_buffer = current_buffer, thread_group = CurrentThread::getGroup()] { SCOPE_EXIT_SAFE( if (thread_group) diff --git a/src/Dictionaries/HashedDictionary.h b/src/Dictionaries/HashedDictionary.h index 7e935fe4855..ec5bc0a8a35 100644 --- a/src/Dictionaries/HashedDictionary.h +++ b/src/Dictionaries/HashedDictionary.h @@ -334,22 +334,26 @@ HashedDictionary::~HashedDictionary() if (container.empty()) return; - pool.trySchedule([&container, thread_group = CurrentThread::getGroup()] - { - SCOPE_EXIT_SAFE( + if (!pool.trySchedule([&container, thread_group = CurrentThread::getGroup()] + { + SCOPE_EXIT_SAFE( + if (thread_group) + CurrentThread::detachFromGroupIfNotDetached(); + ); + + /// Do not account memory that was occupied by the dictionaries for the query/user context. + MemoryTrackerBlockerInThread memory_blocker; + if (thread_group) - CurrentThread::detachFromGroupIfNotDetached(); - ); + CurrentThread::attachToGroupIfDetached(thread_group); + setThreadName("HashedDictDtor"); - /// Do not account memory that was occupied by the dictionaries for the query/user context. + clearContainer(container); + })) + { MemoryTrackerBlockerInThread memory_blocker; - - if (thread_group) - CurrentThread::attachToGroupIfDetached(thread_group); - setThreadName("HashedDictDtor"); - clearContainer(container); - }); + } ++hash_tables_count; }; diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h index b9212375c91..398d7efa97e 100644 --- a/src/Processors/Transforms/AggregatingTransform.h +++ b/src/Processors/Transforms/AggregatingTransform.h @@ -107,7 +107,7 @@ struct ManyAggregatedData if (variant->aggregator) { // variant is moved here and will be destroyed in the destructor of the lambda function. 
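The changes above mark ThreadPool::trySchedule (and finished) as [[nodiscard]] and fix call sites that ignored the result: ParallelCompressedWriteBuffer and AggregatingTransform switch to scheduleOrThrowOnError because silently dropping the job would be incorrect, while HashedDictionary keeps trySchedule but falls back to doing the cleanup in the current thread when the pool refuses. A sketch of those two policies against a toy pool (ToyPool is not the real ThreadPool, and it runs jobs inline for brevity):

#include <functional>
#include <iostream>
#include <stdexcept>

/// Illustrative only: a pool that can refuse work.
struct ToyPool
{
    bool full = false;

    [[nodiscard]] bool trySchedule(std::function<void()> job)
    {
        if (full)
            return false;         /// the caller must handle the rejected job
        job();                    /// "asynchronous" execution, inlined here for brevity
        return true;
    }

    void scheduleOrThrowOnError(std::function<void()> job)
    {
        if (!trySchedule(std::move(job)))
            throw std::runtime_error("cannot schedule a task");
    }
};

int main()
{
    ToyPool pool;
    pool.full = true;

    auto work = [] { std::cout << "clearing container\n"; };

    /// Policy 1 (HashedDictionary-style): degrade gracefully, do the work in this thread.
    if (!pool.trySchedule(work))
        work();

    /// Policy 2 (ParallelCompressedWriteBuffer-style): losing the job is not acceptable, so throw.
    try
    {
        pool.scheduleOrThrowOnError(work);
    }
    catch (const std::exception & e)
    {
        std::cout << "rejected: " << e.what() << '\n';
    }
}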
- pool->trySchedule( + pool->scheduleOrThrowOnError( [my_variant = std::move(variant), thread_group = CurrentThread::getGroup()]() { SCOPE_EXIT_SAFE( From dffaf9b9a5b69ec4e342192ecbf00ad2c90208d8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 21 Oct 2024 03:32:19 +0200 Subject: [PATCH 101/566] Fix error --- src/Compression/ParallelCompressedWriteBuffer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Compression/ParallelCompressedWriteBuffer.cpp b/src/Compression/ParallelCompressedWriteBuffer.cpp index bd8d6371501..08c2a78c80b 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.cpp +++ b/src/Compression/ParallelCompressedWriteBuffer.cpp @@ -122,7 +122,7 @@ void ParallelCompressedWriteBuffer::compress(Iterator buffer) if (can_write_directly) { char * out_compressed_ptr = out.position() + sizeof(CityHash_v1_0_2::uint128); - UInt32 compressed_size = codec->compress(working_buffer.begin(), uncompressed_size, out_compressed_ptr); + UInt32 compressed_size = codec->compress(buffer->uncompressed.data(), uncompressed_size, out_compressed_ptr); CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(out_compressed_ptr, compressed_size); From dba7c9cf4a990c2b29f8351b51f16a3614802499 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 21 Oct 2024 05:13:34 +0200 Subject: [PATCH 102/566] Add a test --- .../0_stateless/03254_parallel_compression.sql | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 tests/queries/0_stateless/03254_parallel_compression.sql diff --git a/tests/queries/0_stateless/03254_parallel_compression.sql b/tests/queries/0_stateless/03254_parallel_compression.sql new file mode 100644 index 00000000000..a17deed7d8c --- /dev/null +++ b/tests/queries/0_stateless/03254_parallel_compression.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS test2; + +CREATE TABLE test2 +( + k UInt64 +) ENGINE = MergeTree ORDER BY k SETTINGS min_compress_block_size = 10240, min_bytes_for_wide_part = 1, max_compression_threads = 64; + +INSERT INTO test2 SELECT number FROM numbers(20000); +SELECT sum(k) = (9999 * 10000 / 2 + 10000 * 9999) FROM test2 WHERE k > 10000; + +DROP TABLE test2; From e6bae901ed9a149e81cdf50e358470326b140c32 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 21 Oct 2024 18:39:20 +0200 Subject: [PATCH 103/566] Incomplete --- .../ParallelCompressedWriteBuffer.cpp | 6 ++++- .../ParallelCompressedWriteBuffer.h | 26 ++++++------------- .../MergeTreeDataPartWriterOnDisk.cpp | 2 ++ .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 1 + .../MergeTree/MergeTreeDataPartWriterWide.cpp | 19 +++++++++----- 5 files changed, 29 insertions(+), 25 deletions(-) diff --git a/src/Compression/ParallelCompressedWriteBuffer.cpp b/src/Compression/ParallelCompressedWriteBuffer.cpp index 08c2a78c80b..303e1ece68a 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.cpp +++ b/src/Compression/ParallelCompressedWriteBuffer.cpp @@ -44,6 +44,8 @@ void ParallelCompressedWriteBuffer::nextImpl() /// The buffer will be compressed and processed in the thread. 
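The one-line "Fix error" above compresses from buffer->uncompressed.data() instead of working_buffer.begin(): by the time the worker thread runs, working_buffer already points at the next slot the producer is filling, so the job must only touch the state captured in its own BufferPair. A toy illustration of that hazard, using only the standard library and none of the real classes:

#include <future>
#include <iostream>
#include <string>

/// Illustrative only: the producer keeps mutating `cursor` after scheduling the job,
/// so the job must use the snapshot that was handed to it, not the shared cursor.
int main()
{
    std::string cursor = "block 1";            /// plays the role of working_buffer

    /// Correct: the job owns a copy of the data that existed at scheduling time.
    std::string snapshot = cursor;
    auto job = std::async(std::launch::deferred, [snapshot] { return snapshot; });

    cursor = "block 2";                        /// the producer has already moved on

    std::cout << job.get() << '\n';            /// prints "block 1", as intended
    /// Reading `cursor` inside the job instead would have seen "block 2" (or raced).
}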
current_buffer->busy = true; current_buffer->sequence_num = current_sequence_num; + current_buffer->out_callback = callback; + callback = {}; ++current_sequence_num; current_buffer->uncompressed_size = offset(); pool.scheduleOrThrowOnError([this, my_current_buffer = current_buffer, thread_group = CurrentThread::getGroup()] @@ -60,7 +62,7 @@ void ParallelCompressedWriteBuffer::nextImpl() compress(my_current_buffer); }); - const BufferPair * previous_buffer = &*current_buffer; + BufferPair * previous_buffer = &*current_buffer; ++current_buffer; if (current_buffer == buffers.end()) { @@ -153,6 +155,8 @@ void ParallelCompressedWriteBuffer::compress(Iterator buffer) } std::unique_lock lock(mutex); + if (buffer->out_callback) + buffer->out_callback(); buffer->busy = false; cond.notify_all(); } diff --git a/src/Compression/ParallelCompressedWriteBuffer.h b/src/Compression/ParallelCompressedWriteBuffer.h index 4d1dfc79797..8c5f249b06c 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.h +++ b/src/Compression/ParallelCompressedWriteBuffer.h @@ -31,24 +31,11 @@ public: ~ParallelCompressedWriteBuffer() override; - /// The amount of compressed data - size_t getCompressedBytes() + /// This function will be called once after compressing the next data and sending it to the out. + /// It can be used to fill information about marks. + void setCompletionCallback(std::function callback_) { - nextIfAtEnd(); - return out.count(); - } - - /// How many uncompressed bytes were written to the buffer - size_t getUncompressedBytes() - { - return count(); - } - - /// How many bytes are in the buffer (not yet compressed) - size_t getRemainingBytes() - { - nextIfAtEnd(); - return offset(); + callback = callback_; } private: @@ -71,15 +58,18 @@ private: Memory<> uncompressed; size_t uncompressed_size = 0; PODArray compressed; - const BufferPair * previous = nullptr; + BufferPair * previous = nullptr; size_t sequence_num = 0; bool busy = false; + std::function out_callback; }; std::mutex mutex; std::condition_variable cond; std::list buffers; + std::function callback; + using Iterator = std::list::iterator; Iterator current_buffer; size_t current_sequence_num = 0; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 8047d1cf2e9..89db8174636 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -113,6 +113,8 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( max_compress_block_size_, query_write_settings.max_compression_threads, *compression_thread_pool); + + is_compressor_parallel = true; } else { diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 046571cb83f..cb46785ccbd 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -76,6 +76,7 @@ public: std::unique_ptr plain_file; std::optional plain_hashing; /// This could be either CompressedWriteBuffer or ParallelCompressedWriteBuffer + bool is_compressor_parallel = false; std::unique_ptr compressor; std::optional compressed_hashing; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 523e4c4a31d..860722ba870 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -400,15 +400,22 @@ StreamsWithMarks 
MergeTreeDataPartWriterWide::getCurrentMarksForColumn( auto & stream = *column_streams[stream_name]; /// There could already be enough data to compress into the new block. + auto push_mark = [&] + { + StreamNameAndMark stream_with_mark; + stream_with_mark.stream_name = stream_name; + stream_with_mark.mark.offset_in_compressed_file = stream.plain_hashing->count(); + stream_with_mark.mark.offset_in_decompressed_block = stream.compressed_hashing->offset(); + result.push_back(stream_with_mark); + }; + if (stream.compressed_hashing->offset() >= min_compress_block_size) + { + stream.compressed_hashing->next(); + } - StreamNameAndMark stream_with_mark; - stream_with_mark.stream_name = stream_name; - stream_with_mark.mark.offset_in_compressed_file = stream.plain_hashing->count(); - stream_with_mark.mark.offset_in_decompressed_block = stream.compressed_hashing->offset(); - - result.push_back(stream_with_mark); + push_mark(); }, name_and_type.type, column_sample); return result; From e8fdacdeced2a1bab0600524db90fc6cb29aaaf2 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 21 Oct 2024 16:50:16 +0000 Subject: [PATCH 104/566] fix --- src/Interpreters/Cache/Metadata.cpp | 1 + src/Interpreters/TemporaryDataOnDisk.cpp | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 2ee985b1c31..6a2cca33a13 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -470,6 +470,7 @@ private: void CacheMetadata::cleanupThreadFunc() { + LOG_DEBUG(log, "Cleanup thread started"); while (true) { Key key; diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index c0c9d0a80c5..ea29afbe1fa 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -59,7 +59,7 @@ public: explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t max_file_size = 0) { const auto key = FileSegment::Key::random(); - LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file in cache with key {}", key); + LOG_TRACE(getLogger("TemporaryFileInLocalCache"), "Creating temporary file in cache with key {}", key); segment_holder = file_cache.set( key, 0, std::max(10_MiB, max_file_size), CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); @@ -270,6 +270,9 @@ std::unique_ptr TemporaryDataBuffer::read() { finishWriting(); + if (stat.compressed_size == 0 && stat.uncompressed_size == 0) + return std::make_unique(std::make_unique()); + /// Keep buffer size less that file size, to avoid memory overhead for large amounts of small files size_t buffer_size = std::min(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE); return std::make_unique(file_holder->read(buffer_size)); From 881f1a94ae72433a1b1c49ee76877a3af66b1527 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 21 Oct 2024 16:52:18 +0000 Subject: [PATCH 105/566] fix tidy --- src/Interpreters/TemporaryDataOnDisk.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index 86fa9e57e81..eab3571dd07 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -115,7 +115,7 @@ class WrapperGuard { public: template - WrapperGuard(std::unique_ptr holder_, Args && ... args) + explicit WrapperGuard(std::unique_ptr holder_, Args && ... 
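A note on the completion hook added a little earlier in the "Incomplete" commit: setCompletionCallback() stores a one-shot callback that travels with the scheduled buffer (out_callback) and fires once that block has been compressed and written, which is the point at which mark offsets could be recorded. The sketch below shows how a caller might use such a hook; since the commit itself is explicitly incomplete, the writer side here is hypothetical.

#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

/// Illustrative only: a sink that reports, through a one-shot callback, when the
/// next scheduled block has actually reached the output.
struct CallbackSink
{
    std::function<void()> callback;
    size_t bytes_written = 0;

    void setCompletionCallback(std::function<void()> callback_) { callback = std::move(callback_); }

    void writeBlock(size_t size)               /// stands in for "compress + write"
    {
        bytes_written += size;
        if (callback)
        {
            auto cb = std::move(callback);     /// one-shot, like `callback = {}` in the patch
            callback = {};
            cb();
        }
    }
};

int main()
{
    CallbackSink sink;
    std::vector<size_t> mark_offsets;

    /// Hypothetical writer: remember where the next mark starts once the block lands.
    sink.setCompletionCallback([&] { mark_offsets.push_back(sink.bytes_written); });
    sink.writeBlock(100);
    sink.writeBlock(50);                       /// no callback registered for this block

    std::cout << mark_offsets.size() << " mark(s), first at offset " << mark_offsets[0] << '\n';
}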
args) : holder(std::move(holder_)) , impl(std::make_unique(*holder, std::forward(args)...)) { @@ -127,7 +127,7 @@ public: const Impl * operator->() const { chassert(impl); chassert(holder); return impl.get(); } Impl & operator*() { chassert(impl); chassert(holder); return *impl; } const Impl & operator*() const { chassert(impl); chassert(holder); return *impl; } - operator bool() const { return impl != nullptr; } + operator bool() const { return impl != nullptr; } /// NOLINT const Holder * getHolder() const { return holder.get(); } Holder * getHolder() { return holder.get(); } From 0e702fc56d55900aaf9ce18696f18cd4855d9d17 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 21 Oct 2024 16:57:32 +0000 Subject: [PATCH 106/566] upd tests/integration/test_tmp_policy/test.py --- tests/integration/test_tmp_policy/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_tmp_policy/test.py b/tests/integration/test_tmp_policy/test.py index 870a70b127a..097f93660b2 100644 --- a/tests/integration/test_tmp_policy/test.py +++ b/tests/integration/test_tmp_policy/test.py @@ -35,8 +35,8 @@ def test_disk_selection(start_cluster): node.query(query, settings=settings) assert node.contains_in_log( - "Writing part of aggregation data into temporary file /disk1/" + "Writing part of aggregation data into temporary file.*/disk1/" ) assert node.contains_in_log( - "Writing part of aggregation data into temporary file /disk2/" + "Writing part of aggregation data into temporary file.*/disk2/" ) From eccf5dd15e91663adb0c54e045f6c87e789656b3 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 22 Oct 2024 13:19:42 +0000 Subject: [PATCH 107/566] better error message --- src/Interpreters/TemporaryDataOnDisk.cpp | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index ea29afbe1fa..60bfd379a72 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -15,6 +15,8 @@ #include #include +#include +#include #include #include @@ -98,7 +100,24 @@ public: { auto reservation = volume->reserve(max_file_size); if (!reservation) - throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk"); + { + auto disks = volume->getDisks(); + Strings disks_info; + for (const auto & d : disks) + { + auto to_double = [](auto x) { return static_cast(x); }; + disks_info.push_back(fmt::format("{}: available: {} unreserved: {}, total: {}, keeping: {}", + d->getName(), + ReadableSize(d->getAvailableSpace().transform(to_double).value_or(NaNOrZero())), + ReadableSize(d->getUnreservedSpace().transform(to_double).value_or(NaNOrZero())), + ReadableSize(d->getTotalSpace().transform(to_double).value_or(NaNOrZero())), + ReadableSize(d->getKeepingFreeSpace()))); + } + + throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, + "Not enough space on temporary disk, cannot reserve {} bytes on [{}]", + max_file_size, fmt::join(disks_info, ", ")); + } disk = reservation->getDisk(); } else From e3ebe51968acf6a43922f12a9443c8e17a9cabc2 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 23 Oct 2024 01:27:10 +0000 Subject: [PATCH 108/566] Make ParquetMetadata say whether bloom filter is present --- .../Impl/ParquetMetadataInputFormat.cpp | 5 +- .../02718_parquet_metadata_format.reference | 70 +++++++++++++++++-- .../02718_parquet_metadata_format.sh | 1 + 3 files changed, 69 insertions(+), 7 deletions(-) diff --git 
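The "better error message" commit above makes a failed temporary-disk reservation list every disk of the volume with its available, unreserved, total and keeping-free space (the real code wraps each size in ReadableSize and reads them from IDisk). A compact sketch of building such a message, assuming only the {fmt} library that the patch itself uses; DiskInfo is a stand-in, and the sizes are plain numbers here.

#include <cstddef>
#include <string>
#include <vector>

#include <fmt/format.h>                              /// the patch also formats this message with {fmt}
#include <fmt/ranges.h>                              /// for fmt::join

/// Illustrative only: one line of diagnostics per disk of the volume.
struct DiskInfo
{
    std::string name;
    double available = 0;
    double unreserved = 0;
    double total = 0;
    double keeping_free = 0;
};

std::string makeNotEnoughSpaceMessage(size_t max_file_size, const std::vector<DiskInfo> & disks)
{
    std::vector<std::string> disks_info;
    for (const auto & d : disks)
        disks_info.push_back(fmt::format(
            "{}: available: {}, unreserved: {}, total: {}, keeping: {}",
            d.name, d.available, d.unreserved, d.total, d.keeping_free));

    return fmt::format(
        "Not enough space on temporary disk, cannot reserve {} bytes on [{}]",
        max_file_size, fmt::join(disks_info, ", "));
}

int main()
{
    fmt::print("{}\n", makeNotEnoughSpaceMessage(1'000'000, {{"disk1", 1e8, 9e7, 1e9, 1e7}}));
}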
a/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp b/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp index 7fd6e93dd80..8264b565e39 100644 --- a/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp @@ -92,8 +92,9 @@ static NamesAndTypesList getHeaderForParquetMetadata() std::make_shared(std::make_shared()), std::make_shared(std::make_shared())}, Names{"num_values", "null_count", "distinct_count", "min", "max"}), + DataTypeFactory::instance().get("Bool"), }, - Names{"name", "path", "total_compressed_size", "total_uncompressed_size", "have_statistics", "statistics"}))}, + Names{"name", "path", "total_compressed_size", "total_uncompressed_size", "have_statistics", "statistics", "have_bloom_filter"}))}, Names{"num_columns", "num_rows", "total_uncompressed_size", "total_compressed_size", "columns"}))}, }; return names_and_types; @@ -350,6 +351,8 @@ void ParquetMetadataInputFormat::fillColumnChunksMetadata(const std::unique_ptr< fillColumnStatistics(column_chunk_metadata->statistics(), tuple_column.getColumn(5), row_group_metadata->schema()->Column(column_i)->type_length()); else tuple_column.getColumn(5).insertDefault(); + bool have_bloom_filter = column_chunk_metadata->bloom_filter_offset().has_value(); + assert_cast(tuple_column.getColumn(6)).insertValue(have_bloom_filter); } array_column.getOffsets().push_back(tuple_column.size()); } diff --git a/tests/queries/0_stateless/02718_parquet_metadata_format.reference b/tests/queries/0_stateless/02718_parquet_metadata_format.reference index 1f55c29da56..815968aeba5 100644 --- a/tests/queries/0_stateless/02718_parquet_metadata_format.reference +++ b/tests/queries/0_stateless/02718_parquet_metadata_format.reference @@ -78,7 +78,8 @@ "distinct_count": null, "min": "0", "max": "999" - } + }, + "have_bloom_filter": false }, { "name": "str", @@ -92,7 +93,8 @@ "distinct_count": null, "min": "Hello0", "max": "Hello999" - } + }, + "have_bloom_filter": false }, { "name": "mod", @@ -106,7 +108,8 @@ "distinct_count": null, "min": "0", "max": "8" - } + }, + "have_bloom_filter": false } ] }, @@ -128,7 +131,8 @@ "distinct_count": null, "min": "0", "max": "999" - } + }, + "have_bloom_filter": false }, { "name": "str", @@ -142,7 +146,8 @@ "distinct_count": null, "min": "Hello0", "max": "Hello999" - } + }, + "have_bloom_filter": false }, { "name": "mod", @@ -156,7 +161,8 @@ "distinct_count": null, "min": "0", "max": "8" - } + }, + "have_bloom_filter": false } ] } @@ -223,3 +229,55 @@ } 1 1 +{ + "num_columns": "1", + "num_rows": "5", + "num_row_groups": "1", + "format_version": "1.0", + "metadata_size": "267", + "total_uncompressed_size": "105", + "total_compressed_size": "128", + "columns": [ + { + "name": "ipv6", + "path": "ipv6", + "max_definition_level": "0", + "max_repetition_level": "0", + "physical_type": "FIXED_LEN_BYTE_ARRAY", + "logical_type": "None", + "compression": "GZIP", + "total_uncompressed_size": "105", + "total_compressed_size": "128", + "space_saved": "-21.9%", + "encodings": [ + "PLAIN", + "BIT_PACKED" + ] + } + ], + "row_groups": [ + { + "num_columns": "1", + "num_rows": "5", + "total_uncompressed_size": "105", + "total_compressed_size": "128", + "columns": [ + { + "name": "ipv6", + "path": "ipv6", + "total_compressed_size": "128", + "total_uncompressed_size": "105", + "have_statistics": true, + "statistics": { + "num_values": "5", + "null_count": "0", + "distinct_count": null, + "min": "27 32 150 125 17 250 66 31 157 44 75 218 51 50 19 144 ", + "max": 
"154 31 90 141 15 7 68 47 190 29 121 145 188 162 234 154 " + }, + "have_bloom_filter": true + } + ] + } + ] +} diff --git a/tests/queries/0_stateless/02718_parquet_metadata_format.sh b/tests/queries/0_stateless/02718_parquet_metadata_format.sh index 94d7f453850..c6371cff7a3 100755 --- a/tests/queries/0_stateless/02718_parquet_metadata_format.sh +++ b/tests/queries/0_stateless/02718_parquet_metadata_format.sh @@ -17,3 +17,4 @@ $CLICKHOUSE_LOCAL -q "select some_column from file('$CURDIR/data_parquet/02718_d $CLICKHOUSE_LOCAL -q "select num_columns from file('$CURDIR/data_parquet/02718_data.parquet', ParquetMetadata, 'num_columns Array(UInt32)')" 2>&1 | grep -c "BAD_ARGUMENTS" +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_parquet/ipv6_bloom_filter.gz.parquet', ParquetMetadata) format JSONEachRow" | python3 -m json.tool From 8a0c6897f8c349d4a63d1330c226ffcce849df9e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Thu, 24 Oct 2024 16:21:58 -0400 Subject: [PATCH 109/566] enable enable_job_stack_trace by default --- src/Core/Settings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 1790697d03e..d3c993250fb 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2830,7 +2830,7 @@ Limit on size of multipart/form-data content. This setting cannot be parsed from DECLARE(Bool, calculate_text_stack_trace, true, R"( Calculate text stack trace in case of exceptions during query execution. This is the default. It requires symbol lookups that may slow down fuzzing tests when a huge amount of wrong queries are executed. In normal cases, you should not disable this option. )", 0) \ - DECLARE(Bool, enable_job_stack_trace, false, R"( + DECLARE(Bool, enable_job_stack_trace, true, R"( Output stack trace of a job creator when job results in exception )", 0) \ DECLARE(Bool, allow_ddl, true, R"( From e19bf218f69448c9605f269ae7a3894bc24f0003 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 25 Oct 2024 06:12:50 +0000 Subject: [PATCH 110/566] Fix 'Unknown executor' when reading from stdin in clickhouse local --- programs/local/LocalServer.cpp | 5 +++++ programs/local/LocalServer.h | 2 +- src/Client/ClientBase.cpp | 3 ++- src/Client/ClientBase.h | 2 ++ .../03031_clickhouse_local_input.reference | 4 +++- .../0_stateless/03031_clickhouse_local_input.sh | 17 ++++++++++++++--- 6 files changed, 27 insertions(+), 6 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index b6b67724b0a..4b861d579ab 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -130,6 +130,11 @@ void applySettingsOverridesForLocal(ContextMutablePtr context) context->setSettings(settings); } +LocalServer::LocalServer() +{ + is_local = true; +} + Poco::Util::LayeredConfiguration & LocalServer::getClientConfiguration() { return config(); diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index 7e92e92d345..ced25dbdf90 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -23,7 +23,7 @@ namespace DB class LocalServer : public ClientApplicationBase, public Loggers { public: - LocalServer() = default; + LocalServer(); void initialize(Poco::Util::Application & self) override; diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 23aa7e841cb..b6223cf6872 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1748,7 +1748,8 @@ void 
ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des } else if (!is_interactive) { - sendDataFromStdin(sample, columns_description_for_query, parsed_query); + if (!is_local) + sendDataFromStdin(sample, columns_description_for_query, parsed_query); } else throw Exception(ErrorCodes::NO_DATA_TO_INSERT, "No data to insert"); diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index b06958f1d14..daf3ee7e3e4 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -263,6 +263,8 @@ protected: bool is_interactive = false; /// Use either interactive line editing interface or batch mode. bool delayed_interactive = false; + bool is_local = false; /// clickhouse-local, otherwise clickhouse-client + bool echo_queries = false; /// Print queries before execution in batch mode. bool ignore_error = false; /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode. diff --git a/tests/queries/0_stateless/03031_clickhouse_local_input.reference b/tests/queries/0_stateless/03031_clickhouse_local_input.reference index a6feeef100d..529f1832598 100644 --- a/tests/queries/0_stateless/03031_clickhouse_local_input.reference +++ b/tests/queries/0_stateless/03031_clickhouse_local_input.reference @@ -1,4 +1,6 @@ -# foo +# foo (pipe) +foo +# foo (file) foo # !foo # bar diff --git a/tests/queries/0_stateless/03031_clickhouse_local_input.sh b/tests/queries/0_stateless/03031_clickhouse_local_input.sh index e2f9cf48108..540e1203154 100755 --- a/tests/queries/0_stateless/03031_clickhouse_local_input.sh +++ b/tests/queries/0_stateless/03031_clickhouse_local_input.sh @@ -4,17 +4,28 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -tmp_file="$CUR_DIR/$CLICKHOUSE_DATABASE.txt" -echo '# foo' +tmp_file="$CUR_DIR/03031_$CLICKHOUSE_DATABASE.txt" +tmp_input="$CUR_DIR/03031_${CLICKHOUSE_DATABASE}_in.txt" + +echo '# foo (pipe)' $CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select * from input('x String') format LineAsString" << "$tmp_input" +$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select * from input('x String') format LineAsString" <"$tmp_input" +cat "$tmp_file" + echo '# !foo' $CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select * from input('x String') where x != 'foo' format LineAsString" << Date: Fri, 25 Oct 2024 08:24:30 +0000 Subject: [PATCH 111/566] Also fix 'Input initializer is not set' in another query --- src/Interpreters/InterpreterInsertQuery.cpp | 3 +-- .../0_stateless/03031_clickhouse_local_input.reference | 2 ++ tests/queries/0_stateless/03031_clickhouse_local_input.sh | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 80b9d91a248..797895e4a93 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -121,8 +121,7 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query) if (current_context->getSettingsRef()[Setting::allow_experimental_analyzer]) { - InterpreterSelectQueryAnalyzer interpreter_select(query.select, current_context, select_query_options); - header_block = interpreter_select.getSampleBlock(); + header_block = 
InterpreterSelectQueryAnalyzer::getSampleBlock(query.select, current_context, select_query_options); } else { diff --git a/tests/queries/0_stateless/03031_clickhouse_local_input.reference b/tests/queries/0_stateless/03031_clickhouse_local_input.reference index 529f1832598..c6e6b743759 100644 --- a/tests/queries/0_stateless/03031_clickhouse_local_input.reference +++ b/tests/queries/0_stateless/03031_clickhouse_local_input.reference @@ -7,3 +7,5 @@ foo bar # defaults bam +# inferred destination table structure +foo diff --git a/tests/queries/0_stateless/03031_clickhouse_local_input.sh b/tests/queries/0_stateless/03031_clickhouse_local_input.sh index 540e1203154..cfd8c2957bb 100755 --- a/tests/queries/0_stateless/03031_clickhouse_local_input.sh +++ b/tests/queries/0_stateless/03031_clickhouse_local_input.sh @@ -28,4 +28,8 @@ echo '# defaults' $CLICKHOUSE_LOCAL --input_format_tsv_empty_as_default=1 --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select y from input('x String, y String DEFAULT \\'bam\\'') format TSV" <<<$'foo\t' cat "$tmp_file" +echo '# inferred destination table structure' +$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'TSV') select * from input('x String') format LineAsString" <"$tmp_input" +cat "$tmp_file" + rm -f "${tmp_file:?}" "${tmp_input:?}" From 3da0b2573a5e13c715562d5f8e544480ebf9cc2b Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 25 Oct 2024 09:12:52 +0000 Subject: [PATCH 112/566] Better fix --- programs/local/LocalServer.cpp | 5 ----- programs/local/LocalServer.h | 2 +- src/Client/ClientBase.cpp | 8 ++++++-- src/Client/ClientBase.h | 2 -- src/Client/IServerConnection.h | 4 ++++ src/Client/LocalConnection.cpp | 5 +++++ src/Client/LocalConnection.h | 2 ++ .../0_stateless/03031_clickhouse_local_input.reference | 4 ++++ tests/queries/0_stateless/03031_clickhouse_local_input.sh | 8 ++++++++ 9 files changed, 30 insertions(+), 10 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 4b861d579ab..b6b67724b0a 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -130,11 +130,6 @@ void applySettingsOverridesForLocal(ContextMutablePtr context) context->setSettings(settings); } -LocalServer::LocalServer() -{ - is_local = true; -} - Poco::Util::LayeredConfiguration & LocalServer::getClientConfiguration() { return config(); diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index ced25dbdf90..7e92e92d345 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -23,7 +23,7 @@ namespace DB class LocalServer : public ClientApplicationBase, public Loggers { public: - LocalServer(); + LocalServer() = default; void initialize(Poco::Util::Application & self) override; diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index b6223cf6872..f5351b94a94 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1630,6 +1630,11 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des if (!parsed_insert_query) return; + /// If it's clickhouse-local, and the input data reading is already baked into the query pipeline, + /// don't read the data again here. 
+ if (!connection->isSendDataNeeded()) + return; + bool have_data_in_stdin = !is_interactive && !stdin_is_a_tty && isStdinNotEmptyAndValid(std_in); if (need_render_progress) @@ -1748,8 +1753,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des } else if (!is_interactive) { - if (!is_local) - sendDataFromStdin(sample, columns_description_for_query, parsed_query); + sendDataFromStdin(sample, columns_description_for_query, parsed_query); } else throw Exception(ErrorCodes::NO_DATA_TO_INSERT, "No data to insert"); diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index daf3ee7e3e4..b06958f1d14 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -263,8 +263,6 @@ protected: bool is_interactive = false; /// Use either interactive line editing interface or batch mode. bool delayed_interactive = false; - bool is_local = false; /// clickhouse-local, otherwise clickhouse-client - bool echo_queries = false; /// Print queries before execution in batch mode. bool ignore_error = false; /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode. diff --git a/src/Client/IServerConnection.h b/src/Client/IServerConnection.h index 6ab4234bca2..fe69be8788a 100644 --- a/src/Client/IServerConnection.h +++ b/src/Client/IServerConnection.h @@ -109,6 +109,10 @@ public: /// Send block of data; if name is specified, server will write it to external (temporary) table of that name. virtual void sendData(const Block & block, const String & name, bool scalar) = 0; + /// Whether the client needs to read and send the data for the INSERT. + /// False if the server will read the data through other means (in particular if clickhouse-local added input reading step directly into the query pipeline). + virtual bool isSendDataNeeded() const { return true; } + /// Send all contents of external (temporary) tables. 
virtual void sendExternalTablesData(ExternalTablesData & data) = 0; diff --git a/src/Client/LocalConnection.cpp b/src/Client/LocalConnection.cpp index e4915a77c83..4ca209c29c7 100644 --- a/src/Client/LocalConnection.cpp +++ b/src/Client/LocalConnection.cpp @@ -328,6 +328,11 @@ void LocalConnection::sendData(const Block & block, const String &, bool) sendProfileEvents(); } +bool LocalConnection::isSendDataNeeded() const +{ + return !state || state->input_pipeline == nullptr; +} + void LocalConnection::sendCancel() { state->is_cancelled = true; diff --git a/src/Client/LocalConnection.h b/src/Client/LocalConnection.h index b424c5b5aa3..a70ed6ffa7e 100644 --- a/src/Client/LocalConnection.h +++ b/src/Client/LocalConnection.h @@ -120,6 +120,8 @@ public: void sendData(const Block & block, const String & name/* = "" */, bool scalar/* = false */) override; + bool isSendDataNeeded() const override; + void sendExternalTablesData(ExternalTablesData &) override; void sendMergeTreeReadTaskResponse(const ParallelReadResponse & response) override; diff --git a/tests/queries/0_stateless/03031_clickhouse_local_input.reference b/tests/queries/0_stateless/03031_clickhouse_local_input.reference index c6e6b743759..bbb57da94ce 100644 --- a/tests/queries/0_stateless/03031_clickhouse_local_input.reference +++ b/tests/queries/0_stateless/03031_clickhouse_local_input.reference @@ -9,3 +9,7 @@ bar bam # inferred destination table structure foo +# direct +foo +# infile +foo diff --git a/tests/queries/0_stateless/03031_clickhouse_local_input.sh b/tests/queries/0_stateless/03031_clickhouse_local_input.sh index cfd8c2957bb..f271a5184fd 100755 --- a/tests/queries/0_stateless/03031_clickhouse_local_input.sh +++ b/tests/queries/0_stateless/03031_clickhouse_local_input.sh @@ -32,4 +32,12 @@ echo '# inferred destination table structure' $CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'TSV') select * from input('x String') format LineAsString" <"$tmp_input" cat "$tmp_file" +echo '# direct' +$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'LineAsString', 'x String') format LineAsString" <"$tmp_input" +cat "$tmp_file" + +echo '# infile' +$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -q "insert into function file('$tmp_file', 'LineAsString', 'x String') from infile '$tmp_input' format LineAsString" +cat "$tmp_file" + rm -f "${tmp_file:?}" "${tmp_input:?}" From 45e23584f4cee58bf9c0f0612e4799076c0d21e8 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 25 Oct 2024 09:15:53 +0000 Subject: [PATCH 113/566] Comment --- src/Client/ClientBase.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index f5351b94a94..6475d682b65 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1631,7 +1631,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des return; /// If it's clickhouse-local, and the input data reading is already baked into the query pipeline, - /// don't read the data again here. + /// don't read the data again here. This happens in some cases (e.g. input() table function) but not others (e.g. INFILE). 
if (!connection->isSendDataNeeded()) return; From 349af95cd1e2c998391bec9710bacb0458175835 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 25 Oct 2024 10:45:32 +0000 Subject: [PATCH 114/566] fix data race --- src/Interpreters/Cache/Metadata.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 6a2cca33a13..2ee985b1c31 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -470,7 +470,6 @@ private: void CacheMetadata::cleanupThreadFunc() { - LOG_DEBUG(log, "Cleanup thread started"); while (true) { Key key; From 084f878fb19995763e3db825752dc61c9d768b43 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 25 Oct 2024 11:10:33 +0000 Subject: [PATCH 115/566] log --- src/Interpreters/Aggregator.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index bb9e22e5a1b..2dd6513d498 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1504,6 +1504,7 @@ bool Aggregator::executeOnBlock(Columns columns, && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; + LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } @@ -1520,6 +1521,7 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si size_t rows = data_variants.size(); std::unique_lock lk(tmp_files_mutex); + LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: max_temp_file_size {}", __FILE__, __LINE__, max_temp_file_size); auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); lk.unlock(); @@ -2932,6 +2934,7 @@ bool Aggregator::mergeOnBlock(Block block, AggregatedDataVariants & result, bool && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; + LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } From 54b93953847699f1f9d14939bd1e0067d933dbba Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 25 Oct 2024 11:11:19 +0000 Subject: [PATCH 116/566] fix typo --- src/Storages/MergeTree/MergeTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 1009458574e..d781cef9f17 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -1614,7 +1614,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const sort_description, partition_key_columns, global_ctx->merging_params, - (is_vertical_merge ? ctx->rows_sources_temporary_file : nullptr), /// rows_sources temporaty file is used only for vertical merge + (is_vertical_merge ? 
ctx->rows_sources_temporary_file : nullptr), /// rows_sources' temporary file is used only for vertical merge (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], ctx->blocks_are_granules_size, From a7b23292f962eada087b2b7518c231b57ca71493 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Fri, 25 Oct 2024 17:58:43 +0000 Subject: [PATCH 117/566] add staleness to sql --- src/Analyzer/QueryTreeBuilder.cpp | 2 ++ src/Analyzer/Resolve/QueryAnalyzer.cpp | 43 ++++++++++++++++++++++-- src/Analyzer/Resolve/QueryAnalyzer.h | 3 +- src/Analyzer/SortNode.cpp | 8 +++++ src/Analyzer/SortNode.h | 21 +++++++++++- src/Parsers/ASTOrderByElement.cpp | 5 +++ src/Parsers/ASTOrderByElement.h | 3 ++ src/Parsers/CommonParsers.h | 1 + src/Parsers/ExpressionElementParsers.cpp | 6 ++++ src/Planner/Planner.cpp | 3 ++ src/Planner/PlannerActionsVisitor.cpp | 3 ++ src/Planner/PlannerSorting.cpp | 24 +++++++++++-- 12 files changed, 115 insertions(+), 7 deletions(-) diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index 39c59d27e2c..d3c88d39213 100644 --- a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -498,6 +498,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express sort_node->getFillTo() = buildExpression(order_by_element.getFillTo(), context); if (order_by_element.getFillStep()) sort_node->getFillStep() = buildExpression(order_by_element.getFillStep(), context); + if (order_by_element.getFillStaleness()) + sort_node->getFillStaleness() = buildExpression(order_by_element.getFillStaleness(), context); list_node->getNodes().push_back(std::move(sort_node)); } diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 381edee607d..ab29373f5fb 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -432,8 +432,13 @@ ProjectionName QueryAnalyzer::calculateWindowProjectionName(const QueryTreeNodeP return buffer.str(); } -ProjectionName QueryAnalyzer::calculateSortColumnProjectionName(const QueryTreeNodePtr & sort_column_node, const ProjectionName & sort_expression_projection_name, - const ProjectionName & fill_from_expression_projection_name, const ProjectionName & fill_to_expression_projection_name, const ProjectionName & fill_step_expression_projection_name) +ProjectionName QueryAnalyzer::calculateSortColumnProjectionName( + const QueryTreeNodePtr & sort_column_node, + const ProjectionName & sort_expression_projection_name, + const ProjectionName & fill_from_expression_projection_name, + const ProjectionName & fill_to_expression_projection_name, + const ProjectionName & fill_step_expression_projection_name, + const ProjectionName & fill_staleness_expression_projection_name) { auto & sort_node_typed = sort_column_node->as(); @@ -463,6 +468,9 @@ ProjectionName QueryAnalyzer::calculateSortColumnProjectionName(const QueryTreeN if (sort_node_typed.hasFillStep()) sort_column_projection_name_buffer << " STEP " << fill_step_expression_projection_name; + + if (sort_node_typed.hasFillStaleness()) + sort_column_projection_name_buffer << " STALENESS " << fill_staleness_expression_projection_name; } return sort_column_projection_name_buffer.str(); @@ -3993,6 +4001,7 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ ProjectionNames fill_from_expression_projection_names; ProjectionNames fill_to_expression_projection_names; ProjectionNames 
fill_step_expression_projection_names; + ProjectionNames fill_staleness_expression_projection_names; auto & sort_node_list_typed = sort_node_list->as(); for (auto & node : sort_node_list_typed.getNodes()) @@ -4083,11 +4092,38 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ fill_step_expression_projection_names_size); } + if (sort_node.hasFillStaleness()) + { + fill_staleness_expression_projection_names = resolveExpressionNode(sort_node.getFillStaleness(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + const auto * constant_node = sort_node.getFillStaleness()->as(); + if (!constant_node) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Sort FILL STALENESS expression must be constant with numeric or interval type. Actual {}. In scope {}", + sort_node.getFillStaleness()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + bool is_number = isColumnedAsNumber(constant_node->getResultType()); + bool is_interval = WhichDataType(constant_node->getResultType()).isInterval(); + if (!is_number && !is_interval) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Sort FILL STALENESS expression must be constant with numeric or interval type. Actual {}. In scope {}", + sort_node.getFillStaleness()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + size_t fill_staleness_expression_projection_names_size = fill_staleness_expression_projection_names.size(); + if (fill_staleness_expression_projection_names_size != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Sort FILL STALENESS expression expected 1 projection name. Actual {}", + fill_staleness_expression_projection_names_size); + } + auto sort_column_projection_name = calculateSortColumnProjectionName(node, sort_expression_projection_names[0], fill_from_expression_projection_names.empty() ? "" : fill_from_expression_projection_names.front(), fill_to_expression_projection_names.empty() ? "" : fill_to_expression_projection_names.front(), - fill_step_expression_projection_names.empty() ? "" : fill_step_expression_projection_names.front()); + fill_step_expression_projection_names.empty() ? "" : fill_step_expression_projection_names.front(), + fill_staleness_expression_projection_names.empty() ? 
"" : fill_staleness_expression_projection_names.front()); result_projection_names.push_back(std::move(sort_column_projection_name)); @@ -4095,6 +4131,7 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ fill_from_expression_projection_names.clear(); fill_to_expression_projection_names.clear(); fill_step_expression_projection_names.clear(); + fill_staleness_expression_projection_names.clear(); } return result_projection_names; diff --git a/src/Analyzer/Resolve/QueryAnalyzer.h b/src/Analyzer/Resolve/QueryAnalyzer.h index 0d4309843e6..d24bede561e 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.h +++ b/src/Analyzer/Resolve/QueryAnalyzer.h @@ -140,7 +140,8 @@ private: const ProjectionName & sort_expression_projection_name, const ProjectionName & fill_from_expression_projection_name, const ProjectionName & fill_to_expression_projection_name, - const ProjectionName & fill_step_expression_projection_name); + const ProjectionName & fill_step_expression_projection_name, + const ProjectionName & fill_staleness_expression_projection_name); QueryTreeNodePtr tryGetLambdaFromSQLUserDefinedFunctions(const std::string & function_name, ContextPtr context); diff --git a/src/Analyzer/SortNode.cpp b/src/Analyzer/SortNode.cpp index e891046626a..42c010e4784 100644 --- a/src/Analyzer/SortNode.cpp +++ b/src/Analyzer/SortNode.cpp @@ -69,6 +69,12 @@ void SortNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, si buffer << '\n' << std::string(indent + 2, ' ') << "FILL STEP\n"; getFillStep()->dumpTreeImpl(buffer, format_state, indent + 4); } + + if (hasFillStaleness()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "FILL STALENESS\n"; + getFillStaleness()->dumpTreeImpl(buffer, format_state, indent + 4); + } } bool SortNode::isEqualImpl(const IQueryTreeNode & rhs, CompareOptions) const @@ -132,6 +138,8 @@ ASTPtr SortNode::toASTImpl(const ConvertToASTOptions & options) const result->setFillTo(getFillTo()->toAST(options)); if (hasFillStep()) result->setFillStep(getFillStep()->toAST(options)); + if (hasFillStaleness()) + result->setFillStaleness(getFillStaleness()->toAST(options)); return result; } diff --git a/src/Analyzer/SortNode.h b/src/Analyzer/SortNode.h index 0ebdde61912..d9086dc9ed7 100644 --- a/src/Analyzer/SortNode.h +++ b/src/Analyzer/SortNode.h @@ -105,6 +105,24 @@ public: return children[fill_step_child_index]; } + /// Returns true if sort node has fill step, false otherwise + bool hasFillStaleness() const + { + return children[fill_staleness_child_index] != nullptr; + } + + /// Get fill step + const QueryTreeNodePtr & getFillStaleness() const + { + return children[fill_staleness_child_index]; + } + + /// Get fill step + QueryTreeNodePtr & getFillStaleness() + { + return children[fill_staleness_child_index]; + } + /// Get collator const std::shared_ptr & getCollator() const { @@ -144,7 +162,8 @@ private: static constexpr size_t fill_from_child_index = 1; static constexpr size_t fill_to_child_index = 2; static constexpr size_t fill_step_child_index = 3; - static constexpr size_t children_size = fill_step_child_index + 1; + static constexpr size_t fill_staleness_child_index = 4; + static constexpr size_t children_size = fill_staleness_child_index + 1; SortDirection sort_direction = SortDirection::ASCENDING; std::optional nulls_sort_direction; diff --git a/src/Parsers/ASTOrderByElement.cpp b/src/Parsers/ASTOrderByElement.cpp index 09193a8b5e1..d87c296d398 100644 --- a/src/Parsers/ASTOrderByElement.cpp +++ b/src/Parsers/ASTOrderByElement.cpp @@ -54,6 
+54,11 @@ void ASTOrderByElement::formatImpl(const FormatSettings & settings, FormatState settings.ostr << (settings.hilite ? hilite_keyword : "") << " STEP " << (settings.hilite ? hilite_none : ""); fill_step->formatImpl(settings, state, frame); } + if (auto fill_staleness = getFillStaleness()) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << " STALENESS " << (settings.hilite ? hilite_none : ""); + fill_staleness->formatImpl(settings, state, frame); + } } } diff --git a/src/Parsers/ASTOrderByElement.h b/src/Parsers/ASTOrderByElement.h index 6edf84d7bde..4dc35dac217 100644 --- a/src/Parsers/ASTOrderByElement.h +++ b/src/Parsers/ASTOrderByElement.h @@ -18,6 +18,7 @@ private: FILL_FROM, FILL_TO, FILL_STEP, + FILL_STALENESS, }; public: @@ -32,12 +33,14 @@ public: void setFillFrom(ASTPtr node) { setChild(Child::FILL_FROM, node); } void setFillTo(ASTPtr node) { setChild(Child::FILL_TO, node); } void setFillStep(ASTPtr node) { setChild(Child::FILL_STEP, node); } + void setFillStaleness(ASTPtr node) { setChild(Child::FILL_STALENESS, node); } /** Collation for locale-specific string comparison. If empty, then sorting done by bytes. */ ASTPtr getCollation() const { return getChild(Child::COLLATION); } ASTPtr getFillFrom() const { return getChild(Child::FILL_FROM); } ASTPtr getFillTo() const { return getChild(Child::FILL_TO); } ASTPtr getFillStep() const { return getChild(Child::FILL_STEP); } + ASTPtr getFillStaleness() const { return getChild(Child::FILL_STALENESS); } String getID(char) const override { return "OrderByElement"; } diff --git a/src/Parsers/CommonParsers.h b/src/Parsers/CommonParsers.h index 8ea9fb12b86..c10e4879214 100644 --- a/src/Parsers/CommonParsers.h +++ b/src/Parsers/CommonParsers.h @@ -541,6 +541,7 @@ namespace DB MR_MACROS(YY, "YY") \ MR_MACROS(YYYY, "YYYY") \ MR_MACROS(ZKPATH, "ZKPATH") \ + MR_MACROS(STALENESS, "STALENESS") \ /// The list of keywords where underscore is intentional #define APPLY_FOR_PARSER_KEYWORDS_WITH_UNDERSCORES(MR_MACROS) \ diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 31efcb16f02..ad062d27a37 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -2178,6 +2178,7 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect ParserKeyword from(Keyword::FROM); ParserKeyword to(Keyword::TO); ParserKeyword step(Keyword::STEP); + ParserKeyword staleness(Keyword::STALENESS); ParserStringLiteral collate_locale_parser; ParserExpressionWithOptionalAlias exp_parser(false); @@ -2219,6 +2220,7 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect ASTPtr fill_from; ASTPtr fill_to; ASTPtr fill_step; + ASTPtr fill_staleness; if (with_fill.ignore(pos, expected)) { has_with_fill = true; @@ -2230,6 +2232,9 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect if (step.ignore(pos, expected) && !exp_parser.parse(pos, fill_step, expected)) return false; + + if (staleness.ignore(pos, expected) && !exp_parser.parse(pos, fill_staleness, expected)) + return false; } auto elem = std::make_shared(); @@ -2244,6 +2249,7 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect elem->setFillFrom(fill_from); elem->setFillTo(fill_to); elem->setFillStep(fill_step); + elem->setFillStaleness(fill_staleness); node = elem; diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 8d3c75fdabb..f1c752aecd0 100644 --- a/src/Planner/Planner.cpp +++ 
b/src/Planner/Planner.cpp @@ -847,6 +847,9 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, interpolate_description = std::make_shared(std::move(interpolate_actions_dag), empty_aliases); } + if (interpolate_description) + LOG_DEBUG(getLogger("addWithFillStepIfNeeded"), "InterpolateDescription: {}", interpolate_description->actions.dumpDAG()); + const auto & query_context = planner_context->getQueryContext(); const Settings & settings = query_context->getSettingsRef(); auto filling_step = std::make_unique( diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index aea304e0ecc..aa233109fa9 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -391,6 +391,9 @@ public: if (sort_node.hasFillStep()) buffer << " STEP " << calculateActionNodeName(sort_node.getFillStep()); + + if (sort_node.hasFillStaleness()) + buffer << " STALENESS " << calculateActionNodeName(sort_node.getFillStaleness()); } if (i + 1 != order_by_nodes_size) diff --git a/src/Planner/PlannerSorting.cpp b/src/Planner/PlannerSorting.cpp index af51afdef13..0a33e2f0828 100644 --- a/src/Planner/PlannerSorting.cpp +++ b/src/Planner/PlannerSorting.cpp @@ -43,7 +43,7 @@ std::pair extractWithFillValue(const QueryTreeNodePtr & node return result; } -std::pair> extractWithFillStepValue(const QueryTreeNodePtr & node) +std::pair> extractWithFillValueWithIntervalKind(const QueryTreeNodePtr & node) { const auto & constant_node = node->as(); @@ -77,7 +77,7 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) if (sort_node.hasFillStep()) { - auto extract_result = extractWithFillStepValue(sort_node.getFillStep()); + auto extract_result = extractWithFillValueWithIntervalKind(sort_node.getFillStep()); fill_column_description.fill_step = std::move(extract_result.first); fill_column_description.step_kind = std::move(extract_result.second); } @@ -87,10 +87,30 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) fill_column_description.fill_step = Field(direction_value); } + if (sort_node.getFillStaleness()) + { + auto extract_result = extractWithFillValueWithIntervalKind(sort_node.getFillStaleness()); + fill_column_description.fill_staleness = std::move(extract_result.first); + fill_column_description.staleness_kind = std::move(extract_result.second); + } + + /////////////////////////////////// + if (applyVisitor(FieldVisitorAccurateEquals(), fill_column_description.fill_step, Field{0})) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STEP value cannot be zero"); + if (sort_node.hasFillStaleness()) + { + if (sort_node.hasFillFrom()) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS cannot be used together with WITH FILL FROM"); + + if (applyVisitor(FieldVisitorAccurateLessOrEqual(), fill_column_description.fill_staleness, Field{0})) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS value cannot be less or equal zero"); + } + if (sort_node.getSortDirection() == SortDirection::ASCENDING) { if (applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_step, Field{0})) From c952d9d8153ce59458fdb69a208b361c7454cab1 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 25 Oct 2024 20:55:51 +0000 Subject: [PATCH 118/566] Right JOIN with parallel replicas --- .../ClusterProxy/executeQuery.cpp | 4 +- src/Planner/PlannerJoinTree.cpp | 2 + src/Planner/findParallelReplicasQuery.cpp | 52 ++++++++++++---- 
src/Storages/buildQueryTreeForShard.cpp | 62 +++++++++++++------ 4 files changed, 86 insertions(+), 34 deletions(-) diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index e88fdeb0379..4b1f3094be3 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -477,8 +477,8 @@ void executeQueryWithParallelReplicas( QueryPlanStepPtr analyzed_read_from_merge_tree) { auto logger = getLogger("executeQueryWithParallelReplicas"); - LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas", - storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage); + LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas\n{}", + storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage, StackTrace().toString()); const auto & settings = context->getSettingsRef(); diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 39c1352c9cf..7889a358d95 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -665,6 +665,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres bool is_single_table_expression, bool wrap_read_columns_in_subquery) { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "table_expression:\n{}", table_expression->dumpTree()); + const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index b97a9a36381..891e5034f44 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -100,14 +100,19 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre auto join_kind = join_node.getKind(); auto join_strictness = join_node.getStrictness(); - bool can_parallelize_join = - join_kind == JoinKind::Left - || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All); - - if (!can_parallelize_join) + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) + { + query_tree_node = join_node.getLeftTableExpression().get(); + } + else if (join_kind == JoinKind::Right) + { + query_tree_node = join_node.getRightTableExpression().get(); + } + else + { return {}; + } - query_tree_node = join_node.getLeftTableExpression().get(); break; } default: @@ -310,13 +315,15 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * query_tree_node) { - std::stack right_join_nodes; - while (query_tree_node || !right_join_nodes.empty()) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); + + std::stack join_nodes; + while (query_tree_node || !join_nodes.empty()) { if (!query_tree_node) { - query_tree_node = right_join_nodes.top(); - right_join_nodes.pop(); + query_tree_node = join_nodes.top(); + join_nodes.pop(); } auto join_tree_node_type = query_tree_node->getNodeType(); @@ -365,8 +372,23 @@ static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * que case QueryTreeNodeType::JOIN: { const auto & join_node = query_tree_node->as(); - query_tree_node = join_node.getLeftTableExpression().get(); - 
right_join_nodes.push(join_node.getRightTableExpression().get()); + const auto join_kind = join_node.getKind(); + const auto join_strictness = join_node.getStrictness(); + + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner and join_strictness == JoinStrictness::All)) + { + query_tree_node = join_node.getLeftTableExpression().get(); + join_nodes.push(join_node.getRightTableExpression().get()); + } + else if (join_kind == JoinKind::Right) + { + query_tree_node = join_node.getRightTableExpression().get(); + join_nodes.push(join_node.getLeftTableExpression().get()); + } + else + { + return nullptr; + } break; } default: @@ -400,7 +422,9 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr if (!context->canUseParallelReplicasOnFollower()) return nullptr; - return findTableForParallelReplicas(query_tree_node.get()); + const auto * res = findTableForParallelReplicas(query_tree_node.get()); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); + return res; } JoinTreeQueryPlan buildQueryPlanForParallelReplicas( @@ -408,6 +432,8 @@ JoinTreeQueryPlan buildQueryPlanForParallelReplicas( const PlannerContextPtr & planner_context, std::shared_ptr storage_limits) { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); + auto processed_stage = QueryProcessingStage::WithMergeableState; auto context = planner_context->getQueryContext(); diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index bbf32c68d19..df9bfd049fb 100644 --- a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -314,6 +314,35 @@ TableNodePtr executeSubqueryNode(const QueryTreeNodePtr & subquery_node, return temporary_table_expression_node; } +QueryTreeNodePtr getSubqueryFromTableExpression( + const QueryTreeNodePtr & join_table_expression, + const std::unordered_map & column_source_to_columns, + const ContextPtr & context) +{ + auto join_table_expression_node_type = join_table_expression->getNodeType(); + QueryTreeNodePtr subquery_node; + + if (join_table_expression_node_type == QueryTreeNodeType::QUERY || join_table_expression_node_type == QueryTreeNodeType::UNION) + { + subquery_node = join_table_expression; + } + else if ( + join_table_expression_node_type == QueryTreeNodeType::TABLE || join_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION) + { + const auto & columns = column_source_to_columns.at(join_table_expression).columns; + subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns, join_table_expression, context); + } + else + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Expected JOIN right table expression to be table, table function, query or union node. 
Actual {}", + join_table_expression->formatASTForErrorMessage()); + } + + return subquery_node; +} + } QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_context, QueryTreeNodePtr query_tree_to_modify) @@ -335,37 +364,32 @@ QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_contex { if (auto * join_node = global_in_or_join_node.query_node->as()) { - auto join_right_table_expression = join_node->getRightTableExpression(); - auto join_right_table_expression_node_type = join_right_table_expression->getNodeType(); - - QueryTreeNodePtr subquery_node; - - if (join_right_table_expression_node_type == QueryTreeNodeType::QUERY || - join_right_table_expression_node_type == QueryTreeNodeType::UNION) + QueryTreeNodePtr join_table_expression; + const auto join_kind = join_node->getKind(); + const auto join_strictness = join_node->getStrictness(); + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) { - subquery_node = join_right_table_expression; + join_table_expression = join_node->getRightTableExpression(); } - else if (join_right_table_expression_node_type == QueryTreeNodeType::TABLE || - join_right_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION) + else if (join_kind == JoinKind::Right) { - const auto & columns = column_source_to_columns.at(join_right_table_expression).columns; - subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns, - join_right_table_expression, - planner_context->getQueryContext()); + join_table_expression = join_node->getLeftTableExpression(); } else { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Expected JOIN right table expression to be table, table function, query or union node. Actual {}", - join_right_table_expression->formatASTForErrorMessage()); + throw Exception( + ErrorCodes::LOGICAL_ERROR, "Unexpected join kind: {}", join_kind); } + auto subquery_node + = getSubqueryFromTableExpression(join_table_expression, column_source_to_columns, planner_context->getQueryContext()); + auto temporary_table_expression_node = executeSubqueryNode(subquery_node, planner_context->getMutableQueryContext(), global_in_or_join_node.subquery_depth); - temporary_table_expression_node->setAlias(join_right_table_expression->getAlias()); + temporary_table_expression_node->setAlias(join_table_expression->getAlias()); - replacement_map.emplace(join_right_table_expression.get(), std::move(temporary_table_expression_node)); + replacement_map.emplace(join_table_expression.get(), std::move(temporary_table_expression_node)); continue; } if (auto * in_function_node = global_in_or_join_node.query_node->as()) From c58afb753c3a9b394f6f88cc8fad3e13897c5e57 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Oct 2024 00:29:36 +0200 Subject: [PATCH 119/566] Retry more errors from S3 --- src/IO/S3/Client.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index 9a0eccd8783..088087458c7 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -645,7 +645,7 @@ Client::doRequestWithRetryNetworkErrors(RequestType & request, RequestFn request try { /// S3 does retries network errors actually. - /// But it is matter when errors occur. + /// But it does matter when errors occur. /// This code retries a specific case when /// network error happens when XML document is being read from the response body. /// Hence, the response body is a stream, network errors are possible at reading. 
@@ -656,8 +656,9 @@ Client::doRequestWithRetryNetworkErrors(RequestType & request, RequestFn request /// Requests that expose the response stream as an answer are not retried with that code. E.g. GetObject. return request_fn_(request_); } - catch (Poco::Net::ConnectionResetException &) + catch (Poco::Net::NetException &) { + /// This includes "connection reset", "malformed message", and possibly other exceptions. if constexpr (IsReadMethod) { From 8807fe3bb5ff125e3a907354757552957e52b646 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 28 Oct 2024 00:57:13 +0100 Subject: [PATCH 120/566] Better log messages --- src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 8b3c7bdf3fb..c0464946752 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -254,7 +254,8 @@ MergeTreeDataMergerMutator::PartitionIdsHint MergeTreeDataMergerMutator::getPart if (status == SelectPartsDecision::SELECTED) res.insert(all_partition_ids[i]); else - LOG_TEST(log, "Nothing to merge in partition {}: {}", all_partition_ids[i], out_disable_reason.text); + LOG_TEST(log, "Nothing to merge in partition {} with max_total_size_to_merge = {} (looked up {} ranges): {}", + all_partition_ids[i], ReadableSize(max_total_size_to_merge), ranges_per_partition[i].size(), out_disable_reason.text); } String best_partition_id_to_optimize = getBestPartitionToOptimizeEntire(info.partitions_info); From 7ff2d5c98114d5d364e33cc5d0db88f5a1a06b8e Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 14:01:37 +0000 Subject: [PATCH 121/566] add baseline --- src/Common/FieldVisitorMul.cpp | 50 ++++++ src/Common/FieldVisitorMul.h | 53 ++++++ src/Core/Field.h | 8 + src/Core/SortDescription.h | 5 +- src/Interpreters/FillingRow.cpp | 94 +++++++++-- src/Interpreters/FillingRow.h | 9 +- .../Transforms/FillingTransform.cpp | 159 +++++++++++------- 7 files changed, 306 insertions(+), 72 deletions(-) create mode 100644 src/Common/FieldVisitorMul.cpp create mode 100644 src/Common/FieldVisitorMul.h diff --git a/src/Common/FieldVisitorMul.cpp b/src/Common/FieldVisitorMul.cpp new file mode 100644 index 00000000000..36c32c40c05 --- /dev/null +++ b/src/Common/FieldVisitorMul.cpp @@ -0,0 +1,50 @@ +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + + +FieldVisitorMul::FieldVisitorMul(const Field & rhs_) : rhs(rhs_) {} + +// We can add all ints as unsigned regardless of their actual signedness. 
+bool FieldVisitorMul::operator() (Int64 & x) const { return this->operator()(reinterpret_cast(x)); } +bool FieldVisitorMul::operator() (UInt64 & x) const +{ + x *= applyVisitor(FieldVisitorConvertToNumber(), rhs); + return x != 0; +} + +bool FieldVisitorMul::operator() (Float64 & x) const { + x *= rhs.safeGet(); + return x != 0; +} + +bool FieldVisitorMul::operator() (Null &) const +{ + /// Do not add anything + return false; +} + +bool FieldVisitorMul::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Strings"); } +bool FieldVisitorMul::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Arrays"); } +bool FieldVisitorMul::operator() (Tuple &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Tuples"); } +bool FieldVisitorMul::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Maps"); } +bool FieldVisitorMul::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Objects"); } +bool FieldVisitorMul::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply UUIDs"); } +bool FieldVisitorMul::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv4s"); } +bool FieldVisitorMul::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv6s"); } +bool FieldVisitorMul::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply custom type {}", x.getTypeName()); } + +bool FieldVisitorMul::operator() (AggregateFunctionStateData &) const +{ + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply AggregateFunctionStates"); +} + +bool FieldVisitorMul::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Bools"); } + +} diff --git a/src/Common/FieldVisitorMul.h b/src/Common/FieldVisitorMul.h new file mode 100644 index 00000000000..5bce41f1e71 --- /dev/null +++ b/src/Common/FieldVisitorMul.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +/** Implements `*=` operation. + * Returns false if the result is zero. + */ +class FieldVisitorMul : public StaticVisitor +{ +private: + const Field & rhs; +public: + explicit FieldVisitorMul(const Field & rhs_); + + // We can add all ints as unsigned regardless of their actual signedness. 
+ bool operator() (Int64 & x) const; + bool operator() (UInt64 & x) const; + bool operator() (Float64 & x) const; + bool operator() (Null &) const; + bool operator() (String &) const; + bool operator() (Array &) const; + bool operator() (Tuple &) const; + bool operator() (Map &) const; + bool operator() (Object &) const; + bool operator() (UUID &) const; + bool operator() (IPv4 &) const; + bool operator() (IPv6 &) const; + bool operator() (AggregateFunctionStateData &) const; + bool operator() (CustomType &) const; + bool operator() (bool &) const; + + template + bool operator() (DecimalField & x) const + { + x *= rhs.safeGet>(); + return x.getValue() != T(0); + } + + template + requires is_big_int_v + bool operator() (T & x) const + { + x *= applyVisitor(FieldVisitorConvertToNumber(), rhs); + return x != T(0); + } +}; + +} diff --git a/src/Core/Field.h b/src/Core/Field.h index 7b916d30646..47df5c2907e 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -185,6 +185,14 @@ public: return *this; } + const DecimalField & operator *= (const DecimalField & r) + { + if (scale != r.getScale()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Multiply different decimal fields"); + dec *= r.getValue(); + return *this; + } + const DecimalField & operator -= (const DecimalField & r) { if (scale != r.getScale()) diff --git a/src/Core/SortDescription.h b/src/Core/SortDescription.h index 5c6f3e3150a..7a7c92f3b53 100644 --- a/src/Core/SortDescription.h +++ b/src/Core/SortDescription.h @@ -33,9 +33,12 @@ struct FillColumnDescription DataTypePtr fill_to_type; Field fill_step; /// Default = +1 or -1 according to direction std::optional step_kind; + Field fill_staleness; /// Default = Null - should not be considered + std::optional staleness_kind; - using StepFunction = std::function; + using StepFunction = std::function; StepFunction step_func; + StepFunction staleness_step_func; }; /// Description of the sorting rule by one column. 
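For illustration only (a usage sketch, not part of the diff; the table and column names here are invented): the STALENESS pieces added in this patch and in the earlier "add staleness to sql" patch target a query of roughly this shape, where, per the checks added in PlannerSorting.cpp, the clause follows STEP, must be a positive constant, and cannot be combined with WITH FILL FROM.

    SELECT ts, value
    FROM samples
    ORDER BY ts WITH FILL STEP 1 STALENESS 5;

Judging by the FillingRow and FillingTransform changes below, filling after each original row would stop once the generated value drifts more than the staleness window away from that row, instead of continuing all the way to the next original value.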
diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 21b5b04bca3..1d3eae03ddd 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -28,6 +28,7 @@ FillingRow::FillingRow(const SortDescription & sort_description_) : sort_description(sort_description_) { row.resize(sort_description.size()); + staleness_base_row.resize(sort_description.size()); } bool FillingRow::operator<(const FillingRow & other) const @@ -63,7 +64,53 @@ bool FillingRow::isNull() const return true; } -std::pair FillingRow::next(const FillingRow & to_row) +std::optional FillingRow::doJump(const FillColumnDescription& descr, size_t column_ind) +{ + Field next_value = row[column_ind]; + descr.step_func(next_value, 1); + + if (!descr.fill_to.isNull() && less(descr.fill_to, next_value, getDirection(column_ind))) + return std::nullopt; + + if (!descr.fill_staleness.isNull()) { + Field staleness_border = staleness_base_row[column_ind]; + descr.staleness_step_func(staleness_border, 1); + + if (less(next_value, staleness_border, getDirection(column_ind))) + return next_value; + else + return std::nullopt; + } + + return next_value; +} + +std::optional FillingRow::doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to) +{ + Field shifted_value = row[column_ind]; + + if (less(to, shifted_value, getDirection(column_ind))) + return std::nullopt; + + for (int32_t step_len = 1, step_no = 0; step_no < 100; ++step_no) { + Field next_value = shifted_value; + descr.step_func(next_value, step_len); + + if (less(next_value, to, getDirection(0))) + { + shifted_value = std::move(next_value); + step_len *= 2; + } + else + { + step_len /= 2; + } + } + + return shifted_value; +} + +std::pair FillingRow::next(const FillingRow & to_row, bool long_jump) { const size_t row_size = size(); size_t pos = 0; @@ -85,23 +132,43 @@ std::pair FillingRow::next(const FillingRow & to_row) if (fill_column_desc.fill_to.isNull() || row[i].isNull()) continue; - Field next_value = row[i]; - fill_column_desc.step_func(next_value); - if (less(next_value, fill_column_desc.fill_to, getDirection(i))) + auto next_value = doJump(fill_column_desc, i); + if (next_value.has_value() && !equals(next_value.value(), fill_column_desc.fill_to)) { - row[i] = next_value; + row[i] = std::move(next_value.value()); initFromDefaults(i + 1); return {true, true}; } } - auto next_value = row[pos]; - getFillDescription(pos).step_func(next_value); + auto & fill_column_desc = getFillDescription(pos); + std::optional next_value; - if (less(to_row.row[pos], next_value, getDirection(pos)) || equals(next_value, getFillDescription(pos).fill_to)) - return {false, false}; + if (long_jump) + { + next_value = doLongJump(fill_column_desc, pos, to_row[pos]); - row[pos] = next_value; + if (!next_value.has_value()) + return {false, false}; + + Field calibration_jump_value = next_value.value(); + fill_column_desc.step_func(calibration_jump_value, 1); + + if (equals(calibration_jump_value, to_row[pos])) + next_value = calibration_jump_value; + + if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) + return {false, false}; + } + else + { + next_value = doJump(fill_column_desc, pos); + + if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) + return {false, false}; + } + + row[pos] = std::move(next_value.value()); if 
(equals(row[pos], to_row.row[pos])) { bool is_less = false; @@ -128,6 +195,13 @@ void FillingRow::initFromDefaults(size_t from_pos) row[i] = getFillDescription(i).fill_from; } +void FillingRow::initStalenessRow(const Columns& base_row, size_t row_ind) +{ + for (size_t i = 0; i < size(); ++i) { + staleness_base_row[i] = (*base_row[i])[row_ind]; + } +} + String FillingRow::dump() const { WriteBufferFromOwnString out; diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index 004b417542c..14b6034ce35 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -1,6 +1,6 @@ #pragma once -#include +#include namespace DB { @@ -15,6 +15,9 @@ bool equals(const Field & lhs, const Field & rhs); */ class FillingRow { + std::optional doJump(const FillColumnDescription & descr, size_t column_ind); + std::optional doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to); + public: explicit FillingRow(const SortDescription & sort_description); @@ -22,9 +25,10 @@ public: /// Return pair of boolean /// apply - true if filling values should be inserted into result set /// value_changed - true if filling row value was changed - std::pair next(const FillingRow & to_row); + std::pair next(const FillingRow & to_row, bool long_jump); void initFromDefaults(size_t from_pos = 0); + void initStalenessRow(const Columns& base_row, size_t row_ind); Field & operator[](size_t index) { return row[index]; } const Field & operator[](size_t index) const { return row[index]; } @@ -42,6 +46,7 @@ public: private: Row row; + Row staleness_base_row; SortDescription sort_description; }; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 95f4a674ebb..1d68f73e8c2 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -7,15 +7,17 @@ #include #include #include +#include #include #include #include +#include namespace DB { -constexpr bool debug_logging_enabled = false; +constexpr bool debug_logging_enabled = true; template void logDebug(String key, const T & value, const char * separator = " : ") @@ -60,15 +62,78 @@ static FillColumnDescription::StepFunction getStepFunction( { #define DECLARE_CASE(NAME) \ case IntervalKind::Kind::NAME: \ - return [step, scale, &date_lut](Field & field) { \ + return [step, scale, &date_lut](Field & field, Int32 jumps_count) { \ field = Add##NAME##sImpl::execute(static_cast(\ - field.safeGet()), static_cast(step), date_lut, utc_time_zone, scale); }; + field.safeGet()), static_cast(step) * jumps_count, date_lut, utc_time_zone, scale); }; FOR_EACH_INTERVAL_KIND(DECLARE_CASE) #undef DECLARE_CASE } } +static FillColumnDescription::StepFunction getStepFunction(const Field & step, const std::optional & step_kind, const DataTypePtr & type) +{ + WhichDataType which(type); + + if (step_kind) + { + if (which.isDate() || which.isDate32()) + { + Int64 avg_seconds = step.safeGet() * step_kind->toAvgSeconds(); + if (std::abs(avg_seconds) < 86400) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Value of step is to low ({} seconds). 
Must be >= 1 day", std::abs(avg_seconds)); + } + + if (which.isDate()) + return getStepFunction(step_kind.value(), step.safeGet(), DateLUT::instance()); + else if (which.isDate32()) + return getStepFunction(step_kind.value(), step.safeGet(), DateLUT::instance()); + else if (const auto * date_time = checkAndGetDataType(type.get())) + return getStepFunction(step_kind.value(), step.safeGet(), date_time->getTimeZone()); + else if (const auto * date_time64 = checkAndGetDataType(type.get())) + { + const auto & step_dec = step.safeGet &>(); + Int64 converted_step = DecimalUtils::convertTo(step_dec.getValue(), step_dec.getScale()); + static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); + + switch (step_kind.value()) // NOLINT(bugprone-switch-missing-default-case) + { +#define DECLARE_CASE(NAME) \ + case IntervalKind::Kind::NAME: \ + return [converted_step, &time_zone = date_time64->getTimeZone()](Field & field, Int32 jumps_count) \ + { \ + auto field_decimal = field.safeGet>(); \ + auto res = Add##NAME##sImpl::execute(field_decimal.getValue(), converted_step * jumps_count, time_zone, utc_time_zone, field_decimal.getScale()); \ + field = DecimalField(res, field_decimal.getScale()); \ + }; \ + break; + + FOR_EACH_INTERVAL_KIND(DECLARE_CASE) +#undef DECLARE_CASE + } + } + else + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "STEP of Interval type can be used only with Date/DateTime types, but got {}", type->getName()); + } + else + { + return [step](Field & field, Int32 jumps_count) + { + auto shifted_step = step; + if (jumps_count != 1) + applyVisitor(FieldVisitorMul(jumps_count), shifted_step); + + logDebug("field", field.dump()); + logDebug("step", step.dump()); + logDebug("shifted field", shifted_step.dump()); + + applyVisitor(FieldVisitorSum(shifted_step), field); + }; + } +} + static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & type) { auto max_type = Field::Types::Null; @@ -125,7 +190,8 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & if (descr.fill_from.getType() > max_type || descr.fill_to.getType() > max_type - || descr.fill_step.getType() > max_type) + || descr.fill_step.getType() > max_type + || descr.fill_staleness.getType() > max_type) return false; if (!descr.fill_from.isNull()) @@ -134,56 +200,11 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & descr.fill_to = convertFieldToTypeOrThrow(descr.fill_to, *to_type); if (!descr.fill_step.isNull()) descr.fill_step = convertFieldToTypeOrThrow(descr.fill_step, *to_type); + if (!descr.fill_staleness.isNull()) + descr.fill_staleness = convertFieldToTypeOrThrow(descr.fill_staleness, *to_type); - if (descr.step_kind) - { - if (which.isDate() || which.isDate32()) - { - Int64 avg_seconds = descr.fill_step.safeGet() * descr.step_kind->toAvgSeconds(); - if (std::abs(avg_seconds) < 86400) - throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, - "Value of step is to low ({} seconds). 
Must be >= 1 day", std::abs(avg_seconds)); - } - - if (which.isDate()) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), DateLUT::instance()); - else if (which.isDate32()) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), DateLUT::instance()); - else if (const auto * date_time = checkAndGetDataType(type.get())) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), date_time->getTimeZone()); - else if (const auto * date_time64 = checkAndGetDataType(type.get())) - { - const auto & step_dec = descr.fill_step.safeGet &>(); - Int64 step = DecimalUtils::convertTo(step_dec.getValue(), step_dec.getScale()); - static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); - - switch (*descr.step_kind) // NOLINT(bugprone-switch-missing-default-case) - { -#define DECLARE_CASE(NAME) \ - case IntervalKind::Kind::NAME: \ - descr.step_func = [step, &time_zone = date_time64->getTimeZone()](Field & field) \ - { \ - auto field_decimal = field.safeGet>(); \ - auto res = Add##NAME##sImpl::execute(field_decimal.getValue(), step, time_zone, utc_time_zone, field_decimal.getScale()); \ - field = DecimalField(res, field_decimal.getScale()); \ - }; \ - break; - - FOR_EACH_INTERVAL_KIND(DECLARE_CASE) -#undef DECLARE_CASE - } - } - else - throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, - "STEP of Interval type can be used only with Date/DateTime types, but got {}", type->getName()); - } - else - { - descr.step_func = [step = descr.fill_step](Field & field) - { - applyVisitor(FieldVisitorSum(step), field); - }; - } + descr.step_func = getStepFunction(descr.fill_step, descr.step_kind, type); + descr.staleness_step_func = getStepFunction(descr.fill_staleness, descr.staleness_kind, type); return true; } @@ -482,8 +503,8 @@ bool FillingTransform::generateSuffixIfNeeded( MutableColumnRawPtrs res_sort_prefix_columns, MutableColumnRawPtrs res_other_columns) { - logDebug("generateSuffixIfNeeded() filling_row", filling_row); - logDebug("generateSuffixIfNeeded() next_row", next_row); + logDebug("generateSuffixIfNeeded filling_row", filling_row); + logDebug("generateSuffixIfNeeded next_row", next_row); /// Determines if we should insert filling row before start generating next rows bool should_insert_first = (next_row < filling_row && !filling_row_inserted) || next_row.isNull(); @@ -492,11 +513,11 @@ bool FillingTransform::generateSuffixIfNeeded( for (size_t i = 0, size = filling_row.size(); i < size; ++i) next_row[i] = filling_row.getFillDescription(i).fill_to; - logDebug("generateSuffixIfNeeded() next_row updated", next_row); + logDebug("generateSuffixIfNeeded next_row updated", next_row); if (filling_row >= next_row) { - logDebug("generateSuffixIfNeeded()", "no need to generate suffix"); + logDebug("generateSuffixIfNeeded", "no need to generate suffix"); return false; } @@ -516,7 +537,7 @@ bool FillingTransform::generateSuffixIfNeeded( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row); + const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/false); filling_row_changed = changed; if (!apply) break; @@ -593,6 +614,9 @@ void FillingTransform::transformRange( const auto current_value = (*input_fill_columns[i])[range_begin]; const auto & fill_from = filling_row.getFillDescription(i).fill_from; + logDebug("current value", current_value.dump()); + logDebug("fill from", fill_from.dump()); + if (!fill_from.isNull() && !equals(current_value, fill_from)) { 
filling_row.initFromDefaults(i); @@ -609,6 +633,9 @@ void FillingTransform::transformRange( } } + /// Init staleness first interval + filling_row.initStalenessRow(input_fill_columns, range_begin); + for (size_t row_ind = range_begin; row_ind < range_end; ++row_ind) { logDebug("row", row_ind); @@ -623,6 +650,9 @@ void FillingTransform::transformRange( const auto current_value = (*input_fill_columns[i])[row_ind]; const auto & fill_to = filling_row.getFillDescription(i).fill_to; + logDebug("current value", current_value.dump()); + logDebug("fill to", fill_to.dump()); + if (fill_to.isNull() || less(current_value, fill_to, filling_row.getDirection(i))) next_row[i] = current_value; else @@ -643,7 +673,7 @@ void FillingTransform::transformRange( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row); + const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/false); filling_row_changed = changed; if (!apply) break; @@ -652,6 +682,14 @@ void FillingTransform::transformRange( insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); } + + const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/true); + logDebug("apply", apply); + logDebug("changed", changed); + + if (changed) + filling_row_changed = true; + /// new valid filling row was generated but not inserted, will use it during suffix generation if (filling_row_changed) filling_row_inserted = false; @@ -662,6 +700,9 @@ void FillingTransform::transformRange( copyRowFromColumns(res_interpolate_columns, input_interpolate_columns, row_ind); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); copyRowFromColumns(res_other_columns, input_other_columns, row_ind); + + /// Init next staleness interval with current row, because we have already made the long jump to it + filling_row.initStalenessRow(input_fill_columns, row_ind); } /// save sort prefix of last row in the range, it's used to generate suffix From 8f9d577c453573d82a529186fde60697d509e6f2 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Mon, 28 Oct 2024 10:12:59 -0400 Subject: [PATCH 122/566] add enable_job_stack_trace to change history --- src/Core/SettingsChangesHistory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index d958d091975..02601f12d56 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -68,6 +68,7 @@ static std::initializer_list Date: Sat, 26 Oct 2024 19:35:33 +0200 Subject: [PATCH 123/566] CI: Functional Tests with praktika --- .github/workflows/pr.yaml | 287 ++++++++++++++++++++ .github/workflows/pull_request.yml | 212 --------------- ci/jobs/build_clickhouse.py | 12 +- ci/jobs/check_style.py | 2 +- ci/jobs/fast_test.py | 2 +- ci/jobs/functional_stateless_tests.py | 48 ++++ ci/jobs/scripts/functional_tests_results.py | 3 +- ci/praktika/__main__.py | 23 +- ci/praktika/_environment.py | 3 +- ci/praktika/_settings.py | 3 +- ci/praktika/hook_cache.py | 6 +- ci/praktika/json.html | 50 +++- ci/praktika/result.py | 2 +- ci/praktika/runner.py | 56 ++-- ci/praktika/yaml_generator.py | 6 +- ci/settings/definitions.py | 1 + ci/settings/settings.py | 2 + ci/workflows/pull_request.py | 16 +- 18 files changed, 477 insertions(+), 257 deletions(-) create mode 100644 
.github/workflows/pr.yaml delete mode 100644 .github/workflows/pull_request.yml create mode 100644 ci/jobs/functional_stateless_tests.py diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml new file mode 100644 index 00000000000..34c794f6088 --- /dev/null +++ b/.github/workflows/pr.yaml @@ -0,0 +1,287 @@ +# generated by praktika + +name: PR + +on: + pull_request: + branches: ['master'] + +# Cancel the previous wf run in PRs. +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + GH_TOKEN: ${{ github.token }} + +# Allow updating GH commit statuses and PR comments to post an actual job reports link +permissions: write-all + +jobs: + + config_workflow: + runs-on: [ci_services] + needs: [] + name: "Config Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Config Workflow''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Config Workflow''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + docker_builds: + runs-on: [ci_services_ebs] + needs: [config_workflow] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIEJ1aWxkcw==') }} + name: "Docker Builds" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Docker Builds''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Docker Builds''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + style_check: + runs-on: [ci_services] + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3R5bGUgQ2hlY2s=') }} + name: "Style Check" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. 
+ + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Style Check''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Style Check''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + fast_test: + runs-on: [builder] + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RmFzdCB0ZXN0') }} + name: "Fast test" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + build_amd64_debug: + runs-on: [builder] + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgYW1kNjQgZGVidWc=') }} + name: "Build amd64 debug" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug: + runs-on: [builder] + needs: [config_workflow, docker_builds, build_amd64_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKQ==') }} + name: "Stateless tests (amd, debug)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + finish_workflow: + runs-on: [ci_services] + needs: [config_workflow, docker_builds, style_check, fast_test, build_amd64_debug, stateless_tests_amd_debug] + if: ${{ !cancelled() }} + name: "Finish Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Finish Workflow''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Finish Workflow''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml deleted file mode 100644 index e4eb44b2774..00000000000 --- a/.github/workflows/pull_request.yml +++ /dev/null @@ -1,212 +0,0 @@ -# yamllint disable rule:comments-indentation -name: PullRequestCI - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - -on: # yamllint disable-line rule:truthy - pull_request: - types: - - synchronize - - reopened - - opened - branches: - - master - -# Cancel the previous wf run in PRs. 
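For local debugging, the jobs above can be reproduced outside GitHub Actions: each generated step only exports PYTHONPATH=./ci:. and calls "python3 -m praktika run", and the praktika changes later in this patch add --pr, --sha and --branch so a local run can pull required artifacts from a specific CI run. A rough Python sketch of such a local invocation follows; the job name comes from this workflow, while the PR number and the wrapper function itself are illustrative placeholders, not part of the patch.

    import os
    import subprocess
    import sys
    from typing import Optional


    def run_praktika_job_locally(job: str, workflow: str = "PR", pr: Optional[int] = None) -> int:
        """Invoke a job the same way the generated workflow does, but without --ci,
        so the runner prepares a local environment instead of the CI one."""
        cmd = [sys.executable, "-m", "praktika", "run", "--job", job, "--workflow", workflow]
        if pr is not None:
            # --pr (added in this patch) points the runner at the CI run to download required artifacts from
            cmd += ["--pr", str(pr)]
        env = dict(os.environ, PYTHONPATH="./ci:.")
        return subprocess.run(cmd, env=env, check=False).returncode


    if __name__ == "__main__":
        sys.exit(run_praktika_job_locally("Stateless tests (amd, debug)", pr=12345))
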
-concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - RunConfig: - runs-on: [self-hosted, style-checker-aarch64] - outputs: - data: ${{ steps.runconfig.outputs.CI_DATA }} - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true # to ensure correct digests - fetch-depth: 0 # to get a version - filter: tree:0 - - name: Debug Info - uses: ./.github/actions/debug - - name: Set pending Sync status - run: | - python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --set-pending-status - - name: Labels check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 run_check.py - - name: Python unit tests - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - echo "Testing the main ci directory" - python3 -m unittest discover -s . -p 'test_*.py' - - name: PrepareRunConfig - id: runconfig - run: | - python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json - - echo "::group::CI configuration" - python3 -m json.tool ${{ runner.temp }}/ci_run_data.json - echo "::endgroup::" - - { - echo 'CI_DATA<> "$GITHUB_OUTPUT" - - name: Re-create GH statuses for skipped jobs if any - run: | - python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses - BuildDockers: - needs: [RunConfig] - if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }} - uses: ./.github/workflows/docker_test_images.yml - with: - data: ${{ needs.RunConfig.outputs.data }} - StyleCheck: - needs: [RunConfig, BuildDockers] - if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Style check')}} - uses: ./.github/workflows/reusable_test.yml - with: - test_name: Style check - runner_type: style-checker-aarch64 - run_command: | - python3 style_check.py - data: ${{ needs.RunConfig.outputs.data }} - secrets: - secret_envs: | - ROBOT_CLICKHOUSE_SSH_KEY< "$WORKFLOW_RESULT_FILE" << 'EOF' - ${{ toJson(needs) }} - EOF - python3 merge_pr.py --set-ci-status - - name: Check Workflow results - uses: ./.github/actions/check_workflow - with: - needs: ${{ toJson(needs) }} - - ################################# Stage Final ################################# - # - FinishCheck: - if: ${{ !failure() && !cancelled() }} - needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2] - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - filter: tree:0 - - name: Finish label - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} - -############################################################################################# -###################################### JEPSEN TESTS ######################################### -############################################################################################# - # This is special test NOT INCLUDED in FinishCheck - # When it's skipped, all dependent tasks will be skipped too. 
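The job scripts this workflow drives share one skeleton: parse an optional start stage, time the run with a stopwatch, collect per-stage Result objects, and finish with complete_job() (renamed from finish_job_accordingly in this patch). The new ci/jobs/functional_stateless_tests.py added below follows it; a condensed, illustrative version of that pattern, with placeholder stage names, looks roughly like this.

    import argparse

    from praktika.result import Result
    from praktika.utils import MetaClasses, Utils


    class JobStages(metaclass=MetaClasses.WithIter):
        PREPARE = "prepare"  # placeholder stages; real jobs define their own
        RUN = "run"


    def main():
        parser = argparse.ArgumentParser(description="Sketch of a praktika job script")
        parser.add_argument("--param", help="Optional job start stage", default=None)
        args = parser.parse_args()

        stop_watch = Utils.Stopwatch()
        start_stage = args.param or JobStages.PREPARE

        stages = list(JobStages)
        while stages and stages[0] != start_stage:
            stages.pop(0)  # drop stages before the requested start stage

        results = []
        for stage in stages:
            # A real job does its work here and appends the stage result; a failed one stops early.
            results.append(Result(name=stage, status=Result.Status.SUCCESS, info=""))

        # Dumps the aggregated result and exits non-zero if the job failed.
        Result.create_from(results=results, stopwatch=stop_watch).complete_job()


    if __name__ == "__main__":
        main()
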
- # DO NOT add it there - Jepsen: - # we need concurrency as the job uses dedicated instances in the cloud - concurrency: - group: jepsen - if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse Keeper Jepsen') }} - needs: [RunConfig, Builds_1] - uses: ./.github/workflows/reusable_test.yml - with: - test_name: ClickHouse Keeper Jepsen - runner_type: style-checker-aarch64 - data: ${{ needs.RunConfig.outputs.data }} diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 21ed8091608..cfa358b4059 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -13,8 +13,14 @@ class JobStages(metaclass=MetaClasses.WithIter): def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") - parser.add_argument("BUILD_TYPE", help="Type: ") - parser.add_argument("--param", help="Optional custom job start stage", default=None) + parser.add_argument( + "BUILD_TYPE", help="Type: __" + ) + parser.add_argument( + "--param", + help="Optional user-defined job start stage (for local run)", + default=None, + ) return parser.parse_args() @@ -95,7 +101,7 @@ def main(): Shell.check(f"ls -l {build_dir}/programs/") res = results[-1].is_ok() - Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() if __name__ == "__main__": diff --git a/ci/jobs/check_style.py b/ci/jobs/check_style.py index f9cdc76302d..d4b81abc92c 100644 --- a/ci/jobs/check_style.py +++ b/ci/jobs/check_style.py @@ -379,4 +379,4 @@ if __name__ == "__main__": ) ) - Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index 1dcd65b6ed2..dc5e1c975a6 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -330,7 +330,7 @@ def main(): CH.terminate() - Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() if __name__ == "__main__": diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py new file mode 100644 index 00000000000..dfdd5821a19 --- /dev/null +++ b/ci/jobs/functional_stateless_tests.py @@ -0,0 +1,48 @@ +import argparse + +from praktika.result import Result +from praktika.settings import Settings +from praktika.utils import MetaClasses, Shell, Utils + + +class JobStages(metaclass=MetaClasses.WithIter): + CHECKOUT_SUBMODULES = "checkout" + CMAKE = "cmake" + BUILD = "build" + + +def parse_args(): + parser = argparse.ArgumentParser(description="ClickHouse Build Job") + parser.add_argument("BUILD_TYPE", help="Type: ") + parser.add_argument("--param", help="Optional custom job start stage", default=None) + return parser.parse_args() + + +def main(): + + args = parse_args() + + stop_watch = Utils.Stopwatch() + + stages = list(JobStages) + stage = args.param or JobStages.CHECKOUT_SUBMODULES + if stage: + assert stage in JobStages, f"--param must be one of [{list(JobStages)}]" + print(f"Job will start from stage [{stage}]") + while stage in stages: + stages.pop(0) + stages.insert(0, stage) + + res = True + results = [] + + if res and JobStages.CHECKOUT_SUBMODULES in stages: + info = Shell.get_output(f"ls -l {Settings.INPUT_DIR}") + results.append(Result(name="TEST", status=Result.Status.SUCCESS, info=info)) + res = 
results[-1].is_ok() + + Result.create_from(results=results, stopwatch=stop_watch).complete_job() + + +if __name__ == "__main__": + main() diff --git a/ci/jobs/scripts/functional_tests_results.py b/ci/jobs/scripts/functional_tests_results.py index 5ac9d6b985d..aba3e4f7f5b 100755 --- a/ci/jobs/scripts/functional_tests_results.py +++ b/ci/jobs/scripts/functional_tests_results.py @@ -1,7 +1,6 @@ import dataclasses from typing import List -from praktika.environment import Environment from praktika.result import Result OK_SIGN = "[ OK " @@ -250,7 +249,7 @@ class FTResultsProcessor: # test_results.sort(key=test_result_comparator) return Result.create_from( - name=Environment.JOB_NAME, + name="Tests", results=test_results, status=state, files=[self.tests_output_file], diff --git a/ci/praktika/__main__.py b/ci/praktika/__main__.py index 7f472ecd9ae..fbb9f92909a 100644 --- a/ci/praktika/__main__.py +++ b/ci/praktika/__main__.py @@ -37,6 +37,24 @@ def create_parser(): type=str, default=None, ) + run_parser.add_argument( + "--pr", + help="PR number. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run in that PR", + type=int, + default=None, + ) + run_parser.add_argument( + "--sha", + help="Commit sha. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run on that sha, head sha will be used if not set", + type=str, + default=None, + ) + run_parser.add_argument( + "--branch", + help="Commit sha. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run on that branch, main branch name will be used if not set", + type=str, + default=None, + ) run_parser.add_argument( "--ci", help="When not set - dummy env will be generated, for local test", @@ -85,9 +103,12 @@ if __name__ == "__main__": workflow=workflow, job=job, docker=args.docker, - dummy_env=not args.ci, + local_run=not args.ci, no_docker=args.no_docker, param=args.param, + pr=args.pr, + branch=args.branch, + sha=args.sha, ) else: parser.print_help() diff --git a/ci/praktika/_environment.py b/ci/praktika/_environment.py index ce9c6f5b486..4ac8ad319f9 100644 --- a/ci/praktika/_environment.py +++ b/ci/praktika/_environment.py @@ -159,7 +159,8 @@ class _Environment(MetaClasses.Serializable): @classmethod def get_s3_prefix_static(cls, pr_number, branch, sha, latest=False): prefix = "" - if pr_number > 0: + assert sha or latest + if pr_number and pr_number > 0: prefix += f"{pr_number}" else: prefix += f"{branch}" diff --git a/ci/praktika/_settings.py b/ci/praktika/_settings.py index 3052d8ef877..1777257f484 100644 --- a/ci/praktika/_settings.py +++ b/ci/praktika/_settings.py @@ -1,5 +1,4 @@ import dataclasses -from pathlib import Path from typing import Dict, Iterable, List, Optional @@ -8,6 +7,7 @@ class _Settings: ###################################### # Pipeline generation settings # ###################################### + MAIN_BRANCH = "main" CI_PATH = "./ci" WORKFLOW_PATH_PREFIX: str = "./.github/workflows" WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows" @@ -111,6 +111,7 @@ _USER_DEFINED_SETTINGS = [ "CI_DB_INSERT_TIMEOUT_SEC", "SECRET_GH_APP_PEM_KEY", "SECRET_GH_APP_ID", + "MAIN_BRANCH", ] diff --git a/ci/praktika/hook_cache.py b/ci/praktika/hook_cache.py index b1b5c654f20..5cfedec0144 100644 --- a/ci/praktika/hook_cache.py +++ b/ci/praktika/hook_cache.py @@ -8,11 +8,9 @@ from praktika.utils import Utils class CacheRunnerHooks: @classmethod - def configure(cls, _workflow): - workflow_config = 
RunConfig.from_fs(_workflow.name) + def configure(cls, workflow): + workflow_config = RunConfig.from_fs(workflow.name) cache = Cache() - assert _Environment.get().WORKFLOW_NAME - workflow = _get_workflows(name=_Environment.get().WORKFLOW_NAME)[0] print(f"Workflow Configure, workflow [{workflow.name}]") assert ( workflow.enable_cache diff --git a/ci/praktika/json.html b/ci/praktika/json.html index 2f8c3e45d0b..af03ed702f8 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -89,6 +89,17 @@ letter-spacing: -0.5px; } + .dropdown-value { + width: 100px; + font-weight: normal; + font-family: inherit; + background-color: transparent; + color: inherit; + /*border: none;*/ + /*outline: none;*/ + /*cursor: pointer;*/ + } + #result-container { background-color: var(--tile-background); margin-left: calc(var(--status-width) + 20px); @@ -282,6 +293,12 @@ } } + function updateUrlParameter(paramName, paramValue) { + const url = new URL(window.location.href); + url.searchParams.set(paramName, paramValue); + window.location.href = url.toString(); + } + // Attach the toggle function to the click event of the icon document.getElementById('theme-toggle').addEventListener('click', toggleTheme); @@ -291,14 +308,14 @@ const monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]; const month = monthNames[date.getMonth()]; - const year = date.getFullYear(); + //const year = date.getFullYear(); const hours = String(date.getHours()).padStart(2, '0'); const minutes = String(date.getMinutes()).padStart(2, '0'); const seconds = String(date.getSeconds()).padStart(2, '0'); //const milliseconds = String(date.getMilliseconds()).padStart(2, '0'); return showDate - ? `${day}-${month}-${year} ${hours}:${minutes}:${seconds}` + ? `${day}'${month} ${hours}:${minutes}:${seconds}` : `${hours}:${minutes}:${seconds}`; } @@ -346,7 +363,7 @@ return 'status-other'; } - function addKeyValueToStatus(key, value) { + function addKeyValueToStatus(key, value, options = null) { const statusContainer = document.getElementById('status-container'); @@ -357,10 +374,25 @@ keyElement.className = 'json-key'; keyElement.textContent = key + ':'; - const valueElement = document.createElement('div'); - valueElement.className = 'json-value'; - valueElement.textContent = value; - + let valueElement + if (value) { + valueElement = document.createElement('div'); + valueElement.className = 'json-value'; + valueElement.textContent = value; + } else if (options) { + valueElement = document.createElement('select'); + valueElement.className = 'dropdown-value'; + valueElement.addEventListener('change', (event) => { + const selectedValue = event.target.value; + updateUrlParameter(key, selectedValue); + }); + options.forEach(optionValue => { + const option = document.createElement('option'); + option.value = optionValue; + option.textContent = optionValue; + valueElement.appendChild(option); + }); + } keyValuePair.appendChild(keyElement) keyValuePair.appendChild(valueElement) statusContainer.appendChild(keyValuePair); @@ -487,7 +519,7 @@ const columnSymbols = { name: '📂', - status: '✔️', + status: '⏯️', start_time: '🕒', duration: '⏳', info: 'ℹ️', @@ -726,7 +758,7 @@ } else { console.error("TODO") } - addKeyValueToStatus("sha", sha); + addKeyValueToStatus("sha", null, [sha, 'lala']); if (nameParams[1]) { addKeyValueToStatus("job", nameParams[1]); } diff --git a/ci/praktika/result.py b/ci/praktika/result.py index 3d3c986d5f9..2ba8309ad60 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -318,7 
+318,7 @@ class Result(MetaClasses.Serializable): files=[log_file] if log_file else None, ) - def finish_job_accordingly(self): + def complete_job(self): self.dump() if not self.is_ok(): print("ERROR: Job Failed") diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 797a799a74d..823c7e0f36d 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -19,7 +19,7 @@ from praktika.utils import Shell, TeePopen, Utils class Runner: @staticmethod - def generate_dummy_environment(workflow, job): + def generate_local_run_environment(workflow, job, pr=None, branch=None, sha=None): print("WARNING: Generate dummy env for local test") Shell.check( f"mkdir -p {Settings.TEMP_DIR} {Settings.INPUT_DIR} {Settings.OUTPUT_DIR}" @@ -28,9 +28,9 @@ class Runner: WORKFLOW_NAME=workflow.name, JOB_NAME=job.name, REPOSITORY="", - BRANCH="", - SHA="", - PR_NUMBER=-1, + BRANCH=branch or Settings.MAIN_BRANCH if not pr else "", + SHA=sha or Shell.get_output("git rev-parse HEAD"), + PR_NUMBER=pr or -1, EVENT_TYPE="", JOB_OUTPUT_STREAM="", EVENT_FILE_PATH="", @@ -86,7 +86,7 @@ class Runner: return 0 - def _pre_run(self, workflow, job): + def _pre_run(self, workflow, job, local_run=False): env = _Environment.get() result = Result( @@ -96,9 +96,10 @@ class Runner: ) result.dump() - if workflow.enable_report and job.name != Settings.CI_CONFIG_JOB_NAME: - print("Update Job and Workflow Report") - HtmlRunnerHooks.pre_run(workflow, job) + if not local_run: + if workflow.enable_report and job.name != Settings.CI_CONFIG_JOB_NAME: + print("Update Job and Workflow Report") + HtmlRunnerHooks.pre_run(workflow, job) print("Download required artifacts") required_artifacts = [] @@ -133,11 +134,17 @@ class Runner: env.dump() if job.run_in_docker and not no_docker: - # TODO: add support for any image, including not from ci config (e.g. 
ubuntu:latest) - docker_tag = RunConfig.from_fs(workflow.name).digest_dockers[ - job.run_in_docker - ] - docker = docker or f"{job.run_in_docker}:{docker_tag}" + if ":" in job.run_in_docker: + docker_name, docker_tag = job.run_in_docker.split(":") + print( + f"WARNING: Job [{job.name}] use custom docker image with a tag - praktika won't control docker version" + ) + else: + docker_name, docker_tag = ( + job.run_in_docker, + RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker], + ) + docker = docker or f"{docker_name}:{docker_tag}" cmd = f"docker run --rm --user \"$(id -u):$(id -g)\" -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" else: cmd = job.command @@ -285,14 +292,23 @@ class Runner: return True def run( - self, workflow, job, docker="", dummy_env=False, no_docker=False, param=None + self, + workflow, + job, + docker="", + local_run=False, + no_docker=False, + param=None, + pr=None, + sha=None, + branch=None, ): res = True setup_env_code = -10 prerun_code = -10 run_code = -10 - if res and not dummy_env: + if res and not local_run: print( f"\n\n=== Setup env script [{job.name}], workflow [{workflow.name}] ===" ) @@ -309,13 +325,15 @@ class Runner: traceback.print_exc() print(f"=== Setup env finished ===\n\n") else: - self.generate_dummy_environment(workflow, job) + self.generate_local_run_environment( + workflow, job, pr=pr, branch=branch, sha=sha + ) - if res and not dummy_env: + if res: res = False print(f"=== Pre run script [{job.name}], workflow [{workflow.name}] ===") try: - prerun_code = self._pre_run(workflow, job) + prerun_code = self._pre_run(workflow, job, local_run=local_run) res = prerun_code == 0 if not res: print(f"ERROR: Pre-run failed with exit code [{prerun_code}]") @@ -339,7 +357,7 @@ class Runner: traceback.print_exc() print(f"=== Run scrip finished ===\n\n") - if not dummy_env: + if not local_run: print(f"=== Post run script [{job.name}], workflow [{workflow.name}] ===") self._post_run(workflow, job, setup_env_code, prerun_code, run_code) print(f"=== Post run scrip finished ===") diff --git a/ci/praktika/yaml_generator.py b/ci/praktika/yaml_generator.py index 00c469fec0c..fb918b4ddba 100644 --- a/ci/praktika/yaml_generator.py +++ b/ci/praktika/yaml_generator.py @@ -102,7 +102,11 @@ jobs: run: | . 
/tmp/praktika_setup_env.sh set -o pipefail - {PYTHON} -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee {RUN_LOG} + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee /tmp/praktika/praktika_run.log + fi {UPLOADS_GITHUB}\ """ diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py index 176e865e6f3..c67bdee015b 100644 --- a/ci/settings/definitions.py +++ b/ci/settings/definitions.py @@ -231,3 +231,4 @@ class JobNames: STYLE_CHECK = "Style Check" FAST_TEST = "Fast test" BUILD_AMD_DEBUG = "Build amd64 debug" + STATELESS_TESTS = "Stateless tests (amd, debug)" diff --git a/ci/settings/settings.py b/ci/settings/settings.py index 8d5e7bc3c87..0f3b1efcee0 100644 --- a/ci/settings/settings.py +++ b/ci/settings/settings.py @@ -4,6 +4,8 @@ from ci.settings.definitions import ( RunnerLabels, ) +MAIN_BRANCH = "master" + S3_ARTIFACT_PATH = f"{S3_BUCKET_NAME}/artifacts" CI_CONFIG_RUNS_ON = [RunnerLabels.CI_SERVICES] DOCKER_BUILD_RUNS_ON = [RunnerLabels.CI_SERVICES_EBS] diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index 74129177efb..c7715b40fca 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -59,6 +59,19 @@ job_build_amd_debug = Job.Config( provides=[ArtifactNames.ch_debug_binary], ) +stateless_tests_job = Job.Config( + name=JobNames.STATELESS_TESTS, + runs_on=[RunnerLabels.BUILDER], + command="python3 ./ci/jobs/functional_stateless_tests.py amd_debug", + run_in_docker="clickhouse/fasttest:latest", + digest_config=Job.CacheDigestConfig( + include_paths=[ + "./ci/jobs/functional_stateless_tests.py", + ], + ), + requires=[ArtifactNames.ch_debug_binary], +) + workflow = Workflow.Config( name="PR", event=Workflow.Event.PULL_REQUEST, @@ -67,6 +80,7 @@ workflow = Workflow.Config( style_check_job, fast_test_job, job_build_amd_debug, + stateless_tests_job, ], artifacts=[ Artifact.Config( @@ -91,4 +105,4 @@ if __name__ == "__main__": # local job test inside praktika environment from praktika.runner import Runner - Runner().run(workflow, fast_test_job, docker="fasttest", dummy_env=True) + Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True) From de046be699582986482dff34ff4427ecf01f2bf9 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 15:13:33 +0000 Subject: [PATCH 124/566] change mul to scale --- src/Common/FieldVisitorMul.cpp | 50 ----------------- src/Common/FieldVisitorMul.h | 53 ------------------- src/Common/FieldVisitorScale.cpp | 30 +++++++++++ src/Common/FieldVisitorScale.h | 46 ++++++++++++++++ .../Transforms/FillingTransform.cpp | 4 +- 5 files changed, 78 insertions(+), 105 deletions(-) delete mode 100644 src/Common/FieldVisitorMul.cpp delete mode 100644 src/Common/FieldVisitorMul.h create mode 100644 src/Common/FieldVisitorScale.cpp create mode 100644 src/Common/FieldVisitorScale.h diff --git a/src/Common/FieldVisitorMul.cpp b/src/Common/FieldVisitorMul.cpp deleted file mode 100644 index 36c32c40c05..00000000000 --- a/src/Common/FieldVisitorMul.cpp +++ /dev/null @@ -1,50 +0,0 @@ -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - - -FieldVisitorMul::FieldVisitorMul(const Field & rhs_) : rhs(rhs_) {} - -// We can add all ints as unsigned regardless of their actual signedness. 
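The FieldVisitorMul removed here multiplied a Field by another Field; the FieldVisitorScale that replaces it further down only has to scale a value by a plain integer, because the filling code in this commit computes a "long jump" as step * jumps_count. A standalone sketch of that idea in plain Python (not the ClickHouse Field machinery; the numbers are invented for illustration):

    from decimal import Decimal


    def scale_step(step, jumps_count: int):
        """Analogue of applying a scale-by-integer visitor to a numeric step."""
        if isinstance(step, (int, float, Decimal)):
            return step * jumps_count
        # strings, arrays and other non-numeric values are rejected, as in the visitor
        raise TypeError("cannot scale a non-numeric step")


    def long_jump(value, step, jumps_count: int):
        """Advance a filling value by several steps in one go."""
        return value + scale_step(step, jumps_count)


    if __name__ == "__main__":
        print(long_jump(0, 5, 1))                             # ordinary single step -> 5
        print(long_jump(0, 5, 4))                             # jump over four intervals -> 20
        print(long_jump(Decimal("0.5"), Decimal("0.25"), 3))  # -> 1.25
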
-bool FieldVisitorMul::operator() (Int64 & x) const { return this->operator()(reinterpret_cast(x)); } -bool FieldVisitorMul::operator() (UInt64 & x) const -{ - x *= applyVisitor(FieldVisitorConvertToNumber(), rhs); - return x != 0; -} - -bool FieldVisitorMul::operator() (Float64 & x) const { - x *= rhs.safeGet(); - return x != 0; -} - -bool FieldVisitorMul::operator() (Null &) const -{ - /// Do not add anything - return false; -} - -bool FieldVisitorMul::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Strings"); } -bool FieldVisitorMul::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Arrays"); } -bool FieldVisitorMul::operator() (Tuple &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Tuples"); } -bool FieldVisitorMul::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Maps"); } -bool FieldVisitorMul::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Objects"); } -bool FieldVisitorMul::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply UUIDs"); } -bool FieldVisitorMul::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv4s"); } -bool FieldVisitorMul::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv6s"); } -bool FieldVisitorMul::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply custom type {}", x.getTypeName()); } - -bool FieldVisitorMul::operator() (AggregateFunctionStateData &) const -{ - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply AggregateFunctionStates"); -} - -bool FieldVisitorMul::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Bools"); } - -} diff --git a/src/Common/FieldVisitorMul.h b/src/Common/FieldVisitorMul.h deleted file mode 100644 index 5bce41f1e71..00000000000 --- a/src/Common/FieldVisitorMul.h +++ /dev/null @@ -1,53 +0,0 @@ -#pragma once - -#include -#include - - -namespace DB -{ - -/** Implements `*=` operation. - * Returns false if the result is zero. - */ -class FieldVisitorMul : public StaticVisitor -{ -private: - const Field & rhs; -public: - explicit FieldVisitorMul(const Field & rhs_); - - // We can add all ints as unsigned regardless of their actual signedness. 
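This visitor rework serves WITH FILL ... STALENESS from the earlier commits in this series: the filling row may advance several steps at once, but filled rows are only generated while they stay within the staleness interval of the last original row. A standalone simulation of that intended behaviour in plain Python (not ClickHouse code; whether the bound is inclusive, and what happens after the last original row, are details of the real implementation that this sketch leaves out):

    def fill_with_staleness(rows, step=1, staleness=3):
        """rows: sorted (key, value) pairs of original data.
        Yield original rows plus filled rows, stopping a fill run once it
        drifts a full staleness interval away from the row that started it."""
        out = []
        for i, (key, value) in enumerate(rows):
            out.append((key, value, "original"))
            if i + 1 == len(rows):
                break  # suffix generation after the last row is not modelled here
            limit = min(rows[i + 1][0], key + staleness)
            k = key + step
            while k < limit:
                out.append((k, value, "filled"))
                k += step
        return out


    if __name__ == "__main__":
        # seconds after 23:00:00 and the value c, mirroring the 03266_with_fill_staleness test data
        original = [(t, t) for t in range(0, 30, 5)]
        for key, value, kind in fill_with_staleness(original, step=1, staleness=3):
            print(key, value, kind)
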
- bool operator() (Int64 & x) const; - bool operator() (UInt64 & x) const; - bool operator() (Float64 & x) const; - bool operator() (Null &) const; - bool operator() (String &) const; - bool operator() (Array &) const; - bool operator() (Tuple &) const; - bool operator() (Map &) const; - bool operator() (Object &) const; - bool operator() (UUID &) const; - bool operator() (IPv4 &) const; - bool operator() (IPv6 &) const; - bool operator() (AggregateFunctionStateData &) const; - bool operator() (CustomType &) const; - bool operator() (bool &) const; - - template - bool operator() (DecimalField & x) const - { - x *= rhs.safeGet>(); - return x.getValue() != T(0); - } - - template - requires is_big_int_v - bool operator() (T & x) const - { - x *= applyVisitor(FieldVisitorConvertToNumber(), rhs); - return x != T(0); - } -}; - -} diff --git a/src/Common/FieldVisitorScale.cpp b/src/Common/FieldVisitorScale.cpp new file mode 100644 index 00000000000..fdb566007c3 --- /dev/null +++ b/src/Common/FieldVisitorScale.cpp @@ -0,0 +1,30 @@ +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +FieldVisitorScale::FieldVisitorScale(Int32 rhs_) : rhs(rhs_) {} + +void FieldVisitorScale::operator() (Int64 & x) const { x *= rhs; } +void FieldVisitorScale::operator() (UInt64 & x) const { x *= rhs; } +void FieldVisitorScale::operator() (Float64 & x) const { x *= rhs; } +void FieldVisitorScale::operator() (Null &) const { /*Do not scale anything*/ } + +void FieldVisitorScale::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Strings"); } +void FieldVisitorScale::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Arrays"); } +void FieldVisitorScale::operator() (Tuple &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Tuples"); } +void FieldVisitorScale::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Maps"); } +void FieldVisitorScale::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Objects"); } +void FieldVisitorScale::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply UUIDs"); } +void FieldVisitorScale::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv4s"); } +void FieldVisitorScale::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv6s"); } +void FieldVisitorScale::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply custom type {}", x.getTypeName()); } +void FieldVisitorScale::operator() (AggregateFunctionStateData &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply AggregateFunctionStates"); } +void FieldVisitorScale::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Bools"); } + +} diff --git a/src/Common/FieldVisitorScale.h b/src/Common/FieldVisitorScale.h new file mode 100644 index 00000000000..45bacdccc9c --- /dev/null +++ b/src/Common/FieldVisitorScale.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include +#include +#include "base/Decimal.h" +#include "base/extended_types.h" + +namespace DB +{ + +/** Implements `*=` operation by number + */ +class FieldVisitorScale : public StaticVisitor +{ +private: + Int32 rhs; + +public: + explicit FieldVisitorScale(Int32 rhs_); + + void operator() (Int64 & x) const; + void operator() (UInt64 & x) const; + void operator() (Float64 & x) 
const; + void operator() (Null &) const; + [[noreturn]] void operator() (String &) const; + [[noreturn]] void operator() (Array &) const; + [[noreturn]] void operator() (Tuple &) const; + [[noreturn]] void operator() (Map &) const; + [[noreturn]] void operator() (Object &) const; + [[noreturn]] void operator() (UUID &) const; + [[noreturn]] void operator() (IPv4 &) const; + [[noreturn]] void operator() (IPv6 &) const; + [[noreturn]] void operator() (AggregateFunctionStateData &) const; + [[noreturn]] void operator() (CustomType &) const; + [[noreturn]] void operator() (bool &) const; + + template + void operator() (DecimalField & x) const { x = DecimalField(x.getValue() * T(rhs), x.getScale()); } + + template + requires is_big_int_v + void operator() (T & x) const { x *= rhs; } +}; + +} diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 1d68f73e8c2..54331186302 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include @@ -123,7 +123,7 @@ static FillColumnDescription::StepFunction getStepFunction(const Field & step, c { auto shifted_step = step; if (jumps_count != 1) - applyVisitor(FieldVisitorMul(jumps_count), shifted_step); + applyVisitor(FieldVisitorScale(jumps_count), shifted_step); logDebug("field", field.dump()); logDebug("step", step.dump()); From 2c3363e40e1856f7b5ce8eb23c301ee2ee403f36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Mon, 28 Oct 2024 19:00:37 +0300 Subject: [PATCH 125/566] Hard limit on replicated tables, dicts, views --- src/Common/CurrentMetrics.cpp | 1 + src/Core/ServerSettings.cpp | 3 + src/Databases/DatabasesCommon.cpp | 6 +- src/Interpreters/InterpreterCreateQuery.cpp | 63 ++++++++++++++++--- src/Interpreters/InterpreterCreateQuery.h | 2 + src/Storages/Utils.cpp | 16 +++-- src/Storages/Utils.h | 2 +- .../test_table_db_num_limit/config/config.xml | 12 ++++ .../test_table_db_num_limit/test.py | 43 ++++++++++--- 9 files changed, 121 insertions(+), 27 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index e9d5e07c914..542838813de 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -242,6 +242,7 @@ M(PartsActive, "Active data part, used by current and upcoming SELECTs.") \ M(AttachedDatabase, "Active databases.") \ M(AttachedTable, "Active tables.") \ + M(AttachedReplicatedTable, "Active replicated tables.") \ M(AttachedView, "Active views.") \ M(AttachedDictionary, "Active dictionaries.") \ M(PartsOutdated, "Not active data part, but could be used by only current SELECTs, could be deleted after SELECTs finishes.") \ diff --git a/src/Core/ServerSettings.cpp b/src/Core/ServerSettings.cpp index 8c0864e78b7..2240b45a49f 100644 --- a/src/Core/ServerSettings.cpp +++ b/src/Core/ServerSettings.cpp @@ -128,7 +128,10 @@ namespace DB M(UInt64, max_database_num_to_warn, 1000lu, "If the number of databases is greater than this value, the server will create a warning that will displayed to user.", 0) \ M(UInt64, max_part_num_to_warn, 100000lu, "If the number of parts is greater than this value, the server will create a warning that will displayed to user.", 0) \ M(UInt64, max_table_num_to_throw, 0lu, "If number of tables is greater than this value, server will throw an exception. 0 means no limitation. 
View, remote tables, dictionary, system tables are not counted. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.", 0) \ + M(UInt64, max_replicated_table_num_to_throw, 0lu, "If number of replicated tables is greater than this value, server will throw an exception. 0 means no limitation. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.", 0) \ M(UInt64, max_database_num_to_throw, 0lu, "If number of databases is greater than this value, server will throw an exception. 0 means no limitation.", 0) \ + M(UInt64, max_dictionary_num_to_throw, 0lu, "If number of dictionaries is greater than this value, server will throw an exception. 0 means no limitation. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.", 0) \ + M(UInt64, max_view_num_to_throw, 0lu, "If number of views is greater than this value, server will throw an exception. 0 means no limitation. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.", 0) \ M(UInt64, max_authentication_methods_per_user, 100, "The maximum number of authentication methods a user can be created with or altered. Changing this setting does not affect existing users. Zero means unlimited", 0) \ M(UInt64, concurrent_threads_soft_limit_num, 0, "Sets how many concurrent thread can be allocated before applying CPU pressure. Zero means unlimited.", 0) \ M(UInt64, concurrent_threads_soft_limit_ratio_to_cores, 0, "Same as concurrent_threads_soft_limit_num, but with ratio to cores.", 0) \ diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index d26ec9d6eec..23d199cd160 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -382,7 +382,8 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n if (!table_storage->isSystemStorage() && !DatabaseCatalog::isPredefinedDatabase(database_name)) { LOG_TEST(log, "Counting detached table {} to database {}", table_name, database_name); - CurrentMetrics::sub(getAttachedCounterForStorage(table_storage)); + for (auto metric : getAttachedCountersForStorage(table_storage)) + CurrentMetrics::sub(metric); } auto table_id = table_storage->getStorageID(); @@ -430,7 +431,8 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c if (!table->isSystemStorage() && !DatabaseCatalog::isPredefinedDatabase(database_name)) { LOG_TEST(log, "Counting attached table {} to database {}", table_name, database_name); - CurrentMetrics::add(getAttachedCounterForStorage(table)); + for (auto metric : getAttachedCountersForStorage(table)) + CurrentMetrics::add(metric); } } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 6057afefd02..f8e85733911 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -98,6 +98,9 @@ namespace CurrentMetrics { extern const Metric AttachedTable; + extern const Metric AttachedReplicatedTable; + extern const Metric AttachedDictionary; + extern const Metric AttachedView; } namespace DB @@ -146,7 +149,10 @@ namespace ServerSetting { extern const ServerSettingsBool ignore_empty_sql_security_in_create_view_query; extern const ServerSettingsUInt64 max_database_num_to_throw; + extern const ServerSettingsUInt64 max_dictionary_num_to_throw; extern const ServerSettingsUInt64 max_table_num_to_throw; + extern const ServerSettingsUInt64 max_replicated_table_num_to_throw; + extern const ServerSettingsUInt64 max_view_num_to_throw; } namespace 
ErrorCodes @@ -1914,16 +1920,8 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create, } } - UInt64 table_num_limit = getContext()->getGlobalContext()->getServerSettings()[ServerSetting::max_table_num_to_throw]; - if (table_num_limit > 0 && !internal) - { - UInt64 table_count = CurrentMetrics::get(CurrentMetrics::AttachedTable); - if (table_count >= table_num_limit) - throw Exception(ErrorCodes::TOO_MANY_TABLES, - "Too many tables. " - "The limit (server configuration parameter `max_table_num_to_throw`) is set to {}, the current number of tables is {}", - table_num_limit, table_count); - } + if (!internal) + throwIfTooManyEntities(create, res); database->createTable(getContext(), create.getTable(), res, query_ptr); @@ -1950,6 +1948,51 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create, } +void InterpreterCreateQuery::throwIfTooManyEntities(ASTCreateQuery & create, StoragePtr storage) const +{ + if (auto * replicated_storage = typeid_cast(storage.get())) + { + UInt64 num_limit = getContext()->getGlobalContext()->getServerSettings()[ServerSetting::max_replicated_table_num_to_throw]; + UInt64 attached_count = CurrentMetrics::get(CurrentMetrics::AttachedReplicatedTable); + if (attached_count >= num_limit) + throw Exception(ErrorCodes::TOO_MANY_TABLES, + "Too many replicated tables. " + "The limit (server configuration parameter `max_replicated_table_num_to_throw`) is set to {}, the current number is {}", + num_limit, attached_count); + } + else if (create.is_dictionary) + { + UInt64 num_limit = getContext()->getGlobalContext()->getServerSettings()[ServerSetting::max_dictionary_num_to_throw]; + UInt64 attached_count = CurrentMetrics::get(CurrentMetrics::AttachedDictionary); + if (attached_count >= num_limit) + throw Exception(ErrorCodes::TOO_MANY_TABLES, + "Too many dictionaries. " + "The limit (server configuration parameter `max_dictionary_num_to_throw`) is set to {}, the current number is {}", + num_limit, attached_count); + } + else if (create.isView()) + { + UInt64 num_limit = getContext()->getGlobalContext()->getServerSettings()[ServerSetting::max_view_num_to_throw]; + UInt64 attached_count = CurrentMetrics::get(CurrentMetrics::AttachedView); + if (attached_count >= num_limit) + throw Exception(ErrorCodes::TOO_MANY_TABLES, + "Too many views. " + "The limit (server configuration parameter `max_view_num_to_throw`) is set to {}, the current number is {}", + num_limit, attached_count); + } + else + { + UInt64 num_limit = getContext()->getGlobalContext()->getServerSettings()[ServerSetting::max_table_num_to_throw]; + UInt64 attached_count = CurrentMetrics::get(CurrentMetrics::AttachedTable); + if (attached_count >= num_limit) + throw Exception(ErrorCodes::TOO_MANY_TABLES, + "Too many tables. 
" + "The limit (server configuration parameter `max_table_num_to_throw`) is set to {}, the current number is {}", + num_limit, attached_count); + } +} + + BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create, const InterpreterCreateQuery::TableProperties & properties, LoadingStrictnessLevel mode) { diff --git a/src/Interpreters/InterpreterCreateQuery.h b/src/Interpreters/InterpreterCreateQuery.h index cb7af25383e..24cf308951c 100644 --- a/src/Interpreters/InterpreterCreateQuery.h +++ b/src/Interpreters/InterpreterCreateQuery.h @@ -122,6 +122,8 @@ private: BlockIO executeQueryOnCluster(ASTCreateQuery & create); + void throwIfTooManyEntities(ASTCreateQuery & create, StoragePtr storage) const; + ASTPtr query_ptr; /// Skip safety threshold when loading tables. diff --git a/src/Storages/Utils.cpp b/src/Storages/Utils.cpp index bd03a96c7cc..72aeb0d158d 100644 --- a/src/Storages/Utils.cpp +++ b/src/Storages/Utils.cpp @@ -1,10 +1,13 @@ +#include #include #include +#include namespace CurrentMetrics { extern const Metric AttachedTable; + extern const Metric AttachedReplicatedTable; extern const Metric AttachedView; extern const Metric AttachedDictionary; } @@ -12,17 +15,20 @@ namespace CurrentMetrics namespace DB { - CurrentMetrics::Metric getAttachedCounterForStorage(const StoragePtr & storage) + std::vector getAttachedCountersForStorage(const StoragePtr & storage) { if (storage->isView()) { - return CurrentMetrics::AttachedView; + return {CurrentMetrics::AttachedView}; } if (storage->isDictionary()) { - return CurrentMetrics::AttachedDictionary; + return {CurrentMetrics::AttachedDictionary}; } - - return CurrentMetrics::AttachedTable; + if (auto * replicated_storage = typeid_cast(storage.get())) + { + return {CurrentMetrics::AttachedTable, CurrentMetrics::AttachedReplicatedTable}; + } + return {CurrentMetrics::AttachedTable}; } } diff --git a/src/Storages/Utils.h b/src/Storages/Utils.h index c86c2a4c341..eb302178485 100644 --- a/src/Storages/Utils.h +++ b/src/Storages/Utils.h @@ -6,5 +6,5 @@ namespace DB { - CurrentMetrics::Metric getAttachedCounterForStorage(const StoragePtr & storage); + std::vector getAttachedCountersForStorage(const StoragePtr & storage); } diff --git a/tests/integration/test_table_db_num_limit/config/config.xml b/tests/integration/test_table_db_num_limit/config/config.xml index 9a573b158fe..a4246c79694 100644 --- a/tests/integration/test_table_db_num_limit/config/config.xml +++ b/tests/integration/test_table_db_num_limit/config/config.xml @@ -1,5 +1,17 @@ + + + + + node1 + 9000 + + + + + 10 + 5 10 diff --git a/tests/integration/test_table_db_num_limit/test.py b/tests/integration/test_table_db_num_limit/test.py index b3aff6ddca2..ce981ffca3c 100644 --- a/tests/integration/test_table_db_num_limit/test.py +++ b/tests/integration/test_table_db_num_limit/test.py @@ -1,11 +1,14 @@ import pytest -from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", main_configs=["config/config.xml"]) +node = cluster.add_instance( + "node1", + with_zookeeper=True, + main_configs=["config/config.xml"], +) @pytest.fixture(scope="module") @@ -24,10 +27,9 @@ def test_table_db_limit(started_cluster): for i in range(9): node.query("create database db{}".format(i)) - with pytest.raises(QueryRuntimeException) as exp_info: - node.query("create database db_exp".format(i)) - - assert "TOO_MANY_DATABASES" in str(exp_info) + assert "TOO_MANY_DATABASES" in 
node.query_and_get_error( + "create database db_exp".format(i) + ) for i in range(10): node.query("create table t{} (a Int32) Engine = Log".format(i)) @@ -35,13 +37,36 @@ def test_table_db_limit(started_cluster): # This checks that system tables are not accounted in the number of tables. node.query("system flush logs") + # Regular tables for i in range(10): node.query("drop table t{}".format(i)) for i in range(10): node.query("create table t{} (a Int32) Engine = Log".format(i)) - with pytest.raises(QueryRuntimeException) as exp_info: - node.query("create table default.tx (a Int32) Engine = Log") + assert "TOO_MANY_TABLES" in node.query_and_get_error( + "create table default.tx (a Int32) Engine = Log" + ) - assert "TOO_MANY_TABLES" in str(exp_info) + # Replicated tables + for i in range(10): + node.query("drop table t{}".format(i)) + + for i in range(5): + node.query( + "create table t{} (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/t{}', 'r1') order by a".format( + i, i + ) + ) + + assert "Too many replicated tables" in node.query_and_get_error( + "create table tx (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/tx', 'r1') order by a" + ) + + # Checks that replicated tables are also counted as regular tables + for i in range(5, 10): + node.query("create table t{} (a Int32) Engine = Log".format(i)) + + assert "TOO_MANY_TABLES" in node.query_and_get_error( + "create table tx (a Int32) Engine = Log" + ) From 2d7de40ba70d6609f6fd79c5ef8534002803b707 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 17:24:03 +0000 Subject: [PATCH 126/566] fix sparse tables --- src/Processors/Transforms/FillingTransform.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 54331186302..635b46de3ee 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -458,7 +458,7 @@ void FillingTransform::initColumns( non_const_columns.reserve(input_columns.size()); for (const auto & column : input_columns) - non_const_columns.push_back(column->convertToFullColumnIfConst()); + non_const_columns.push_back(column->convertToFullColumnIfConst()->convertToFullColumnIfSparse()); for (const auto & column : non_const_columns) output_columns.push_back(column->cloneEmpty()->assumeMutable()); From 37f691bf9d1168431500c39c47432722a441a29e Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 17:42:52 +0000 Subject: [PATCH 127/566] add test --- .../03266_with_fill_staleness.reference | 28 +++++++++++++++++ .../0_stateless/03266_with_fill_staleness.sql | 31 +++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness.reference create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness.sql diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.reference b/tests/queries/0_stateless/03266_with_fill_staleness.reference new file mode 100644 index 00000000000..6061ecfe400 --- /dev/null +++ b/tests/queries/0_stateless/03266_with_fill_staleness.reference @@ -0,0 +1,28 @@ +add samples +regular with fill +2016-06-15 23:00:00 0 +2016-06-15 23:00:01 0 +2016-06-15 23:00:02 0 +2016-06-15 23:00:03 0 +2016-06-15 23:00:04 0 +2016-06-15 23:00:05 5 +2016-06-15 23:00:06 5 +2016-06-15 23:00:07 5 +2016-06-15 23:00:08 5 +2016-06-15 23:00:09 5 +2016-06-15 23:00:10 10 +2016-06-15 23:00:11 10 +2016-06-15 23:00:12 10 +2016-06-15 23:00:13 10 
+2016-06-15 23:00:14 10 +2016-06-15 23:00:15 15 +2016-06-15 23:00:16 15 +2016-06-15 23:00:17 15 +2016-06-15 23:00:18 15 +2016-06-15 23:00:19 15 +2016-06-15 23:00:20 20 +2016-06-15 23:00:21 20 +2016-06-15 23:00:22 20 +2016-06-15 23:00:23 20 +2016-06-15 23:00:24 20 +2016-06-15 23:00:25 25 diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.sql b/tests/queries/0_stateless/03266_with_fill_staleness.sql new file mode 100644 index 00000000000..3ab9be63a08 --- /dev/null +++ b/tests/queries/0_stateless/03266_with_fill_staleness.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS with_fill_staleness; +CREATE TABLE with_fill_staleness (a DateTime, b DateTime, c UInt64) ENGINE = MergeTree ORDER BY a; + +SELECT 'add samples'; + +INSERT INTO with_fill_staleness +SELECT + toDateTime('2016-06-15 23:00:00') + number AS a, a as b, number as c +FROM numbers(30) +WHERE (number % 5) == 0; + +SELECT 'regular with fill'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL INTERPOLATE (c); + +SELECT 'staleness 1 seconds'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 1 SECOND INTERPOLATE (c); + +SELECT 'staleness 3 seconds'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 3 SECOND INTERPOLATE (c); + +SELECT 'descending order'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a DESC WITH FILL STALENESS INTERVAL -2 SECOND INTERPOLATE (c); + +SELECT 'staleness with to and step'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL TO toDateTime('2016-06-15 23:00:40') STEP 3 STALENESS INTERVAL 7 SECOND INTERPOLATE (c); + +SELECT 'staleness with another regular with fill'; +SELECT a, b, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 2 SECOND, b ASC WITH FILL FROM 0 TO 3 INTERPOLATE (c); + +SELECT 'double staleness'; +SELECT a, b, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 2 SECOND, b ASC WITH FILL TO toDateTime('2016-06-15 23:01:00') STEP 2 STALENESS 5 INTERPOLATE (c); From 9760d39efe82339403de7a7177706c42c8d8c5a5 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 17:43:15 +0000 Subject: [PATCH 128/566] allow negative staleness for descending order --- src/Planner/PlannerSorting.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/Planner/PlannerSorting.cpp b/src/Planner/PlannerSorting.cpp index 0a33e2f0828..9476ae348c5 100644 --- a/src/Planner/PlannerSorting.cpp +++ b/src/Planner/PlannerSorting.cpp @@ -105,10 +105,6 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) if (sort_node.hasFillFrom()) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STALENESS cannot be used together with WITH FILL FROM"); - - if (applyVisitor(FieldVisitorAccurateLessOrEqual(), fill_column_description.fill_staleness, Field{0})) - throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, - "WITH FILL STALENESS value cannot be less or equal zero"); } if (sort_node.getSortDirection() == SortDirection::ASCENDING) @@ -117,6 +113,10 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STEP value cannot be negative for sorting in ascending direction"); + if (applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_staleness, 
Field{0})) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS value cannot be negative for sorting in ascending direction"); + if (!fill_column_description.fill_from.isNull() && !fill_column_description.fill_to.isNull() && applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_to, fill_column_description.fill_from)) { @@ -130,6 +130,10 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STEP value cannot be positive for sorting in descending direction"); + if (applyVisitor(FieldVisitorAccurateLess(), Field{0}, fill_column_description.fill_staleness)) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS value cannot be positive for sorting in descending direction"); + if (!fill_column_description.fill_from.isNull() && !fill_column_description.fill_to.isNull() && applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_from, fill_column_description.fill_to)) { From fc33593ff05ab3c5ca4271b79ba4eb39957fa057 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 17:45:02 +0000 Subject: [PATCH 129/566] fix style --- src/Interpreters/FillingRow.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 1d3eae03ddd..fdd3b55b66b 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -72,7 +72,8 @@ std::optional FillingRow::doJump(const FillColumnDescription& descr, size if (!descr.fill_to.isNull() && less(descr.fill_to, next_value, getDirection(column_ind))) return std::nullopt; - if (!descr.fill_staleness.isNull()) { + if (!descr.fill_staleness.isNull()) + { Field staleness_border = staleness_base_row[column_ind]; descr.staleness_step_func(staleness_border, 1); @@ -92,7 +93,8 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, if (less(to, shifted_value, getDirection(column_ind))) return std::nullopt; - for (int32_t step_len = 1, step_no = 0; step_no < 100; ++step_no) { + for (int32_t step_len = 1, step_no = 0; step_no < 100; ++step_no) + { Field next_value = shifted_value; descr.step_func(next_value, step_len); @@ -197,9 +199,8 @@ void FillingRow::initFromDefaults(size_t from_pos) void FillingRow::initStalenessRow(const Columns& base_row, size_t row_ind) { - for (size_t i = 0; i < size(); ++i) { + for (size_t i = 0; i < size(); ++i) staleness_base_row[i] = (*base_row[i])[row_ind]; - } } String FillingRow::dump() const From 4c9d865e7592985507accd7aa805647ef9335d72 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 17:45:27 +0000 Subject: [PATCH 130/566] disable debug logs --- src/Processors/Transforms/FillingTransform.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 635b46de3ee..7f81b86697c 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -17,7 +17,7 @@ namespace DB { -constexpr bool debug_logging_enabled = true; +constexpr bool debug_logging_enabled = false; template void logDebug(String key, const T & value, const char * separator = " : ") From 83844841b4f00a24a654ac7ce9f665c321b4df85 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 18:04:00 +0000 Subject: [PATCH 131/566] fix test timezone --- 
.../03266_with_fill_staleness.reference | 163 +++++++++++++++--- .../0_stateless/03266_with_fill_staleness.sql | 2 + 2 files changed, 139 insertions(+), 26 deletions(-) diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.reference b/tests/queries/0_stateless/03266_with_fill_staleness.reference index 6061ecfe400..6b090443359 100644 --- a/tests/queries/0_stateless/03266_with_fill_staleness.reference +++ b/tests/queries/0_stateless/03266_with_fill_staleness.reference @@ -1,28 +1,139 @@ add samples regular with fill -2016-06-15 23:00:00 0 -2016-06-15 23:00:01 0 -2016-06-15 23:00:02 0 -2016-06-15 23:00:03 0 -2016-06-15 23:00:04 0 -2016-06-15 23:00:05 5 -2016-06-15 23:00:06 5 -2016-06-15 23:00:07 5 -2016-06-15 23:00:08 5 -2016-06-15 23:00:09 5 -2016-06-15 23:00:10 10 -2016-06-15 23:00:11 10 -2016-06-15 23:00:12 10 -2016-06-15 23:00:13 10 -2016-06-15 23:00:14 10 -2016-06-15 23:00:15 15 -2016-06-15 23:00:16 15 -2016-06-15 23:00:17 15 -2016-06-15 23:00:18 15 -2016-06-15 23:00:19 15 -2016-06-15 23:00:20 20 -2016-06-15 23:00:21 20 -2016-06-15 23:00:22 20 -2016-06-15 23:00:23 20 -2016-06-15 23:00:24 20 -2016-06-15 23:00:25 25 +2016-06-15 23:00:00 0 original +2016-06-15 23:00:01 0 +2016-06-15 23:00:02 0 +2016-06-15 23:00:03 0 +2016-06-15 23:00:04 0 +2016-06-15 23:00:05 5 original +2016-06-15 23:00:06 5 +2016-06-15 23:00:07 5 +2016-06-15 23:00:08 5 +2016-06-15 23:00:09 5 +2016-06-15 23:00:10 10 original +2016-06-15 23:00:11 10 +2016-06-15 23:00:12 10 +2016-06-15 23:00:13 10 +2016-06-15 23:00:14 10 +2016-06-15 23:00:15 15 original +2016-06-15 23:00:16 15 +2016-06-15 23:00:17 15 +2016-06-15 23:00:18 15 +2016-06-15 23:00:19 15 +2016-06-15 23:00:20 20 original +2016-06-15 23:00:21 20 +2016-06-15 23:00:22 20 +2016-06-15 23:00:23 20 +2016-06-15 23:00:24 20 +2016-06-15 23:00:25 25 original +staleness 1 seconds +2016-06-15 23:00:00 0 original +2016-06-15 23:00:05 5 original +2016-06-15 23:00:10 10 original +2016-06-15 23:00:15 15 original +2016-06-15 23:00:20 20 original +2016-06-15 23:00:25 25 original +staleness 3 seconds +2016-06-15 23:00:00 0 original +2016-06-15 23:00:01 0 +2016-06-15 23:00:02 0 +2016-06-15 23:00:05 5 original +2016-06-15 23:00:06 5 +2016-06-15 23:00:07 5 +2016-06-15 23:00:10 10 original +2016-06-15 23:00:11 10 +2016-06-15 23:00:12 10 +2016-06-15 23:00:15 15 original +2016-06-15 23:00:16 15 +2016-06-15 23:00:17 15 +2016-06-15 23:00:20 20 original +2016-06-15 23:00:21 20 +2016-06-15 23:00:22 20 +2016-06-15 23:00:25 25 original +descending order +2016-06-15 23:00:25 25 original +2016-06-15 23:00:24 25 +2016-06-15 23:00:20 20 original +2016-06-15 23:00:19 20 +2016-06-15 23:00:15 15 original +2016-06-15 23:00:14 15 +2016-06-15 23:00:10 10 original +2016-06-15 23:00:09 10 +2016-06-15 23:00:05 5 original +2016-06-15 23:00:04 5 +2016-06-15 23:00:00 0 original +staleness with to and step +2016-06-15 23:00:00 0 original +2016-06-15 23:00:03 0 +2016-06-15 23:00:05 5 original +2016-06-15 23:00:06 5 +2016-06-15 23:00:09 5 +2016-06-15 23:00:10 10 original +2016-06-15 23:00:12 10 +2016-06-15 23:00:15 15 original +2016-06-15 23:00:18 15 +2016-06-15 23:00:20 20 original +2016-06-15 23:00:21 20 +2016-06-15 23:00:24 20 +2016-06-15 23:00:25 25 original +2016-06-15 23:00:27 25 +2016-06-15 23:00:30 25 +staleness with another regular with fill +2016-06-15 23:00:00 1970-01-01 01:00:00 0 +2016-06-15 23:00:00 1970-01-01 01:00:01 0 +2016-06-15 23:00:00 1970-01-01 01:00:02 0 +2016-06-15 23:00:00 2016-06-15 23:00:00 0 original +2016-06-15 23:00:01 1970-01-01 01:00:00 0 +2016-06-15 23:00:01 1970-01-01 
01:00:01 0 +2016-06-15 23:00:01 1970-01-01 01:00:02 0 +2016-06-15 23:00:05 2016-06-15 23:00:05 5 original +2016-06-15 23:00:05 1970-01-01 01:00:01 5 +2016-06-15 23:00:05 1970-01-01 01:00:02 5 +2016-06-15 23:00:06 1970-01-01 01:00:00 5 +2016-06-15 23:00:06 1970-01-01 01:00:01 5 +2016-06-15 23:00:06 1970-01-01 01:00:02 5 +2016-06-15 23:00:10 2016-06-15 23:00:10 10 original +2016-06-15 23:00:10 1970-01-01 01:00:01 10 +2016-06-15 23:00:10 1970-01-01 01:00:02 10 +2016-06-15 23:00:11 1970-01-01 01:00:00 10 +2016-06-15 23:00:11 1970-01-01 01:00:01 10 +2016-06-15 23:00:11 1970-01-01 01:00:02 10 +2016-06-15 23:00:15 2016-06-15 23:00:15 15 original +2016-06-15 23:00:15 1970-01-01 01:00:01 15 +2016-06-15 23:00:15 1970-01-01 01:00:02 15 +2016-06-15 23:00:16 1970-01-01 01:00:00 15 +2016-06-15 23:00:16 1970-01-01 01:00:01 15 +2016-06-15 23:00:16 1970-01-01 01:00:02 15 +2016-06-15 23:00:20 2016-06-15 23:00:20 20 original +2016-06-15 23:00:20 1970-01-01 01:00:01 20 +2016-06-15 23:00:20 1970-01-01 01:00:02 20 +2016-06-15 23:00:21 1970-01-01 01:00:00 20 +2016-06-15 23:00:21 1970-01-01 01:00:01 20 +2016-06-15 23:00:21 1970-01-01 01:00:02 20 +2016-06-15 23:00:25 2016-06-15 23:00:25 25 original +2016-06-15 23:00:25 1970-01-01 01:00:01 25 +2016-06-15 23:00:25 1970-01-01 01:00:02 25 +double staleness +2016-06-15 23:00:00 2016-06-15 23:00:00 0 original +2016-06-15 23:00:00 2016-06-15 23:00:02 0 +2016-06-15 23:00:00 2016-06-15 23:00:04 0 +2016-06-15 23:00:01 1970-01-01 01:00:00 0 +2016-06-15 23:00:05 2016-06-15 23:00:05 5 original +2016-06-15 23:00:05 2016-06-15 23:00:07 5 +2016-06-15 23:00:05 2016-06-15 23:00:09 5 +2016-06-15 23:00:06 1970-01-01 01:00:00 5 +2016-06-15 23:00:10 2016-06-15 23:00:10 10 original +2016-06-15 23:00:10 2016-06-15 23:00:12 10 +2016-06-15 23:00:10 2016-06-15 23:00:14 10 +2016-06-15 23:00:11 1970-01-01 01:00:00 10 +2016-06-15 23:00:15 2016-06-15 23:00:15 15 original +2016-06-15 23:00:15 2016-06-15 23:00:17 15 +2016-06-15 23:00:15 2016-06-15 23:00:19 15 +2016-06-15 23:00:16 1970-01-01 01:00:00 15 +2016-06-15 23:00:20 2016-06-15 23:00:20 20 original +2016-06-15 23:00:20 2016-06-15 23:00:22 20 +2016-06-15 23:00:20 2016-06-15 23:00:24 20 +2016-06-15 23:00:21 1970-01-01 01:00:00 20 +2016-06-15 23:00:25 2016-06-15 23:00:25 25 original +2016-06-15 23:00:25 2016-06-15 23:00:27 25 +2016-06-15 23:00:25 2016-06-15 23:00:29 25 diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.sql b/tests/queries/0_stateless/03266_with_fill_staleness.sql index 3ab9be63a08..fff702ffd83 100644 --- a/tests/queries/0_stateless/03266_with_fill_staleness.sql +++ b/tests/queries/0_stateless/03266_with_fill_staleness.sql @@ -1,3 +1,5 @@ +SET session_timezone='Europe/Amsterdam'; + DROP TABLE IF EXISTS with_fill_staleness; CREATE TABLE with_fill_staleness (a DateTime, b DateTime, c UInt64) ENGINE = MergeTree ORDER BY a; From 60f0efa67689c28bd5b155eefd3266f385822b94 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 18:08:25 +0000 Subject: [PATCH 132/566] remove debug log --- src/Planner/Planner.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index f1c752aecd0..8d3c75fdabb 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -847,9 +847,6 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, interpolate_description = std::make_shared(std::move(interpolate_actions_dag), empty_aliases); } - if (interpolate_description) - LOG_DEBUG(getLogger("addWithFillStepIfNeeded"), "InterpolateDescription: {}", 
interpolate_description->actions.dumpDAG()); - const auto & query_context = planner_context->getQueryContext(); const Settings & settings = query_context->getSettingsRef(); auto filling_step = std::make_unique( From 64d038c4408f500ae58a6a3cdd68e99c2901faa0 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 18:14:56 +0000 Subject: [PATCH 133/566] cleanup --- src/Analyzer/SortNode.h | 6 ++--- src/Common/FieldVisitorScale.cpp | 22 +++++++++---------- src/Common/FieldVisitorScale.h | 3 --- src/Core/Field.h | 8 ------- .../Transforms/FillingTransform.cpp | 8 ++----- 5 files changed, 16 insertions(+), 31 deletions(-) diff --git a/src/Analyzer/SortNode.h b/src/Analyzer/SortNode.h index d9086dc9ed7..6f0010abdaa 100644 --- a/src/Analyzer/SortNode.h +++ b/src/Analyzer/SortNode.h @@ -105,19 +105,19 @@ public: return children[fill_step_child_index]; } - /// Returns true if sort node has fill step, false otherwise + /// Returns true if sort node has fill staleness, false otherwise bool hasFillStaleness() const { return children[fill_staleness_child_index] != nullptr; } - /// Get fill step + /// Get fill staleness const QueryTreeNodePtr & getFillStaleness() const { return children[fill_staleness_child_index]; } - /// Get fill step + /// Get fill staleness QueryTreeNodePtr & getFillStaleness() { return children[fill_staleness_child_index]; diff --git a/src/Common/FieldVisitorScale.cpp b/src/Common/FieldVisitorScale.cpp index fdb566007c3..a6c0f6d0c5b 100644 --- a/src/Common/FieldVisitorScale.cpp +++ b/src/Common/FieldVisitorScale.cpp @@ -15,16 +15,16 @@ void FieldVisitorScale::operator() (UInt64 & x) const { x *= rhs; } void FieldVisitorScale::operator() (Float64 & x) const { x *= rhs; } void FieldVisitorScale::operator() (Null &) const { /*Do not scale anything*/ } -void FieldVisitorScale::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Strings"); } -void FieldVisitorScale::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Arrays"); } -void FieldVisitorScale::operator() (Tuple &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Tuples"); } -void FieldVisitorScale::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Maps"); } -void FieldVisitorScale::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Objects"); } -void FieldVisitorScale::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply UUIDs"); } -void FieldVisitorScale::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv4s"); } -void FieldVisitorScale::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv6s"); } -void FieldVisitorScale::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply custom type {}", x.getTypeName()); } -void FieldVisitorScale::operator() (AggregateFunctionStateData &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply AggregateFunctionStates"); } -void FieldVisitorScale::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Bools"); } +void FieldVisitorScale::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Strings"); } +void FieldVisitorScale::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Arrays"); } +void FieldVisitorScale::operator() (Tuple &) const { 
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Tuples"); } +void FieldVisitorScale::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Maps"); } +void FieldVisitorScale::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Objects"); } +void FieldVisitorScale::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale UUIDs"); } +void FieldVisitorScale::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale IPv4s"); } +void FieldVisitorScale::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale IPv6s"); } +void FieldVisitorScale::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale custom type {}", x.getTypeName()); } +void FieldVisitorScale::operator() (AggregateFunctionStateData &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale AggregateFunctionStates"); } +void FieldVisitorScale::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Bools"); } } diff --git a/src/Common/FieldVisitorScale.h b/src/Common/FieldVisitorScale.h index 45bacdccc9c..90d86cc53bd 100644 --- a/src/Common/FieldVisitorScale.h +++ b/src/Common/FieldVisitorScale.h @@ -1,10 +1,7 @@ #pragma once -#include #include #include -#include "base/Decimal.h" -#include "base/extended_types.h" namespace DB { diff --git a/src/Core/Field.h b/src/Core/Field.h index 47df5c2907e..7b916d30646 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -185,14 +185,6 @@ public: return *this; } - const DecimalField & operator *= (const DecimalField & r) - { - if (scale != r.getScale()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Multiply different decimal fields"); - dec *= r.getValue(); - return *this; - } - const DecimalField & operator -= (const DecimalField & r) { if (scale != r.getScale()) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 7f81b86697c..46a670394a5 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -125,10 +125,6 @@ static FillColumnDescription::StepFunction getStepFunction(const Field & step, c if (jumps_count != 1) applyVisitor(FieldVisitorScale(jumps_count), shifted_step); - logDebug("field", field.dump()); - logDebug("step", step.dump()); - logDebug("shifted field", shifted_step.dump()); - applyVisitor(FieldVisitorSum(shifted_step), field); }; } @@ -684,8 +680,8 @@ void FillingTransform::transformRange( } const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/true); - logDebug("apply", apply); - logDebug("changed", changed); + logDebug("long jump apply", apply); + logDebug("long jump changed", changed); if (changed) filling_row_changed = true; From f905c804f5b5aa0c0b14e9aaab1034fa8fbbef03 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 19:58:53 +0000 Subject: [PATCH 134/566] fix calibration jump --- src/Interpreters/FillingRow.cpp | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index fdd3b55b66b..49ee558cb20 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -153,23 +153,17 @@ std::pair FillingRow::next(const FillingRow & to_row, bool long_jump if (!next_value.has_value()) return {false, false}; - Field calibration_jump_value = next_value.value(); - 
fill_column_desc.step_func(calibration_jump_value, 1); - - if (equals(calibration_jump_value, to_row[pos])) - next_value = calibration_jump_value; - - if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) - return {false, false}; + /// We need value >= to_row[pos] + fill_column_desc.step_func(next_value.value(), 1); } else { next_value = doJump(fill_column_desc, pos); - - if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) - return {false, false}; } + if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) + return {false, false}; + row[pos] = std::move(next_value.value()); if (equals(row[pos], to_row.row[pos])) { From 6772d3fe6623f73edb4509a7d6e9cbdc5e9883f9 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 22:08:38 +0000 Subject: [PATCH 135/566] little improvement --- src/Interpreters/FillingRow.cpp | 17 ++++++++++------- src/Interpreters/FillingRow.h | 2 +- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 49ee558cb20..8c5f102bcd6 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -28,7 +28,7 @@ FillingRow::FillingRow(const SortDescription & sort_description_) : sort_description(sort_description_) { row.resize(sort_description.size()); - staleness_base_row.resize(sort_description.size()); + staleness_border.resize(sort_description.size()); } bool FillingRow::operator<(const FillingRow & other) const @@ -74,10 +74,7 @@ std::optional FillingRow::doJump(const FillColumnDescription& descr, size if (!descr.fill_staleness.isNull()) { - Field staleness_border = staleness_base_row[column_ind]; - descr.staleness_step_func(staleness_border, 1); - - if (less(next_value, staleness_border, getDirection(column_ind))) + if (less(next_value, staleness_border[column_ind], getDirection(column_ind))) return next_value; else return std::nullopt; @@ -93,7 +90,7 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, if (less(to, shifted_value, getDirection(column_ind))) return std::nullopt; - for (int32_t step_len = 1, step_no = 0; step_no < 100; ++step_no) + for (int32_t step_len = 1, step_no = 0; step_no < 100 && step_len > 0; ++step_no) { Field next_value = shifted_value; descr.step_func(next_value, step_len); @@ -194,7 +191,13 @@ void FillingRow::initFromDefaults(size_t from_pos) void FillingRow::initStalenessRow(const Columns& base_row, size_t row_ind) { for (size_t i = 0; i < size(); ++i) - staleness_base_row[i] = (*base_row[i])[row_ind]; + { + staleness_border[i] = (*base_row[i])[row_ind]; + + const auto& descr = getFillDescription(i); + if (!descr.fill_staleness.isNull()) + descr.staleness_step_func(staleness_border[i], 1); + } } String FillingRow::dump() const diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index 14b6034ce35..dc787173191 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -46,7 +46,7 @@ public: private: Row row; - Row staleness_base_row; + Row staleness_border; SortDescription sort_description; }; From b03a296542de52c3cb2b6f309a4bc496e4a70454 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 28 Oct 2024 23:25:38 +0000 Subject: [PATCH 136/566] Fix right join - 
disabling PR lead to dup result --- src/Planner/PlannerJoinTree.cpp | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 7889a358d95..834e572b167 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -665,11 +665,15 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres bool is_single_table_expression, bool wrap_read_columns_in_subquery) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "table_expression:\n{}", table_expression->dumpTree()); - const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); + LOG_DEBUG( + getLogger(__PRETTY_FUNCTION__), + "pr_enabled={} table_expression:\n{}", + settings[Setting::allow_experimental_parallel_reading_from_replicas].toString(), + table_expression->dumpTree()); + auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression); QueryProcessingStage::Enum from_stage = QueryProcessingStage::Enum::FetchColumns; @@ -914,11 +918,11 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// It is just a safety check needed until we have a proper sending plan to replicas. /// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. - const bool other_table_already_chosen_for_reading_with_parallel_replicas - = planner_context->getGlobalPlannerContext()->parallel_replicas_table - && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; - if (other_table_already_chosen_for_reading_with_parallel_replicas) - planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + // const bool other_table_already_chosen_for_reading_with_parallel_replicas + // = planner_context->getGlobalPlannerContext()->parallel_replicas_table + // && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; + // if (other_table_already_chosen_for_reading_with_parallel_replicas) + // planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); storage->read( query_plan, @@ -930,6 +934,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres max_block_size, max_streams); + LOG_DEBUG(getLogger("dumpQueryPlan"), "\n{}", dumpQueryPlan(query_plan)); + auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings) { if (!table->isMergeTree()) @@ -1249,6 +1255,8 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Join expression: {}", join_table_expression->dumpTree()); + auto & join_node = join_table_expression->as(); if (left_join_tree_query_plan.from_stage != QueryProcessingStage::FetchColumns) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, @@ -1921,6 +1929,8 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, "Expected 1 query plan for JOIN TREE. 
Actual {}", query_plans_stack.size()); + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "JOIN query plan:\n{}", dumpQueryPlan(query_plans_stack.back().query_plan)); + return std::move(query_plans_stack.back()); } From dc976c48d284fa79ad05fe234130ed3794522511 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 28 Oct 2024 23:36:57 +0000 Subject: [PATCH 137/566] Test --- .../03254_pr_join_on_dups.reference | 273 ++++++++++++++++++ .../0_stateless/03254_pr_join_on_dups.sql | 81 ++++++ 2 files changed, 354 insertions(+) create mode 100644 tests/queries/0_stateless/03254_pr_join_on_dups.reference create mode 100644 tests/queries/0_stateless/03254_pr_join_on_dups.sql diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.reference b/tests/queries/0_stateless/03254_pr_join_on_dups.reference new file mode 100644 index 00000000000..58602bafb5d --- /dev/null +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.reference @@ -0,0 +1,273 @@ +inner +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +inner subs +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +inner expr +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +left +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +left subs +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +left expr +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +right +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +right subs +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +full +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +full subs +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +self inner +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self inner nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +self inner nullable vs not nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +self inner nullable vs not nullable 2 +4 r6 nr6 4 r6 nr6 +6 r7 nr7 6 r7 nr7 +7 r8 nr8 7 r8 nr8 +9 r9 nr9 9 r9 nr9 +self left +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N 
+9 l9 \N 9 l9 \N +self left nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 0 \N +4 l6 \N 0 \N +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self left nullable vs not nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self left nullable vs not nullable 2 +1 r1 \N 0 \N +1 r2 \N 0 \N +2 r3 \N 0 \N +3 r4 \N 0 \N +3 r5 \N 0 \N +4 r6 nr6 4 r6 nr6 +6 r7 nr7 6 r7 nr7 +7 r8 nr8 7 r8 nr8 +9 r9 nr9 9 r9 nr9 +self right +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self right nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +self right nullable vs not nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +self full +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self full nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 0 \N +4 l6 \N 0 \N +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self full nullable vs not nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql new file mode 100644 index 00000000000..71695c0d486 --- /dev/null +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -0,0 +1,81 @@ +drop table if exists X sync; +drop table if exists Y sync; + +create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); +create table Y (id Int32, y_a String, y_b Nullable(String)) engine ReplicatedMergeTree('/clickhouse/{database}/Y', '1') order by tuple(); + +insert into X (id, x_a, x_b) values (1, 'l1', 1), (2, 'l2', 2), (2, 'l3', 3), (3, 'l4', 4); +insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), (9, 'l9'); +insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); +insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); + +set enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; + +select 'inner'; +select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'inner subs'; +select s.*, j.* from (select * from X) as s inner join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'inner expr'; +select X.*, Y.* from X inner join Y on (X.id + 1) = (Y.id + 1) order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; + +select 'left'; +select X.*, Y.* from X left join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'left subs'; +select s.*, 
j.* from (select * from X) as s left join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'left expr'; +select X.*, Y.* from X left join Y on (X.id + 1) = (Y.id + 1) order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; + +select 'right'; +select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'right subs'; +select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +--select 'right expr'; +--select X.*, Y.* from X right join Y on (X.id + 1) = (Y.id + 1) order by id; + +select 'full'; +select X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'full subs'; +select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +--select 'full expr'; +--select X.*, Y.* from X full join Y on (X.id + 1) = (Y.id + 1) order by id; + +select 'self inner'; +select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable'; +select X.*, s.* from X inner join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable vs not nullable'; +select X.*, s.* from X inner join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +-- TODO: s.y_b == '' instead of NULL +select 'self inner nullable vs not nullable 2'; +select Y.*, s.* from Y inner join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; + +select 'self left'; +select X.*, s.* from X left join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable'; +select X.*, s.* from X left join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable vs not nullable'; +select X.*, s.* from X left join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +-- TODO: s.y_b == '' instead of NULL +select 'self left nullable vs not nullable 2'; +select Y.*, s.* from Y left join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; + +select 'self right'; +select X.*, s.* from X right join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self right nullable'; +select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self right nullable vs not nullable'; +select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +--select 'self right nullable vs not nullable 2'; +--select Y.*, s.* from Y right join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; + +select 'self full'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable'; +select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable vs not nullable'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +--select 'self full nullable vs not nullable 2'; 
+--select Y.*, s.* from Y full join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; + +-- drop table X; +-- drop table Y; From 219cc4e5d241201d8bb4838cc440735ec5c905ea Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Tue, 29 Oct 2024 12:15:13 +0800 Subject: [PATCH 138/566] fix mismatched aggreage function name of quantileExactWeightedInterpolated --- .../AggregateFunctionQuantileExactWeighted.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp b/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp index 58b3b75b056..116b04bf4ba 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp +++ b/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp @@ -387,7 +387,7 @@ template using FuncQuantileExactWeighted = AggregateFunctionQuantile< Value, QuantileExactWeighted, - NameQuantileExactWeighted, + std::conditional_t, true, std::conditional_t, false, @@ -396,7 +396,7 @@ template using FuncQuantilesExactWeighted = AggregateFunctionQuantile< Value, QuantileExactWeighted, - NameQuantilesExactWeighted, + std::conditional_t, true, std::conditional_t, true, From 190703b603fe8bfef6d92cc883f9e0107fdce83c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 29 Oct 2024 05:32:52 +0100 Subject: [PATCH 139/566] Close #8687 --- .../03258_multiple_array_joins.reference | 8 +++++++ .../03258_multiple_array_joins.sql | 24 +++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 tests/queries/0_stateless/03258_multiple_array_joins.reference create mode 100644 tests/queries/0_stateless/03258_multiple_array_joins.sql diff --git a/tests/queries/0_stateless/03258_multiple_array_joins.reference b/tests/queries/0_stateless/03258_multiple_array_joins.reference new file mode 100644 index 00000000000..4d357c8ac80 --- /dev/null +++ b/tests/queries/0_stateless/03258_multiple_array_joins.reference @@ -0,0 +1,8 @@ +1 Michel Foucault alive no +1 Michel Foucault profession philosopher +1 Thomas Aquinas alive no +1 Thomas Aquinas profession philosopher +2 Nicola Tesla alive no +2 Nicola Tesla profession inventor +2 Thomas Edison alive no +2 Thomas Edison profession inventor diff --git a/tests/queries/0_stateless/03258_multiple_array_joins.sql b/tests/queries/0_stateless/03258_multiple_array_joins.sql new file mode 100644 index 00000000000..5afe7725d3f --- /dev/null +++ b/tests/queries/0_stateless/03258_multiple_array_joins.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS test_multiple_array_join; + +CREATE TABLE test_multiple_array_join ( + id UInt64, + person Nested ( + name String, + surname String + ), + properties Nested ( + key String, + value String + ) +) Engine=MergeTree ORDER BY id; + +INSERT INTO test_multiple_array_join VALUES (1, ['Thomas', 'Michel'], ['Aquinas', 'Foucault'], ['profession', 'alive'], ['philosopher', 'no']); +INSERT INTO test_multiple_array_join VALUES (2, ['Thomas', 'Nicola'], ['Edison', 'Tesla'], ['profession', 'alive'], ['inventor', 'no']); + +SELECT * +FROM test_multiple_array_join +ARRAY JOIN person +ARRAY JOIN properties +ORDER BY ALL; + +DROP TABLE test_multiple_array_join; From aaba95ca8ceac01fcc22416a400d52c8a169cafd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Tue, 29 Oct 2024 11:41:37 +0300 Subject: [PATCH 140/566] Simplify and fix limit check --- src/Interpreters/InterpreterCreateQuery.cpp | 51 ++++++--------------- 
1 file changed, 15 insertions(+), 36 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index f8e85733911..3a6e7bc1653 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1950,46 +1950,25 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create, void InterpreterCreateQuery::throwIfTooManyEntities(ASTCreateQuery & create, StoragePtr storage) const { + auto check_and_throw = [&](auto setting, CurrentMetrics::Metric metric, String setting_name, String entity_name) + { + UInt64 num_limit = getContext()->getGlobalContext()->getServerSettings()[setting]; + UInt64 attached_count = CurrentMetrics::get(metric); + if (num_limit > 0 && attached_count >= num_limit) + throw Exception(ErrorCodes::TOO_MANY_TABLES, + "Too many {}. " + "The limit (server configuration parameter `{}`) is set to {}, the current number is {}", + entity_name, setting_name, num_limit, attached_count); + }; + if (auto * replicated_storage = typeid_cast(storage.get())) - { - UInt64 num_limit = getContext()->getGlobalContext()->getServerSettings()[ServerSetting::max_replicated_table_num_to_throw]; - UInt64 attached_count = CurrentMetrics::get(CurrentMetrics::AttachedReplicatedTable); - if (attached_count >= num_limit) - throw Exception(ErrorCodes::TOO_MANY_TABLES, - "Too many replicated tables. " - "The limit (server configuration parameter `max_replicated_table_num_to_throw`) is set to {}, the current number is {}", - num_limit, attached_count); - } + check_and_throw(ServerSetting::max_replicated_table_num_to_throw, CurrentMetrics::AttachedReplicatedTable, "max_replicated_table_num_to_throw", "replicated tables"); else if (create.is_dictionary) - { - UInt64 num_limit = getContext()->getGlobalContext()->getServerSettings()[ServerSetting::max_dictionary_num_to_throw]; - UInt64 attached_count = CurrentMetrics::get(CurrentMetrics::AttachedDictionary); - if (attached_count >= num_limit) - throw Exception(ErrorCodes::TOO_MANY_TABLES, - "Too many dictionaries. " - "The limit (server configuration parameter `max_dictionary_num_to_throw`) is set to {}, the current number is {}", - num_limit, attached_count); - } + check_and_throw(ServerSetting::max_dictionary_num_to_throw, CurrentMetrics::AttachedDictionary, "max_dictionary_num_to_throw", "dictionaries"); else if (create.isView()) - { - UInt64 num_limit = getContext()->getGlobalContext()->getServerSettings()[ServerSetting::max_view_num_to_throw]; - UInt64 attached_count = CurrentMetrics::get(CurrentMetrics::AttachedView); - if (attached_count >= num_limit) - throw Exception(ErrorCodes::TOO_MANY_TABLES, - "Too many views. " - "The limit (server configuration parameter `max_view_num_to_throw`) is set to {}, the current number is {}", - num_limit, attached_count); - } + check_and_throw(ServerSetting::max_view_num_to_throw, CurrentMetrics::AttachedView, "max_view_num_to_throw", "views"); else - { - UInt64 num_limit = getContext()->getGlobalContext()->getServerSettings()[ServerSetting::max_table_num_to_throw]; - UInt64 attached_count = CurrentMetrics::get(CurrentMetrics::AttachedTable); - if (attached_count >= num_limit) - throw Exception(ErrorCodes::TOO_MANY_TABLES, - "Too many tables. 
" - "The limit (server configuration parameter `max_table_num_to_throw`) is set to {}, the current number is {}", - num_limit, attached_count); - } + check_and_throw(ServerSetting::max_table_num_to_throw, CurrentMetrics::AttachedTable, "max_table_num_to_throw", "tables"); } From 19c95b2f0e52bd3794d160605e24c59abc5101b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Tue, 29 Oct 2024 11:44:50 +0300 Subject: [PATCH 141/566] Test dictionaries --- .../test_table_db_num_limit/config/config.xml | 1 + tests/integration/test_table_db_num_limit/test.py | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/tests/integration/test_table_db_num_limit/config/config.xml b/tests/integration/test_table_db_num_limit/config/config.xml index a4246c79694..bfe50325d3f 100644 --- a/tests/integration/test_table_db_num_limit/config/config.xml +++ b/tests/integration/test_table_db_num_limit/config/config.xml @@ -10,6 +10,7 @@ + 10 10 5 10 diff --git a/tests/integration/test_table_db_num_limit/test.py b/tests/integration/test_table_db_num_limit/test.py index ce981ffca3c..bcfa60e48cd 100644 --- a/tests/integration/test_table_db_num_limit/test.py +++ b/tests/integration/test_table_db_num_limit/test.py @@ -48,6 +48,18 @@ def test_table_db_limit(started_cluster): "create table default.tx (a Int32) Engine = Log" ) + # Dictionaries + for i in range(10): + node.query( + "create dictionary d{} (a Int32) primary key a source(null()) layout(flat()) lifetime(1000)".format( + i + ) + ) + + assert "TOO_MANY_TABLES" in node.query_and_get_error( + "create dictionary dx (a Int32) primary key a source(null()) layout(flat()) lifetime(1000)" + ) + # Replicated tables for i in range(10): node.query("drop table t{}".format(i)) From e2c2e67c7b4915da6302a516826573cf1ccee701 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 29 Oct 2024 10:02:24 +0000 Subject: [PATCH 142/566] Fix --- src/Planner/findParallelReplicasQuery.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 891e5034f44..58a7f48ee2b 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -423,7 +423,10 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr return nullptr; const auto * res = findTableForParallelReplicas(query_tree_node.get()); - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); + if (res) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); + else + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); return res; } From c7fce84729435f98222d0e02ba035cdd6085a0df Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 29 Oct 2024 12:17:46 +0000 Subject: [PATCH 143/566] Cleanup --- src/Interpreters/ClusterProxy/executeQuery.cpp | 4 ++-- src/Planner/Planner.cpp | 2 +- src/Planner/PlannerJoinTree.cpp | 16 +++++----------- src/Planner/findParallelReplicasQuery.cpp | 4 ++-- src/Planner/findQueryForParallelReplicas.h | 4 ++-- .../03173_parallel_replicas_join_bug.sh | 3 +++ 6 files changed, 15 insertions(+), 18 deletions(-) diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index 4b1f3094be3..e88fdeb0379 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ 
b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -477,8 +477,8 @@ void executeQueryWithParallelReplicas( QueryPlanStepPtr analyzed_read_from_merge_tree) { auto logger = getLogger("executeQueryWithParallelReplicas"); - LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas\n{}", - storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage, StackTrace().toString()); + LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas", + storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage); const auto & settings = context->getSettingsRef(); diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 8d3c75fdabb..17277dfe8cd 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -274,7 +274,7 @@ FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & return res; } -FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options) +FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options) { if (select_query_options.only_analyze) return {}; diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 834e572b167..5c08cc27aff 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -918,11 +918,11 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// It is just a safety check needed until we have a proper sending plan to replicas. /// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. 
- // const bool other_table_already_chosen_for_reading_with_parallel_replicas - // = planner_context->getGlobalPlannerContext()->parallel_replicas_table - // && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; - // if (other_table_already_chosen_for_reading_with_parallel_replicas) - // planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + const bool other_table_already_chosen_for_reading_with_parallel_replicas + = planner_context->getGlobalPlannerContext()->parallel_replicas_table + && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; + if (other_table_already_chosen_for_reading_with_parallel_replicas) + planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); storage->read( query_plan, @@ -934,8 +934,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres max_block_size, max_streams); - LOG_DEBUG(getLogger("dumpQueryPlan"), "\n{}", dumpQueryPlan(query_plan)); - auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings) { if (!table->isMergeTree()) @@ -1255,8 +1253,6 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Join expression: {}", join_table_expression->dumpTree()); - auto & join_node = join_table_expression->as(); if (left_join_tree_query_plan.from_stage != QueryProcessingStage::FetchColumns) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, @@ -1929,8 +1925,6 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, "Expected 1 query plan for JOIN TREE. Actual {}", query_plans_stack.size()); - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "JOIN query plan:\n{}", dumpQueryPlan(query_plans_stack.back().query_plan)); - return std::move(query_plans_stack.back()); } diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 58a7f48ee2b..d92500e82fc 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -250,7 +250,7 @@ const QueryNode * findQueryForParallelReplicas( return res; } -const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options) +const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options) { if (select_query_options.only_analyze) return nullptr; @@ -404,7 +404,7 @@ static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * que return nullptr; } -const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options) +const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options) { if (select_query_options.only_analyze) return nullptr; diff --git a/src/Planner/findQueryForParallelReplicas.h b/src/Planner/findQueryForParallelReplicas.h index cdce4ad0b47..83aa11c8c64 100644 --- a/src/Planner/findQueryForParallelReplicas.h +++ b/src/Planner/findQueryForParallelReplicas.h @@ -15,10 +15,10 @@ struct SelectQueryOptions; /// Find a query which can be executed with parallel replicas up to WithMergableStage. 
/// Returned query will always contain some (>1) subqueries, possibly with joins. -const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options); +const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options); /// Find a table from which we should read on follower replica. It's the left-most table within all JOINs and UNIONs. -const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options); +const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options); struct JoinTreeQueryPlan; diff --git a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh index 289a49c72f4..1ee3d729cb4 100755 --- a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh +++ b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh @@ -6,12 +6,15 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q " + DROP TABLE IF EXISTS ids; CREATE TABLE ids (id UUID, whatever String) Engine=MergeTree ORDER BY tuple(); INSERT INTO ids VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', 'whatever'); + DROP TABLE IF EXISTS data; CREATE TABLE data (id UUID, event_time DateTime, status String) Engine=MergeTree ORDER BY tuple(); INSERT INTO data VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', '2000-01-01', 'CREATED'); + DROP TABLE IF EXISTS data2; CREATE TABLE data2 (id UUID, event_time DateTime, status String) Engine=MergeTree ORDER BY tuple(); INSERT INTO data2 VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', '2000-01-02', 'CREATED'); " From 772209e6c0bd0a124d6605a6fe6ef873df8ec161 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Tue, 29 Oct 2024 16:23:21 +0300 Subject: [PATCH 144/566] Test other replica and cleanup --- .../test_table_db_num_limit/config/config.xml | 5 ++- .../config/config1.xml | 4 ++ .../config/config2.xml | 4 ++ .../test_table_db_num_limit/test.py | 40 +++++++++++++++++-- 4 files changed, 48 insertions(+), 5 deletions(-) create mode 100644 tests/integration/test_table_db_num_limit/config/config1.xml create mode 100644 tests/integration/test_table_db_num_limit/config/config2.xml diff --git a/tests/integration/test_table_db_num_limit/config/config.xml b/tests/integration/test_table_db_num_limit/config/config.xml index bfe50325d3f..88438d51b94 100644 --- a/tests/integration/test_table_db_num_limit/config/config.xml +++ b/tests/integration/test_table_db_num_limit/config/config.xml @@ -6,13 +6,16 @@ node1 9000 + + node2 + 9000 + 10 10 - 5 10 diff --git a/tests/integration/test_table_db_num_limit/config/config1.xml b/tests/integration/test_table_db_num_limit/config/config1.xml new file mode 100644 index 00000000000..73b695f3cd6 --- /dev/null +++ b/tests/integration/test_table_db_num_limit/config/config1.xml @@ -0,0 +1,4 @@ + + 5 + + diff --git a/tests/integration/test_table_db_num_limit/config/config2.xml b/tests/integration/test_table_db_num_limit/config/config2.xml new file mode 100644 index 00000000000..e46ca03d70f --- /dev/null +++ b/tests/integration/test_table_db_num_limit/config/config2.xml @@ -0,0 +1,4 @@ + + 3 + + diff --git a/tests/integration/test_table_db_num_limit/test.py b/tests/integration/test_table_db_num_limit/test.py index 
bcfa60e48cd..53a644a262c 100644 --- a/tests/integration/test_table_db_num_limit/test.py +++ b/tests/integration/test_table_db_num_limit/test.py @@ -7,7 +7,15 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node1", with_zookeeper=True, - main_configs=["config/config.xml"], + macros={"replica": "r1"}, + main_configs=["config/config.xml", "config/config1.xml"], +) + +node2 = cluster.add_instance( + "node2", + with_zookeeper=True, + macros={"replica": "r2"}, + main_configs=["config/config.xml", "config/config2.xml"], ) @@ -64,15 +72,27 @@ def test_table_db_limit(started_cluster): for i in range(10): node.query("drop table t{}".format(i)) - for i in range(5): + for i in range(3): node.query( - "create table t{} (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/t{}', 'r1') order by a".format( + "create table t{} on cluster 'cluster' (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/t{}', '{{replica}}') order by a".format( + i, i + ) + ) + + # Test limit on other replica + assert "Too many replicated tables" in node2.query_and_get_error( + "create table tx (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/tx', '{replica}') order by a" + ) + + for i in range(3, 5): + node.query( + "create table t{} (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/t{}', '{{replica}}') order by a".format( i, i ) ) assert "Too many replicated tables" in node.query_and_get_error( - "create table tx (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/tx', 'r1') order by a" + "create table tx (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/tx', '{replica}') order by a" ) # Checks that replicated tables are also counted as regular tables @@ -82,3 +102,15 @@ def test_table_db_limit(started_cluster): assert "TOO_MANY_TABLES" in node.query_and_get_error( "create table tx (a Int32) Engine = Log" ) + + # Cleanup + for i in range(10): + node.query("drop table t{} sync".format(i)) + for i in range(3): + node2.query("drop table t{} sync".format(i)) + node.query("system drop replica 'r1' from ZKPATH '/clickhouse/tables/tx'") + node.query("system drop replica 'r2' from ZKPATH '/clickhouse/tables/tx'") + for i in range(9): + node.query("drop database db{}".format(i)) + for i in range(10): + node.query("drop dictionary d{}".format(i)) From 04f68594dcf3dccc5eaecd542e00073af39777d9 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Tue, 29 Oct 2024 21:36:43 +0800 Subject: [PATCH 145/566] Print method in clickhouse-compressor --stat. --- programs/compressor/Compressor.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index 819f16cfd64..fc07a0adc66 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -33,12 +33,12 @@ namespace DB namespace { -/// Outputs sizes of uncompressed and compressed blocks for compressed file. +/// Outputs method, sizes of uncompressed and compressed blocks for compressed file. 
void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out) { while (!in.eof()) { - in.ignore(16); /// checksum + in.ignore(16); /// checksum char header[COMPRESSED_BLOCK_HEADER_SIZE]; in.readStrict(header, COMPRESSED_BLOCK_HEADER_SIZE); @@ -50,6 +50,13 @@ void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out) UInt32 size_decompressed = unalignedLoad(&header[5]); + auto method_byte = static_cast(header[0]); + auto method = magic_enum::enum_cast(method_byte); + if (method) + DB::writeText(magic_enum::enum_name(*method), out); + else + DB::writeText(fmt::format("UNKNOWN({})", method_byte), out); + DB::writeChar('\t', out); DB::writeText(size_decompressed, out); DB::writeChar('\t', out); DB::writeText(size_compressed, out); From 0fda9bf238d261269b2dd7f47c79898ceaf931cb Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 29 Oct 2024 14:38:57 +0000 Subject: [PATCH 146/566] Fix 03080_incorrect_join_with_merge.sql --- src/Storages/buildQueryTreeForShard.cpp | 4 ++-- .../queries/0_stateless/03080_incorrect_join_with_merge.sql | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index df9bfd049fb..8d8af134a05 100644 --- a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -366,8 +366,8 @@ QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_contex { QueryTreeNodePtr join_table_expression; const auto join_kind = join_node->getKind(); - const auto join_strictness = join_node->getStrictness(); - if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) + // const auto join_strictness = join_node->getStrictness(); + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner /* && join_strictness == JoinStrictness::All*/)) { join_table_expression = join_node->getRightTableExpression(); } diff --git a/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql b/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql index a34c71a44e2..a743c5bdffb 100644 --- a/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql +++ b/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql @@ -2,6 +2,7 @@ SET enable_analyzer=1; SET distributed_foreground_insert=1; +DROP TABLE IF EXISTS first_table_lr SYNC; CREATE TABLE first_table_lr ( id String, @@ -11,6 +12,7 @@ ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_03080/alter', ' ORDER BY id; +DROP TABLE IF EXISTS first_table; CREATE TABLE first_table ( id String, @@ -19,6 +21,7 @@ CREATE TABLE first_table ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'first_table_lr'); +DROP TABLE IF EXISTS second_table_lr; CREATE TABLE second_table_lr ( id String, @@ -26,6 +29,7 @@ CREATE TABLE second_table_lr ) ENGINE = MergeTree() ORDER BY id; +DROP TABLE IF EXISTS second_table; CREATE TABLE second_table ( id String, @@ -36,6 +40,7 @@ ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'second_table_lr INSERT INTO first_table VALUES ('1', '2'), ('3', '4'); INSERT INTO second_table VALUES ('1', '2'), ('3', '4'); +DROP TABLE IF EXISTS two_tables; CREATE TABLE two_tables ( id String, From 66f750ea6f12c08f99c7fecea700d8c7f1eaeeb7 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 29 Oct 2024 15:12:03 +0000 Subject: [PATCH 147/566] remove debug logs --- src/Interpreters/Aggregator.cpp | 3 --- src/Parsers/CreateQueryUUIDs.cpp | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff 
--git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 2dd6513d498..bb9e22e5a1b 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1504,7 +1504,6 @@ bool Aggregator::executeOnBlock(Columns columns, && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; - LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } @@ -1521,7 +1520,6 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si size_t rows = data_variants.size(); std::unique_lock lk(tmp_files_mutex); - LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: max_temp_file_size {}", __FILE__, __LINE__, max_temp_file_size); auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); lk.unlock(); @@ -2934,7 +2932,6 @@ bool Aggregator::mergeOnBlock(Block block, AggregatedDataVariants & result, bool && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; - LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } diff --git a/src/Parsers/CreateQueryUUIDs.cpp b/src/Parsers/CreateQueryUUIDs.cpp index c788cc7a025..14cf5761a11 100644 --- a/src/Parsers/CreateQueryUUIDs.cpp +++ b/src/Parsers/CreateQueryUUIDs.cpp @@ -31,7 +31,7 @@ CreateQueryUUIDs::CreateQueryUUIDs(const ASTCreateQuery & query, bool generate_r /// If we generate random UUIDs for already existing tables then those UUIDs will not be correct making those inner target table inaccessible. /// Thus it's not safe for example to replace /// "ATTACH MATERIALIZED VIEW mv AS SELECT a FROM b" with - /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID "XXXX" AS SELECT a FROM b" + /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID "248372b7-02c4-4c88-a5e1-282a83cc572a" AS SELECT a FROM b" /// This replacement is safe only for CREATE queries when inner target tables don't exist yet. if (!query.attach) { From 33d986927036bcef001f220092523fd256baa350 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Tue, 29 Oct 2024 19:42:43 +0100 Subject: [PATCH 148/566] Update settings.md --- docs/en/operations/settings/settings.md | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 821d08cad7b..e1af24a0b8e 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -717,22 +717,6 @@ Default value: 0 In CREATE TABLE statement allows specifying Variant type with similar variant types (for example, with different numeric or date types). Enabling this setting may introduce some ambiguity when working with values with similar types. -## allow_suspicious_types_in_group_by {#allow_suspicious_types_in_group_by} - -Type: Bool - -Default value: 0 - -Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in GROUP BY keys. - -## allow_suspicious_types_in_order_by {#allow_suspicious_types_in_order_by} - -Type: Bool - -Default value: 0 - -Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in ORDER BY keys. 
- ## allow_unrestricted_reads_from_keeper {#allow_unrestricted_reads_from_keeper} Type: Bool From 170a24a4187bda9a5bc25fa8263222e502963b10 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Tue, 29 Oct 2024 19:43:13 +0100 Subject: [PATCH 149/566] Update SettingsChangesHistory.cpp --- src/Core/SettingsChangesHistory.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 169429d1c34..fc5066029e8 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -64,6 +64,8 @@ static std::initializer_list Date: Tue, 29 Oct 2024 19:44:00 +0100 Subject: [PATCH 150/566] Update settings.md --- docs/en/operations/settings/settings.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index e1af24a0b8e..b9b81022d4f 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -9746,3 +9746,5 @@ Type: Int64 Default value: 0 Allows you to select the max window log of ZSTD (it will not be used for MergeTree family) + + From bebef8d0d96e27c9823419b3a7f669d62c6a6a56 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 29 Oct 2024 23:58:39 +0000 Subject: [PATCH 151/566] Fix right joins again --- src/Planner/PlannerJoinTree.cpp | 38 +++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 5c08cc27aff..0007dc9d158 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -665,7 +665,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres bool is_single_table_expression, bool wrap_read_columns_in_subquery) { - const auto & query_context = planner_context->getQueryContext(); + auto query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); LOG_DEBUG( @@ -922,17 +922,33 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres = planner_context->getGlobalPlannerContext()->parallel_replicas_table && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; if (other_table_already_chosen_for_reading_with_parallel_replicas) - planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + { + chassert(query_context->canUseParallelReplicasOnFollower()); - storage->read( - query_plan, - columns_names, - storage_snapshot, - table_expression_query_info, - query_context, - from_stage, - max_block_size, - max_streams); + auto mutable_context = Context::createCopy(query_context); + mutable_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + storage->read( + query_plan, + columns_names, + storage_snapshot, + table_expression_query_info, + mutable_context, + from_stage, + max_block_size, + max_streams); + } + else + { + storage->read( + query_plan, + columns_names, + storage_snapshot, + table_expression_query_info, + query_context, + from_stage, + max_block_size, + max_streams); + } auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings) { From c5d6acf5e3ff24122518abb992e78c73954f8703 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Wed, 30 Oct 2024 09:00:18 +0800 Subject: [PATCH 152/566] Fix --- 
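The one-line "Fix" below works around magic_enum's default reflection range: by default magic_enum only reflects enum values in roughly [-128, 128], while the compression method bytes sit above 0x80, so enum_cast would return nothing for them. A minimal, hypothetical sketch of the effect (the enum name and the particular values here are illustrative stand-ins, not the real DB::CompressionMethodByte definition):

// MAGIC_ENUM_RANGE_MAX must be defined before magic_enum is included to take effect.
#define MAGIC_ENUM_RANGE_MAX 256
#include <magic_enum.hpp>
#include <cstdint>
#include <iostream>

// Hypothetical enum standing in for DB::CompressionMethodByte; the real codes also sit above 0x80.
enum class MethodByte : std::uint8_t { LZ4 = 0x82, ZSTD = 0x90, Multiple = 0x91 };

int main()
{
    auto method = magic_enum::enum_cast<MethodByte>(std::uint8_t{0x90});
    if (method)
        std::cout << magic_enum::enum_name(*method) << '\n'; // "ZSTD"; without the define this branch is not taken
    else
        std::cout << "UNKNOWN\n";
}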
programs/compressor/Compressor.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index fc07a0adc66..69936912d49 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -1,3 +1,6 @@ +/// For magic_enum to properly get enum name of DB::CompressionMethodByte +#define MAGIC_ENUM_RANGE_MAX 256 + #include #include #include From 10ee24d9a0c749624b86840f78b2bd3bbdb221d9 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Wed, 30 Oct 2024 09:41:18 +0800 Subject: [PATCH 153/566] Fix multiple codecs and add test --- programs/compressor/Compressor.cpp | 26 +++++----------- .../getCompressionCodecForFile.cpp | 31 ++++++++++++++----- src/Compression/getCompressionCodecForFile.h | 4 +++ .../03260_compressor_stat.reference | 1 + .../0_stateless/03260_compressor_stat.sh | 13 ++++++++ 5 files changed, 49 insertions(+), 26 deletions(-) create mode 100644 tests/queries/0_stateless/03260_compressor_stat.reference create mode 100755 tests/queries/0_stateless/03260_compressor_stat.sh diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index 69936912d49..7bb434d40a8 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -1,6 +1,3 @@ -/// For magic_enum to properly get enum name of DB::CompressionMethodByte -#define MAGIC_ENUM_RANGE_MAX 256 - #include #include #include @@ -14,9 +11,12 @@ #include #include #include +#include +#include #include #include #include +#include #include #include #include @@ -41,31 +41,19 @@ void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out) { while (!in.eof()) { - in.ignore(16); /// checksum - - char header[COMPRESSED_BLOCK_HEADER_SIZE]; - in.readStrict(header, COMPRESSED_BLOCK_HEADER_SIZE); - - UInt32 size_compressed = unalignedLoad(&header[1]); + UInt32 size_compressed; + UInt32 size_decompressed; + auto codec = DB::getCompressionCodecForFile(in, size_compressed, size_decompressed, true /* skip_to_next_block */); if (size_compressed > DBMS_MAX_COMPRESSED_SIZE) throw DB::Exception(DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED, "Too large size_compressed. 
Most likely corrupted data."); - UInt32 size_decompressed = unalignedLoad(&header[5]); - - auto method_byte = static_cast(header[0]); - auto method = magic_enum::enum_cast(method_byte); - if (method) - DB::writeText(magic_enum::enum_name(*method), out); - else - DB::writeText(fmt::format("UNKNOWN({})", method_byte), out); + DB::writeText(queryToString(codec->getFullCodecDesc()), out); DB::writeChar('\t', out); DB::writeText(size_decompressed, out); DB::writeChar('\t', out); DB::writeText(size_compressed, out); DB::writeChar('\n', out); - - in.ignore(size_compressed - COMPRESSED_BLOCK_HEADER_SIZE); } } diff --git a/src/Compression/getCompressionCodecForFile.cpp b/src/Compression/getCompressionCodecForFile.cpp index 027ee0ac57a..b04e4b6371a 100644 --- a/src/Compression/getCompressionCodecForFile.cpp +++ b/src/Compression/getCompressionCodecForFile.cpp @@ -10,33 +10,50 @@ namespace DB { - using Checksum = CityHash_v1_0_2::uint128; -CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path) +CompressionCodecPtr +getCompressionCodecForFile(ReadBuffer & read_buffer, UInt32 & size_compressed, UInt32 & size_decompressed, bool skip_to_next_block) { - auto read_buffer = data_part_storage.readFile(relative_path, {}, std::nullopt, std::nullopt); - read_buffer->ignore(sizeof(Checksum)); + read_buffer.ignore(sizeof(Checksum)); UInt8 header_size = ICompressionCodec::getHeaderSize(); + size_t starting_bytes = read_buffer.count(); PODArray compressed_buffer; compressed_buffer.resize(header_size); - read_buffer->readStrict(compressed_buffer.data(), header_size); + read_buffer.readStrict(compressed_buffer.data(), header_size); uint8_t method = ICompressionCodec::readMethod(compressed_buffer.data()); + size_compressed = unalignedLoad(&compressed_buffer[1]); + size_decompressed = unalignedLoad(&compressed_buffer[5]); if (method == static_cast(CompressionMethodByte::Multiple)) { compressed_buffer.resize(1); - read_buffer->readStrict(compressed_buffer.data(), 1); + read_buffer.readStrict(compressed_buffer.data(), 1); compressed_buffer.resize(1 + compressed_buffer[0]); - read_buffer->readStrict(compressed_buffer.data() + 1, compressed_buffer[0]); + read_buffer.readStrict(compressed_buffer.data() + 1, compressed_buffer[0]); auto codecs_bytes = CompressionCodecMultiple::getCodecsBytesFromData(compressed_buffer.data()); Codecs codecs; for (auto byte : codecs_bytes) codecs.push_back(CompressionCodecFactory::instance().get(byte)); + if (skip_to_next_block) + read_buffer.ignore(size_compressed - (read_buffer.count() - starting_bytes)); + return std::make_shared(codecs); } + + if (skip_to_next_block) + read_buffer.ignore(size_compressed - (read_buffer.count() - starting_bytes)); + return CompressionCodecFactory::instance().get(method); } +CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path) +{ + auto read_buffer = data_part_storage.readFile(relative_path, {}, std::nullopt, std::nullopt); + UInt32 size_compressed; + UInt32 size_decompressed; + return getCompressionCodecForFile(*read_buffer, size_compressed, size_decompressed, false); +} + } diff --git a/src/Compression/getCompressionCodecForFile.h b/src/Compression/getCompressionCodecForFile.h index b6f22750e4d..535befa37e1 100644 --- a/src/Compression/getCompressionCodecForFile.h +++ b/src/Compression/getCompressionCodecForFile.h @@ -13,4 +13,8 @@ namespace DB /// from metadata. 
CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path); +/// Same as above which is used by clickhouse-compressor to print compression statistics of each data block. +CompressionCodecPtr +getCompressionCodecForFile(ReadBuffer & read_buffer, UInt32 & size_compressed, UInt32 & size_decompressed, bool skip_to_next_block); + } diff --git a/tests/queries/0_stateless/03260_compressor_stat.reference b/tests/queries/0_stateless/03260_compressor_stat.reference new file mode 100644 index 00000000000..ba84b26cc48 --- /dev/null +++ b/tests/queries/0_stateless/03260_compressor_stat.reference @@ -0,0 +1 @@ +CODEC(Delta(1), LZ4) 14 48 diff --git a/tests/queries/0_stateless/03260_compressor_stat.sh b/tests/queries/0_stateless/03260_compressor_stat.sh new file mode 100755 index 00000000000..6efa7b6ee0a --- /dev/null +++ b/tests/queries/0_stateless/03260_compressor_stat.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +echo "Hello, World!" > 03260_test_data + +$CLICKHOUSE_COMPRESSOR --codec 'Delta' --codec 'LZ4' --input '03260_test_data' --output '03260_test_out' + +$CLICKHOUSE_COMPRESSOR --stat '03260_test_out' + +rm -f 03260_test_data 03260_test_out From bd9cfaecea93dc3b6d469f3898fcde9506ae5f9b Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Wed, 30 Oct 2024 14:35:06 +0800 Subject: [PATCH 154/566] No need to create tmp files --- tests/queries/0_stateless/03260_compressor_stat.sh | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/03260_compressor_stat.sh b/tests/queries/0_stateless/03260_compressor_stat.sh index 6efa7b6ee0a..8a03541763c 100755 --- a/tests/queries/0_stateless/03260_compressor_stat.sh +++ b/tests/queries/0_stateless/03260_compressor_stat.sh @@ -4,10 +4,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -echo "Hello, World!" > 03260_test_data - -$CLICKHOUSE_COMPRESSOR --codec 'Delta' --codec 'LZ4' --input '03260_test_data' --output '03260_test_out' - -$CLICKHOUSE_COMPRESSOR --stat '03260_test_out' - -rm -f 03260_test_data 03260_test_out +echo "Hello, World!" 
| $CLICKHOUSE_COMPRESSOR --codec 'Delta' --codec 'LZ4' | $CLICKHOUSE_COMPRESSOR --stat From 6004cb8ff4fc0b751f9cd0821a4d9214cfd63e3e Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 30 Oct 2024 11:08:21 +0000 Subject: [PATCH 155/566] Remove current_table_chosen_for_reading_with_parallel_replicas flag --- src/Planner/PlannerJoinTree.cpp | 8 +++----- src/Storages/SelectQueryInfo.h | 2 -- src/Storages/StorageMergeTree.cpp | 4 +--- src/Storages/StorageReplicatedMergeTree.cpp | 5 +---- 4 files changed, 5 insertions(+), 14 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 0007dc9d158..5e29c1a6a81 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -702,8 +702,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres table_expression_query_info.table_expression = table_expression; if (const auto & filter_actions = table_expression_data.getFilterActions()) table_expression_query_info.filter_actions_dag = std::make_shared(filter_actions->clone()); - table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas - = table_node == planner_context->getGlobalPlannerContext()->parallel_replicas_table; size_t max_streams = settings[Setting::max_threads]; size_t max_threads_execute_query = settings[Setting::max_threads]; @@ -918,10 +916,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// It is just a safety check needed until we have a proper sending plan to replicas. /// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. 
- const bool other_table_already_chosen_for_reading_with_parallel_replicas + const bool other_table_chosen_for_reading_with_parallel_replicas = planner_context->getGlobalPlannerContext()->parallel_replicas_table - && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; - if (other_table_already_chosen_for_reading_with_parallel_replicas) + && table_node != planner_context->getGlobalPlannerContext()->parallel_replicas_table; + if (other_table_chosen_for_reading_with_parallel_replicas) { chassert(query_context->canUseParallelReplicasOnFollower()); diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 7ad6a733c6f..f67274f227a 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -162,8 +162,6 @@ struct SelectQueryInfo /// It's guaranteed to be present in JOIN TREE of `query_tree` QueryTreeNodePtr table_expression; - bool current_table_chosen_for_reading_with_parallel_replicas = false; - /// Table expression modifiers for storage std::optional table_expression_modifiers; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 40cd6e01dba..55f79a54f2e 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -276,9 +276,7 @@ void StorageMergeTree::read( } const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() - && local_context->getSettingsRef()[Setting::parallel_replicas_for_non_replicated_merge_tree] - && (!local_context->getSettingsRef()[Setting::allow_experimental_analyzer] - || query_info.current_table_chosen_for_reading_with_parallel_replicas); + && local_context->getSettingsRef()[Setting::parallel_replicas_for_non_replicated_merge_tree]; if (auto plan = reader.read( column_names, diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index fc3245eafcf..3f1d2bc6a1c 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5640,10 +5640,7 @@ void StorageReplicatedMergeTree::readLocalImpl( const size_t max_block_size, const size_t num_streams) { - const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() - && (!local_context->getSettingsRef()[Setting::allow_experimental_analyzer] - || query_info.current_table_chosen_for_reading_with_parallel_replicas); - + const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower(); auto plan = reader.read( column_names, storage_snapshot, query_info, local_context, max_block_size, num_streams, From e3890a9de103a560a9804dc4b1fb63c0eb68a569 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 30 Oct 2024 11:12:21 +0000 Subject: [PATCH 156/566] Disable virtual row better. 
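In rough terms, the change threads one extra flag through convertToFinishSorting() so that virtual-row conversions are applied only when the read-in-order optimization actually set a virtual row up; applyOrder() and the window-function ordering reuse keep passing false. A simplified, self-contained sketch of that shape (invented names, not the real SortingStep/MergingSortedTransform interfaces):

#include <iostream>

// Stand-in for the parameters that now reach the final merge.
struct FinishSortingParams
{
    bool use_buffering = false;
    bool apply_virtual_row_conversions = false; // the new knob
};

static void mergeSortedStreams(const FinishSortingParams & params)
{
    if (params.apply_virtual_row_conversions)
        std::cout << "convert virtual rows, then merge\n";
    else
        std::cout << "merge streams as-is\n";
}

int main()
{
    bool virtual_row_was_set_up = true; // only optimizeReadInOrder() can turn this on
    mergeSortedStreams({/*use_buffering=*/false, /*apply_virtual_row_conversions=*/virtual_row_was_set_up});
    mergeSortedStreams({}); // all other call sites pass false
}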
--- .../QueryPlan/Optimizations/applyOrder.cpp | 2 +- .../Optimizations/optimizeReadInOrder.cpp | 16 ++++++++++------ src/Processors/QueryPlan/SortingStep.cpp | 8 ++++++-- src/Processors/QueryPlan/SortingStep.h | 3 ++- 4 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/applyOrder.cpp b/src/Processors/QueryPlan/Optimizations/applyOrder.cpp index 8695f29c26b..51a5aa099ac 100644 --- a/src/Processors/QueryPlan/Optimizations/applyOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/applyOrder.cpp @@ -124,7 +124,7 @@ SortingProperty applyOrder(QueryPlan::Node * parent, SortingProperty * propertie auto common_prefix = commonPrefix(properties->sort_description, sorting_step->getSortDescription()); if (!common_prefix.empty()) /// Buffering is useful for reading from MergeTree, and it is applied in optimizeReadInOrder only. - sorting_step->convertToFinishSorting(common_prefix, /*use_buffering*/ false); + sorting_step->convertToFinishSorting(common_prefix, /*use_buffering*/ false, false); } auto scope = sorting_step->hasPartitions() ? SortingProperty::SortScope::Stream : SortingProperty::SortScope::Global; diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 7d9e1a7c5f7..9cb9db8eebe 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -899,7 +899,7 @@ InputOrder buildInputOrderFromUnorderedKeys( return order_info; } -InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & node) +InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, bool & apply_virtual_row, QueryPlan::Node & node) { QueryPlan::Node * reading_node = findReadingStep(node, /*allow_existing_order=*/ false); if (!reading_node) @@ -925,6 +925,8 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n if (order_info.input_order) { + apply_virtual_row = order_info.virtual_row_conversion != std::nullopt; + bool can_read = reading->requestReadingInOrder( order_info.input_order->used_prefix_of_sorting_key_size, order_info.input_order->direction, @@ -1128,6 +1130,8 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes) if (sorting->getType() != SortingStep::Type::Full) return; + bool apply_virtual_row = false; + if (typeid_cast(node.children.front()->step.get())) { auto & union_node = node.children.front(); @@ -1150,7 +1154,7 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes) for (auto * child : union_node->children) { - infos.push_back(buildInputOrderInfo(*sorting, *child)); + infos.push_back(buildInputOrderInfo(*sorting, apply_virtual_row, *child)); if (infos.back()) { @@ -1202,13 +1206,13 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes) } } - sorting->convertToFinishSorting(*max_sort_descr, use_buffering); + sorting->convertToFinishSorting(*max_sort_descr, use_buffering, false); } - else if (auto order_info = buildInputOrderInfo(*sorting, *node.children.front())) + else if (auto order_info = buildInputOrderInfo(*sorting, apply_virtual_row, *node.children.front())) { /// Use buffering only if have filter or don't have limit. 
bool use_buffering = order_info->limit == 0; - sorting->convertToFinishSorting(order_info->sort_description_for_merging, use_buffering); + sorting->convertToFinishSorting(order_info->sort_description_for_merging, use_buffering, apply_virtual_row); } } @@ -1350,7 +1354,7 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, bool can_read = read_from_merge_tree->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit, {}); if (!can_read) return 0; - sorting->convertToFinishSorting(order_info->sort_description_for_merging, false); + sorting->convertToFinishSorting(order_info->sort_description_for_merging, false, false); } return 0; diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 5ad2f1f62d5..5f0e54faf18 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -147,11 +147,12 @@ void SortingStep::updateLimit(size_t limit_) } } -void SortingStep::convertToFinishSorting(SortDescription prefix_description_, bool use_buffering_) +void SortingStep::convertToFinishSorting(SortDescription prefix_description_, bool use_buffering_, bool apply_virtual_row_conversions_) { type = Type::FinishSorting; prefix_description = std::move(prefix_description_); use_buffering = use_buffering_; + apply_virtual_row_conversions = apply_virtual_row_conversions_; } void SortingStep::scatterByPartitionIfNeeded(QueryPipelineBuilder& pipeline) @@ -255,7 +256,10 @@ void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescr /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, limit_, - always_read_till_end); + always_read_till_end, + nullptr, + false, + apply_virtual_row_conversions); pipeline.addTransform(std::move(transform)); } diff --git a/src/Processors/QueryPlan/SortingStep.h b/src/Processors/QueryPlan/SortingStep.h index 6cdf626d4c8..9366630f0fb 100644 --- a/src/Processors/QueryPlan/SortingStep.h +++ b/src/Processors/QueryPlan/SortingStep.h @@ -81,7 +81,7 @@ public: bool hasPartitions() const { return !partition_by_description.empty(); } - void convertToFinishSorting(SortDescription prefix_description, bool use_buffering_); + void convertToFinishSorting(SortDescription prefix_description, bool use_buffering_, bool apply_virtual_row_conversions_); Type getType() const { return type; } const Settings & getSettings() const { return sort_settings; } @@ -128,6 +128,7 @@ private: UInt64 limit; bool always_read_till_end = false; bool use_buffering = false; + bool apply_virtual_row_conversions = false; Settings sort_settings; }; From 0dcb2b9c2c61674be298b706498763e8fcae7018 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 12:24:39 +0000 Subject: [PATCH 157/566] try another approach --- src/Interpreters/FillingRow.cpp | 315 +++++++++++++++--- src/Interpreters/FillingRow.h | 18 +- .../Transforms/FillingTransform.cpp | 92 +++-- 3 files changed, 348 insertions(+), 77 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 8c5f102bcd6..caf6ad9e3ba 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -1,4 +1,7 @@ +#include #include +#include "Common/Logger.h" +#include "Common/logger_useful.h" #include #include @@ -95,108 +98,326 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, Field next_value = shifted_value; descr.step_func(next_value, step_len); - if (less(next_value, to, getDirection(0))) + // if 
(less(next_value, to, getDirection(0))) + // { + // shifted_value = std::move(next_value); + // step_len *= 2; + // } + // else + // { + // step_len /= 2; + // } + + if (less(to, next_value, getDirection(0))) { - shifted_value = std::move(next_value); - step_len *= 2; + step_len /= 2; } else { - step_len /= 2; + shifted_value = std::move(next_value); + step_len *= 2; } } return shifted_value; } -std::pair FillingRow::next(const FillingRow & to_row, bool long_jump) +Field findMin(Field a, Field b, Field c, int dir) { + auto logger = getLogger("FillingRow"); + LOG_DEBUG(logger, "a: {} b: {} c: {}", a.dump(), b.dump(), c.dump()); + + if (a.isNull() || (!b.isNull() && less(b, a, dir))) + a = b; + + if (a.isNull() || (!c.isNull() && less(c, a, dir))) + a = c; + + return a; +} + +std::pair FillingRow::next(const FillingRow & next_original_row) +{ + auto logger = getLogger("FillingRow"); + const size_t row_size = size(); size_t pos = 0; /// Find position we need to increment for generating next row. for (; pos < row_size; ++pos) - if (!row[pos].isNull() && !to_row.row[pos].isNull() && !equals(row[pos], to_row.row[pos])) - break; + { + if (row[pos].isNull()) + continue; - if (pos == row_size || less(to_row.row[pos], row[pos], getDirection(pos))) + const auto & descr = getFillDescription(pos); + auto min_constr = findMin(next_original_row[pos], staleness_border[pos], descr.fill_to, getDirection(pos)); + LOG_DEBUG(logger, "min_constr: {}", min_constr); + + if (!min_constr.isNull() && !equals(row[pos], min_constr)) + break; + } + + LOG_DEBUG(logger, "pos: {}", pos); + + if (pos == row_size) return {false, false}; - /// If we have any 'fill_to' value at position greater than 'pos', - /// we need to generate rows up to 'fill_to' value. + const auto & pos_descr = getFillDescription(pos); + + if (!next_original_row[pos].isNull() && less(next_original_row[pos], row[pos], getDirection(pos))) + return {false, false}; + + if (!staleness_border[pos].isNull() && !less(row[pos], staleness_border[pos], getDirection(pos))) + return {false, false}; + + if (!pos_descr.fill_to.isNull() && !less(row[pos], pos_descr.fill_to, getDirection(pos))) + return {false, false}; + + /// If we have any 'fill_to' value at position greater than 'pos' or configured staleness, + /// we need to generate rows up to one of this borders. 
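// [Illustrative aside — a simplified integer sketch, not the real Field-based code.]
// The border logic used in this function (cf. findMin() above): a generated value may only
// advance up to the closest, in the fill direction, of the next original value, the staleness
// border and FILL TO, whichever of them are present.
#include <optional>
#include <iostream>

static std::optional<long> closestBorder(std::optional<long> next_original,
                                         std::optional<long> staleness_border,
                                         std::optional<long> fill_to,
                                         int direction /* +1 ascending, -1 descending */)
{
    std::optional<long> result;
    for (const auto & candidate : {next_original, staleness_border, fill_to})
        if (candidate && (!result || *candidate * direction < *result * direction))
            result = candidate;
    return result;
}

int main()
{
    // Ascending fill: next original row at 10, staleness border at 7, no FILL TO -> stop at 7.
    auto border = closestBorder(10, 7, std::nullopt, +1);
    std::cout << (border ? *border : -1) << '\n'; // 7
}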
for (size_t i = row_size - 1; i > pos; --i) { auto & fill_column_desc = getFillDescription(i); - if (fill_column_desc.fill_to.isNull() || row[i].isNull()) + if (row[i].isNull()) continue; - auto next_value = doJump(fill_column_desc, i); - if (next_value.has_value() && !equals(next_value.value(), fill_column_desc.fill_to)) - { - row[i] = std::move(next_value.value()); - initFromDefaults(i + 1); - return {true, true}; - } + if (fill_column_desc.fill_to.isNull() && staleness_border[i].isNull()) + continue; + + Field next_value = row[i]; + fill_column_desc.step_func(next_value, 1); + + if (!staleness_border[i].isNull() && !less(next_value, staleness_border[i], getDirection(i))) + continue; + + if (!fill_column_desc.fill_to.isNull() && !less(next_value, fill_column_desc.fill_to, getDirection(i))) + continue; + + row[i] = next_value; + initWithFrom(i + 1); + return {true, true}; } - auto & fill_column_desc = getFillDescription(pos); - std::optional next_value; + auto next_value = row[pos]; + getFillDescription(pos).step_func(next_value, 1); - if (long_jump) - { - next_value = doLongJump(fill_column_desc, pos, to_row[pos]); - - if (!next_value.has_value()) - return {false, false}; - - /// We need value >= to_row[pos] - fill_column_desc.step_func(next_value.value(), 1); - } - else - { - next_value = doJump(fill_column_desc, pos); - } - - if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) + if (!next_original_row[pos].isNull() && less(next_original_row[pos], next_value, getDirection(pos))) return {false, false}; - row[pos] = std::move(next_value.value()); - if (equals(row[pos], to_row.row[pos])) + if (!staleness_border[pos].isNull() && !less(next_value, staleness_border[pos], getDirection(pos))) + return {false, false}; + + if (!pos_descr.fill_to.isNull() && !less(next_value, pos_descr.fill_to, getDirection(pos))) + return {false, false}; + + row[pos] = next_value; + if (equals(row[pos], next_original_row[pos])) { bool is_less = false; for (size_t i = pos + 1; i < row_size; ++i) { - const auto & fill_from = getFillDescription(i).fill_from; - if (!fill_from.isNull()) - row[i] = fill_from; + const auto & descr = getFillDescription(i); + if (!descr.fill_from.isNull()) + row[i] = descr.fill_from; else - row[i] = to_row.row[i]; - is_less |= less(row[i], to_row.row[i], getDirection(i)); + row[i] = next_original_row[i]; + + is_less |= ( + (next_original_row[i].isNull() || less(row[i], next_original_row[i], getDirection(i))) && + (staleness_border[i].isNull() || less(row[i], staleness_border[i], getDirection(i))) && + (descr.fill_to.isNull() || less(row[i], descr.fill_to, getDirection(i))) + ); } return {is_less, true}; } - initFromDefaults(pos + 1); + initWithFrom(pos + 1); return {true, true}; } -void FillingRow::initFromDefaults(size_t from_pos) +bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed) +{ + auto logger = getLogger("FillingRow::shift"); + LOG_DEBUG(logger, "next_original_row: {}, current: {}", next_original_row.dump(), dump()); + + for (size_t pos = 0; pos < size(); ++pos) + { + if (row[pos].isNull() || next_original_row[pos].isNull() || equals(row[pos], next_original_row[pos])) + continue; + + if (less(next_original_row[pos], row[pos], getDirection(pos))) + return false; + + std::optional next_value = doLongJump(getFillDescription(pos), pos, next_original_row[pos]); + + if (!next_value.has_value()) + { + LOG_DEBUG(logger, "next value: {}", "None"); + 
continue; + } + else + { + LOG_DEBUG(logger, "next value: {}", next_value->dump()); + } + + row[pos] = std::move(next_value.value()); + + if (equals(row[pos], next_original_row[pos])) + { + bool is_less = false; + for (size_t i = pos + 1; i < size(); ++i) + { + const auto & descr = getFillDescription(i); + if (!descr.fill_from.isNull()) + row[i] = descr.fill_from; + else + row[i] = next_original_row[i]; + + is_less |= ( + (next_original_row[i].isNull() || less(row[i], next_original_row[i], getDirection(i))) && + (staleness_border[i].isNull() || less(row[i], staleness_border[i], getDirection(i))) && + (descr.fill_to.isNull() || less(row[i], descr.fill_to, getDirection(i))) + ); + } + + LOG_DEBUG(logger, "is less: {}", is_less); + + value_changed = true; + return is_less; + } + else + { + // getFillDescription(pos).step_func(row[pos], 1); + initWithTo(/*from_pos=*/pos + 1); + + value_changed = false; + return false; + } + } + + return false; +} + +bool FillingRow::isConstraintComplete(size_t pos) const +{ + auto logger = getLogger("FillingRow::isConstraintComplete"); + + if (row[pos].isNull()) + { + LOG_DEBUG(logger, "disabled"); + return true; /// disabled + } + + const auto & descr = getFillDescription(pos); + int direction = getDirection(pos); + + if (!descr.fill_to.isNull() && !less(row[pos], descr.fill_to, direction)) + { + LOG_DEBUG(logger, "fill to: {}, row: {}, direction: {}", descr.fill_to.dump(), row[pos].dump(), direction); + return false; + } + + if (!staleness_border[pos].isNull() && !less(row[pos], staleness_border[pos], direction)) + { + LOG_DEBUG(logger, "staleness border: {}, row: {}, direction: {}", staleness_border[pos].dump(), row[pos].dump(), direction); + return false; + } + + return true; +} + +bool FillingRow::isConstraintsComplete() const +{ + for (size_t pos = 0; pos < size(); ++pos) + { + if (isConstraintComplete(pos)) + return true; + } + + return false; +} + +bool FillingRow::isLessStaleness() const +{ + auto logger = getLogger("FillingRow::isLessStaleness"); + + for (size_t pos = 0; pos < size(); ++pos) + { + LOG_DEBUG(logger, "staleness border: {}, row: {}", staleness_border[pos].dump(), row[pos].dump()); + + if (row[pos].isNull() || staleness_border[pos].isNull()) + continue; + + if (less(row[pos], staleness_border[pos], getDirection(pos))) + return true; + } + + return false; +} + +bool FillingRow::isStalenessConfigured() const +{ + for (size_t pos = 0; pos < size(); ++pos) + if (!getFillDescription(pos).fill_staleness.isNull()) + return true; + + return false; +} + +bool FillingRow::isLessFillTo() const +{ + auto logger = getLogger("FillingRow::isLessFillTo"); + + for (size_t pos = 0; pos < size(); ++pos) + { + const auto & descr = getFillDescription(pos); + + LOG_DEBUG(logger, "fill to: {}, row: {}", descr.fill_to.dump(), row[pos].dump()); + + if (row[pos].isNull() || descr.fill_to.isNull()) + continue; + + if (less(row[pos], descr.fill_to, getDirection(pos))) + return true; + } + + return false; +} + +bool FillingRow::isFillToConfigured() const +{ + for (size_t pos = 0; pos < size(); ++pos) + if (!getFillDescription(pos).fill_to.isNull()) + return true; + + return false; +} + + +void FillingRow::initWithFrom(size_t from_pos) { for (size_t i = from_pos; i < sort_description.size(); ++i) row[i] = getFillDescription(i).fill_from; } +void FillingRow::initWithTo(size_t from_pos) +{ + for (size_t i = from_pos; i < sort_description.size(); ++i) + row[i] = getFillDescription(i).fill_to; +} + void FillingRow::initStalenessRow(const Columns& base_row, size_t 
row_ind) { for (size_t i = 0; i < size(); ++i) { - staleness_border[i] = (*base_row[i])[row_ind]; - const auto& descr = getFillDescription(i); if (!descr.fill_staleness.isNull()) + { + staleness_border[i] = (*base_row[i])[row_ind]; descr.staleness_step_func(staleness_border[i], 1); + } } } diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index dc787173191..a5e622e4c6e 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -25,9 +25,22 @@ public: /// Return pair of boolean /// apply - true if filling values should be inserted into result set /// value_changed - true if filling row value was changed - std::pair next(const FillingRow & to_row, bool long_jump); + std::pair next(const FillingRow & next_original_row); - void initFromDefaults(size_t from_pos = 0); + /// Returns true if need to generate some prefix for to_row + bool shift(const FillingRow & next_original_row, bool& value_changed); + + bool isConstraintComplete(size_t pos) const; + bool isConstraintsComplete() const; + + bool isLessStaleness() const; + bool isStalenessConfigured() const; + + bool isLessFillTo() const; + bool isFillToConfigured() const; + + void initWithFrom(size_t from_pos = 0); + void initWithTo(size_t from_pos = 0); void initStalenessRow(const Columns& base_row, size_t row_ind); Field & operator[](size_t index) { return row[index]; } @@ -39,6 +52,7 @@ public: bool isNull() const; int getDirection(size_t index) const { return sort_description[index].direction; } + Field getStalenessBorder(size_t index) const { return staleness_border[index]; } FillColumnDescription & getFillDescription(size_t index) { return sort_description[index].fill_description; } const FillColumnDescription & getFillDescription(size_t index) const { return sort_description[index].fill_description; } diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 46a670394a5..a3a185929dc 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -11,13 +11,14 @@ #include #include #include +#include "Interpreters/FillingRow.h" #include namespace DB { -constexpr bool debug_logging_enabled = false; +constexpr bool debug_logging_enabled = true; template void logDebug(String key, const T & value, const char * separator = " : ") @@ -507,18 +508,39 @@ bool FillingTransform::generateSuffixIfNeeded( logDebug("should_insert_first", should_insert_first); for (size_t i = 0, size = filling_row.size(); i < size; ++i) - next_row[i] = filling_row.getFillDescription(i).fill_to; + next_row[i] = Field{}; logDebug("generateSuffixIfNeeded next_row updated", next_row); - if (filling_row >= next_row) + // if (!filling_row.isFillToConfigured() && !filling_row.isStalenessConfigured()) + // { + // logDebug("generateSuffixIfNeeded", "no other constraints, will not generate suffix"); + // return false; + // } + + // logDebug("filling_row.isLessFillTo()", filling_row.isLessFillTo()); + // logDebug("filling_row.isLessStaleness()", filling_row.isLessStaleness()); + + // if (filling_row.isFillToConfigured() && !filling_row.isLessFillTo()) + // { + // logDebug("generateSuffixIfNeeded", "not less than fill to, will not generate suffix"); + // return false; + // } + + // if (filling_row.isStalenessConfigured() && !filling_row.isLessStaleness()) + // { + // logDebug("generateSuffixIfNeeded", "not less than staleness border, will not generate suffix"); + // return false; + // } + + if 
(!filling_row.isConstraintsComplete()) { - logDebug("generateSuffixIfNeeded", "no need to generate suffix"); + logDebug("generateSuffixIfNeeded", "will not generate suffix"); return false; } Block interpolate_block; - if (should_insert_first && filling_row < next_row) + if (should_insert_first) { interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); @@ -533,7 +555,7 @@ bool FillingTransform::generateSuffixIfNeeded( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/false); + const auto [apply, changed] = filling_row.next(next_row); filling_row_changed = changed; if (!apply) break; @@ -615,7 +637,7 @@ void FillingTransform::transformRange( if (!fill_from.isNull() && !equals(current_value, fill_from)) { - filling_row.initFromDefaults(i); + filling_row.initWithFrom(i); filling_row_inserted = false; if (less(fill_from, current_value, filling_row.getDirection(i))) { @@ -642,24 +664,14 @@ void FillingTransform::transformRange( logDebug("should_insert_first", should_insert_first); for (size_t i = 0, size = filling_row.size(); i < size; ++i) - { - const auto current_value = (*input_fill_columns[i])[row_ind]; - const auto & fill_to = filling_row.getFillDescription(i).fill_to; + next_row[i] = (*input_fill_columns[i])[row_ind]; - logDebug("current value", current_value.dump()); - logDebug("fill to", fill_to.dump()); - - if (fill_to.isNull() || less(current_value, fill_to, filling_row.getDirection(i))) - next_row[i] = current_value; - else - next_row[i] = fill_to; - } logDebug("next_row updated", next_row); /// The condition is true when filling row is initialized by value(s) in FILL FROM, /// and there are row(s) in current range with value(s) < then in the filling row. /// It can happen only once for a range. 
- if (should_insert_first && filling_row < next_row) + if (should_insert_first && filling_row < next_row && filling_row.isConstraintsComplete()) { interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); @@ -669,7 +681,7 @@ void FillingTransform::transformRange( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/false); + const auto [apply, changed] = filling_row.next(next_row); filling_row_changed = changed; if (!apply) break; @@ -679,12 +691,36 @@ void FillingTransform::transformRange( copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); } - const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/true); - logDebug("long jump apply", apply); - logDebug("long jump changed", changed); + { + filling_row.initStalenessRow(input_fill_columns, row_ind); - if (changed) - filling_row_changed = true; + bool shift_apply = filling_row.shift(next_row, filling_row_changed); + logDebug("shift_apply", shift_apply); + logDebug("filling_row_changed", filling_row_changed); + + while (shift_apply) + { + logDebug("after shift", filling_row); + + while (true) + { + logDebug("filling_row in prefix", filling_row); + + interpolate(result_columns, interpolate_block); + insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); + copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); + + const auto [apply, changed] = filling_row.next(next_row); + logDebug("filling_row in prefix", filling_row); + + filling_row_changed = changed; + if (!apply) + break; + } + + shift_apply = filling_row.shift(next_row, filling_row_changed); + } + } /// new valid filling row was generated but not inserted, will use it during suffix generation if (filling_row_changed) @@ -697,8 +733,8 @@ void FillingTransform::transformRange( copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); copyRowFromColumns(res_other_columns, input_other_columns, row_ind); - /// Init next staleness interval with current row, because we have already made the long jump to it - filling_row.initStalenessRow(input_fill_columns, row_ind); + // /// Init next staleness interval with current row, because we have already made the long jump to it + // filling_row.initStalenessRow(input_fill_columns, row_ind); } /// save sort prefix of last row in the range, it's used to generate suffix @@ -744,7 +780,7 @@ void FillingTransform::transform(Chunk & chunk) /// if no data was processed, then need to initialize filling_row if (last_row.empty()) { - filling_row.initFromDefaults(); + filling_row.initWithFrom(); filling_row_inserted = false; } From b724f2c33141fb0348742d6b48c4b58763450ff7 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 30 Oct 2024 12:24:56 +0000 Subject: [PATCH 158/566] Fix FULL joins --- src/Planner/PlannerJoinTree.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 5e29c1a6a81..ac05f893cd2 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -916,13 +916,11 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// It is just a safety check needed until we have a proper sending plan to replicas. 
/// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. - const bool other_table_chosen_for_reading_with_parallel_replicas - = planner_context->getGlobalPlannerContext()->parallel_replicas_table + const bool no_tables_or_another_table_chosen_for_reading_with_parallel_replicas_mode + = query_context->canUseParallelReplicasOnFollower() && table_node != planner_context->getGlobalPlannerContext()->parallel_replicas_table; - if (other_table_chosen_for_reading_with_parallel_replicas) + if (no_tables_or_another_table_chosen_for_reading_with_parallel_replicas_mode) { - chassert(query_context->canUseParallelReplicasOnFollower()); - auto mutable_context = Context::createCopy(query_context); mutable_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); storage->read( @@ -984,7 +982,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) + else if ( + ClusterProxy::canUseParallelReplicasOnInitiator(query_context) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == query_node) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); From b9829c703fd4ceae38b5d195ae195c2321e17444 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 13:44:59 +0000 Subject: [PATCH 159/566] change constraints check --- src/Interpreters/FillingRow.cpp | 75 ++++++++++++------- src/Interpreters/FillingRow.h | 6 +- .../Transforms/FillingTransform.cpp | 2 +- 3 files changed, 53 insertions(+), 30 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index caf6ad9e3ba..825b0b1488a 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -3,6 +3,7 @@ #include "Common/Logger.h" #include "Common/logger_useful.h" #include +#include "base/defines.h" #include @@ -122,6 +123,43 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, return shifted_value; } +bool FillingRow::hasSomeConstraints(size_t pos) const +{ + const auto & descr = getFillDescription(pos); + + if (!descr.fill_to.isNull()) + return true; + + if (!descr.fill_staleness.isNull()) + return true; + + return false; +} + +bool FillingRow::isConstraintsComplete(size_t pos) const +{ + auto logger = getLogger("FillingRow::isConstraintComplete"); + chassert(!row[pos].isNull()); + chassert(hasSomeConstraints(pos)); + + const auto & descr = getFillDescription(pos); + int direction = getDirection(pos); + + if (!descr.fill_to.isNull() && !less(row[pos], descr.fill_to, direction)) + { + LOG_DEBUG(logger, "fill to: {}, row: {}, direction: {}", descr.fill_to.dump(), row[pos].dump(), direction); + return false; + } + + if (!descr.fill_staleness.isNull() && !less(row[pos], staleness_border[pos], direction)) + { + LOG_DEBUG(logger, "staleness border: {}, row: {}, direction: {}", staleness_border[pos].dump(), row[pos].dump(), direction); + return false; + } + + return true; +} + Field findMin(Field a, Field b, Field c, int dir) { auto logger = getLogger("FillingRow"); @@ -300,43 +338,26 @@ bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed return false; } -bool 
FillingRow::isConstraintComplete(size_t pos) const +bool FillingRow::hasSomeConstraints() const { - auto logger = getLogger("FillingRow::isConstraintComplete"); + for (size_t pos = 0; pos < size(); ++pos) + if (hasSomeConstraints(pos)) + return true; - if (row[pos].isNull()) - { - LOG_DEBUG(logger, "disabled"); - return true; /// disabled - } - - const auto & descr = getFillDescription(pos); - int direction = getDirection(pos); - - if (!descr.fill_to.isNull() && !less(row[pos], descr.fill_to, direction)) - { - LOG_DEBUG(logger, "fill to: {}, row: {}, direction: {}", descr.fill_to.dump(), row[pos].dump(), direction); - return false; - } - - if (!staleness_border[pos].isNull() && !less(row[pos], staleness_border[pos], direction)) - { - LOG_DEBUG(logger, "staleness border: {}, row: {}, direction: {}", staleness_border[pos].dump(), row[pos].dump(), direction); - return false; - } - - return true; + return false; } bool FillingRow::isConstraintsComplete() const { for (size_t pos = 0; pos < size(); ++pos) { - if (isConstraintComplete(pos)) - return true; + if (row[pos].isNull() || !hasSomeConstraints(pos)) + continue; + + return isConstraintsComplete(pos); } - return false; + return true; } bool FillingRow::isLessStaleness() const diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index a5e622e4c6e..bd5a1b877a5 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -18,6 +18,9 @@ class FillingRow std::optional doJump(const FillColumnDescription & descr, size_t column_ind); std::optional doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to); + bool hasSomeConstraints(size_t pos) const; + bool isConstraintsComplete(size_t pos) const; + public: explicit FillingRow(const SortDescription & sort_description); @@ -30,7 +33,7 @@ public: /// Returns true if need to generate some prefix for to_row bool shift(const FillingRow & next_original_row, bool& value_changed); - bool isConstraintComplete(size_t pos) const; + bool hasSomeConstraints() const; bool isConstraintsComplete() const; bool isLessStaleness() const; @@ -52,7 +55,6 @@ public: bool isNull() const; int getDirection(size_t index) const { return sort_description[index].direction; } - Field getStalenessBorder(size_t index) const { return staleness_border[index]; } FillColumnDescription & getFillDescription(size_t index) { return sort_description[index].fill_description; } const FillColumnDescription & getFillDescription(size_t index) const { return sort_description[index].fill_description; } diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index a3a185929dc..ce804c94d8e 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -533,7 +533,7 @@ bool FillingTransform::generateSuffixIfNeeded( // return false; // } - if (!filling_row.isConstraintsComplete()) + if (!filling_row.hasSomeConstraints() || !filling_row.isConstraintsComplete()) { logDebug("generateSuffixIfNeeded", "will not generate suffix"); return false; From 433523c6f29a55d28930ec86fe268edffc16738e Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 13:49:42 +0000 Subject: [PATCH 160/566] update test --- .../03266_with_fill_staleness.reference | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.reference b/tests/queries/0_stateless/03266_with_fill_staleness.reference index 
6b090443359..25d7b7c3f24 100644 --- a/tests/queries/0_stateless/03266_with_fill_staleness.reference +++ b/tests/queries/0_stateless/03266_with_fill_staleness.reference @@ -50,6 +50,8 @@ staleness 3 seconds 2016-06-15 23:00:21 20 2016-06-15 23:00:22 20 2016-06-15 23:00:25 25 original +2016-06-15 23:00:26 25 +2016-06-15 23:00:27 25 descending order 2016-06-15 23:00:25 25 original 2016-06-15 23:00:24 25 @@ -62,6 +64,7 @@ descending order 2016-06-15 23:00:05 5 original 2016-06-15 23:00:04 5 2016-06-15 23:00:00 0 original +2016-06-15 22:59:59 0 staleness with to and step 2016-06-15 23:00:00 0 original 2016-06-15 23:00:03 0 @@ -86,33 +89,41 @@ staleness with another regular with fill 2016-06-15 23:00:01 1970-01-01 01:00:00 0 2016-06-15 23:00:01 1970-01-01 01:00:01 0 2016-06-15 23:00:01 1970-01-01 01:00:02 0 +2016-06-15 23:00:05 1970-01-01 01:00:00 0 +2016-06-15 23:00:05 1970-01-01 01:00:01 0 +2016-06-15 23:00:05 1970-01-01 01:00:02 0 2016-06-15 23:00:05 2016-06-15 23:00:05 5 original -2016-06-15 23:00:05 1970-01-01 01:00:01 5 -2016-06-15 23:00:05 1970-01-01 01:00:02 5 2016-06-15 23:00:06 1970-01-01 01:00:00 5 2016-06-15 23:00:06 1970-01-01 01:00:01 5 2016-06-15 23:00:06 1970-01-01 01:00:02 5 +2016-06-15 23:00:10 1970-01-01 01:00:00 5 +2016-06-15 23:00:10 1970-01-01 01:00:01 5 +2016-06-15 23:00:10 1970-01-01 01:00:02 5 2016-06-15 23:00:10 2016-06-15 23:00:10 10 original -2016-06-15 23:00:10 1970-01-01 01:00:01 10 -2016-06-15 23:00:10 1970-01-01 01:00:02 10 2016-06-15 23:00:11 1970-01-01 01:00:00 10 2016-06-15 23:00:11 1970-01-01 01:00:01 10 2016-06-15 23:00:11 1970-01-01 01:00:02 10 +2016-06-15 23:00:15 1970-01-01 01:00:00 10 +2016-06-15 23:00:15 1970-01-01 01:00:01 10 +2016-06-15 23:00:15 1970-01-01 01:00:02 10 2016-06-15 23:00:15 2016-06-15 23:00:15 15 original -2016-06-15 23:00:15 1970-01-01 01:00:01 15 -2016-06-15 23:00:15 1970-01-01 01:00:02 15 2016-06-15 23:00:16 1970-01-01 01:00:00 15 2016-06-15 23:00:16 1970-01-01 01:00:01 15 2016-06-15 23:00:16 1970-01-01 01:00:02 15 +2016-06-15 23:00:20 1970-01-01 01:00:00 15 +2016-06-15 23:00:20 1970-01-01 01:00:01 15 +2016-06-15 23:00:20 1970-01-01 01:00:02 15 2016-06-15 23:00:20 2016-06-15 23:00:20 20 original -2016-06-15 23:00:20 1970-01-01 01:00:01 20 -2016-06-15 23:00:20 1970-01-01 01:00:02 20 2016-06-15 23:00:21 1970-01-01 01:00:00 20 2016-06-15 23:00:21 1970-01-01 01:00:01 20 2016-06-15 23:00:21 1970-01-01 01:00:02 20 +2016-06-15 23:00:25 1970-01-01 01:00:00 20 +2016-06-15 23:00:25 1970-01-01 01:00:01 20 +2016-06-15 23:00:25 1970-01-01 01:00:02 20 2016-06-15 23:00:25 2016-06-15 23:00:25 25 original -2016-06-15 23:00:25 1970-01-01 01:00:01 25 -2016-06-15 23:00:25 1970-01-01 01:00:02 25 +2016-06-15 23:00:26 1970-01-01 01:00:00 25 +2016-06-15 23:00:26 1970-01-01 01:00:01 25 +2016-06-15 23:00:26 1970-01-01 01:00:02 25 double staleness 2016-06-15 23:00:00 2016-06-15 23:00:00 0 original 2016-06-15 23:00:00 2016-06-15 23:00:02 0 @@ -137,3 +148,4 @@ double staleness 2016-06-15 23:00:25 2016-06-15 23:00:25 25 original 2016-06-15 23:00:25 2016-06-15 23:00:27 25 2016-06-15 23:00:25 2016-06-15 23:00:29 25 +2016-06-15 23:00:26 1970-01-01 01:00:00 25 From e5fe7a0f52625d3460ca04a21982a1af24e0adcd Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 14:35:29 +0000 Subject: [PATCH 161/566] add more tests --- .../0_stateless/03266_with_fill_staleness.sql | 1 + .../03266_with_fill_staleness_cases.reference | 67 +++++++++++++++++++ .../03266_with_fill_staleness_cases.sql | 25 +++++++ 3 files changed, 93 insertions(+) create mode 100644 
tests/queries/0_stateless/03266_with_fill_staleness_cases.reference create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness_cases.sql diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.sql b/tests/queries/0_stateless/03266_with_fill_staleness.sql index fff702ffd83..de47d8287ad 100644 --- a/tests/queries/0_stateless/03266_with_fill_staleness.sql +++ b/tests/queries/0_stateless/03266_with_fill_staleness.sql @@ -1,4 +1,5 @@ SET session_timezone='Europe/Amsterdam'; +SET enable_analyzer=1; DROP TABLE IF EXISTS with_fill_staleness; CREATE TABLE with_fill_staleness (a DateTime, b DateTime, c UInt64) ENGINE = MergeTree ORDER BY a; diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_cases.reference b/tests/queries/0_stateless/03266_with_fill_staleness_cases.reference new file mode 100644 index 00000000000..bf8e5bbe331 --- /dev/null +++ b/tests/queries/0_stateless/03266_with_fill_staleness_cases.reference @@ -0,0 +1,67 @@ +test-1 +0 5 10 original +0 5 13 +0 5 16 +0 5 19 +0 5 22 +0 7 0 +7 8 15 original +7 8 18 +7 8 21 +7 8 24 +7 10 0 +14 10 20 original +14 10 23 +14 12 0 +test-2-1 +1 0 original +1 1 +1 2 +1 3 +1 4 original +1 5 +1 6 +1 7 +1 8 original +1 9 +1 10 +1 11 +1 12 original +test-2-2 +1 0 original +1 1 +1 2 +1 3 +1 4 original +1 5 +1 6 +1 7 +1 8 original +1 9 +1 10 +1 11 +1 12 original +1 13 +1 14 +2 0 +3 0 +4 0 +test-3-1 +25 -10 +25 -8 +25 -6 +25 -4 +25 -2 +25 0 +25 2 +25 4 +25 6 +25 8 +25 10 +25 12 +25 14 +25 16 +25 17 original +28 -10 +30 18 original +31 -10 diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_cases.sql b/tests/queries/0_stateless/03266_with_fill_staleness_cases.sql new file mode 100644 index 00000000000..9e28041c9a1 --- /dev/null +++ b/tests/queries/0_stateless/03266_with_fill_staleness_cases.sql @@ -0,0 +1,25 @@ +SET enable_analyzer=1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a Int64, b Int64, c Int64) Engine=MergeTree ORDER BY a; +INSERT INTO test(a, b, c) VALUES (0, 5, 10), (7, 8, 15), (14, 10, 20); + +SELECT 'test-1'; +SELECT *, 'original' AS orig FROM test ORDER BY a, b WITH FILL TO 20 STEP 2 STALENESS 3, c WITH FILL TO 25 step 3; + +DROP TABLE IF EXISTS test2; +CREATE TABLE test2 (a Int64, b Int64) Engine=MergeTree ORDER BY a; +INSERT INTO test2(a, b) values (1, 0), (1, 4), (1, 8), (1, 12); + +SELECT 'test-2-1'; +SELECT *, 'original' AS orig FROM test2 ORDER BY a, b WITH FILL; + +SELECT 'test-2-2'; +SELECT *, 'original' AS orig FROM test2 ORDER BY a WITH FILL to 20 STALENESS 4, b WITH FILL TO 15 STALENESS 7; + +DROP TABLE IF EXISTS test2; +CREATE TABLE test3 (a Int64, b Int64) Engine=MergeTree ORDER BY a; +INSERT INTO test3(a, b) VALUES (25, 17), (30, 18); + +SELECT 'test-3-1'; +SELECT a, b, 'original' AS orig FROM test3 ORDER BY a WITH FILL TO 33 STEP 3, b WITH FILL FROM -10 STEP 2; From 2cda4dd9012059b6c287df7c615cef8e310b2d8e Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 14:46:56 +0000 Subject: [PATCH 162/566] cleanup --- src/Interpreters/FillingRow.cpp | 97 +------------------ src/Interpreters/FillingRow.h | 12 +-- .../Transforms/FillingTransform.cpp | 30 +----- 3 files changed, 11 insertions(+), 128 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 825b0b1488a..a87ca418b7b 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -68,25 +68,6 @@ bool FillingRow::isNull() const return true; } -std::optional FillingRow::doJump(const FillColumnDescription& descr, size_t column_ind) -{ - Field 
next_value = row[column_ind]; - descr.step_func(next_value, 1); - - if (!descr.fill_to.isNull() && less(descr.fill_to, next_value, getDirection(column_ind))) - return std::nullopt; - - if (!descr.fill_staleness.isNull()) - { - if (less(next_value, staleness_border[column_ind], getDirection(column_ind))) - return next_value; - else - return std::nullopt; - } - - return next_value; -} - std::optional FillingRow::doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to) { Field shifted_value = row[column_ind]; @@ -99,16 +80,6 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, Field next_value = shifted_value; descr.step_func(next_value, step_len); - // if (less(next_value, to, getDirection(0))) - // { - // shifted_value = std::move(next_value); - // step_len *= 2; - // } - // else - // { - // step_len /= 2; - // } - if (less(to, next_value, getDirection(0))) { step_len /= 2; @@ -233,7 +204,7 @@ std::pair FillingRow::next(const FillingRow & next_original_row) continue; row[i] = next_value; - initWithFrom(i + 1); + initUsingFrom(i + 1); return {true, true}; } @@ -271,7 +242,7 @@ std::pair FillingRow::next(const FillingRow & next_original_row) return {is_less, true}; } - initWithFrom(pos + 1); + initUsingFrom(pos + 1); return {true, true}; } @@ -327,8 +298,7 @@ bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed } else { - // getFillDescription(pos).step_func(row[pos], 1); - initWithTo(/*from_pos=*/pos + 1); + initUsingTo(/*from_pos=*/pos + 1); value_changed = false; return false; @@ -360,70 +330,13 @@ bool FillingRow::isConstraintsComplete() const return true; } -bool FillingRow::isLessStaleness() const -{ - auto logger = getLogger("FillingRow::isLessStaleness"); - - for (size_t pos = 0; pos < size(); ++pos) - { - LOG_DEBUG(logger, "staleness border: {}, row: {}", staleness_border[pos].dump(), row[pos].dump()); - - if (row[pos].isNull() || staleness_border[pos].isNull()) - continue; - - if (less(row[pos], staleness_border[pos], getDirection(pos))) - return true; - } - - return false; -} - -bool FillingRow::isStalenessConfigured() const -{ - for (size_t pos = 0; pos < size(); ++pos) - if (!getFillDescription(pos).fill_staleness.isNull()) - return true; - - return false; -} - -bool FillingRow::isLessFillTo() const -{ - auto logger = getLogger("FillingRow::isLessFillTo"); - - for (size_t pos = 0; pos < size(); ++pos) - { - const auto & descr = getFillDescription(pos); - - LOG_DEBUG(logger, "fill to: {}, row: {}", descr.fill_to.dump(), row[pos].dump()); - - if (row[pos].isNull() || descr.fill_to.isNull()) - continue; - - if (less(row[pos], descr.fill_to, getDirection(pos))) - return true; - } - - return false; -} - -bool FillingRow::isFillToConfigured() const -{ - for (size_t pos = 0; pos < size(); ++pos) - if (!getFillDescription(pos).fill_to.isNull()) - return true; - - return false; -} - - -void FillingRow::initWithFrom(size_t from_pos) +void FillingRow::initUsingFrom(size_t from_pos) { for (size_t i = from_pos; i < sort_description.size(); ++i) row[i] = getFillDescription(i).fill_from; } -void FillingRow::initWithTo(size_t from_pos) +void FillingRow::initUsingTo(size_t from_pos) { for (size_t i = from_pos; i < sort_description.size(); ++i) row[i] = getFillDescription(i).fill_to; diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index bd5a1b877a5..d33e3f95541 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -15,7 +15,7 @@ bool equals(const Field & lhs, const 
Field & rhs); */ class FillingRow { - std::optional doJump(const FillColumnDescription & descr, size_t column_ind); + /// finds last value <= to std::optional doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to); bool hasSomeConstraints(size_t pos) const; @@ -36,14 +36,8 @@ public: bool hasSomeConstraints() const; bool isConstraintsComplete() const; - bool isLessStaleness() const; - bool isStalenessConfigured() const; - - bool isLessFillTo() const; - bool isFillToConfigured() const; - - void initWithFrom(size_t from_pos = 0); - void initWithTo(size_t from_pos = 0); + void initUsingFrom(size_t from_pos = 0); + void initUsingTo(size_t from_pos = 0); void initStalenessRow(const Columns& base_row, size_t row_ind); Field & operator[](size_t index) { return row[index]; } diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index ce804c94d8e..40650b485f8 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -21,7 +21,7 @@ namespace DB constexpr bool debug_logging_enabled = true; template -void logDebug(String key, const T & value, const char * separator = " : ") +static void logDebug(String key, const T & value, const char * separator = " : ") { if constexpr (debug_logging_enabled) { @@ -512,27 +512,6 @@ bool FillingTransform::generateSuffixIfNeeded( logDebug("generateSuffixIfNeeded next_row updated", next_row); - // if (!filling_row.isFillToConfigured() && !filling_row.isStalenessConfigured()) - // { - // logDebug("generateSuffixIfNeeded", "no other constraints, will not generate suffix"); - // return false; - // } - - // logDebug("filling_row.isLessFillTo()", filling_row.isLessFillTo()); - // logDebug("filling_row.isLessStaleness()", filling_row.isLessStaleness()); - - // if (filling_row.isFillToConfigured() && !filling_row.isLessFillTo()) - // { - // logDebug("generateSuffixIfNeeded", "not less than fill to, will not generate suffix"); - // return false; - // } - - // if (filling_row.isStalenessConfigured() && !filling_row.isLessStaleness()) - // { - // logDebug("generateSuffixIfNeeded", "not less than staleness border, will not generate suffix"); - // return false; - // } - if (!filling_row.hasSomeConstraints() || !filling_row.isConstraintsComplete()) { logDebug("generateSuffixIfNeeded", "will not generate suffix"); @@ -637,7 +616,7 @@ void FillingTransform::transformRange( if (!fill_from.isNull() && !equals(current_value, fill_from)) { - filling_row.initWithFrom(i); + filling_row.initUsingFrom(i); filling_row_inserted = false; if (less(fill_from, current_value, filling_row.getDirection(i))) { @@ -732,9 +711,6 @@ void FillingTransform::transformRange( copyRowFromColumns(res_interpolate_columns, input_interpolate_columns, row_ind); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); copyRowFromColumns(res_other_columns, input_other_columns, row_ind); - - // /// Init next staleness interval with current row, because we have already made the long jump to it - // filling_row.initStalenessRow(input_fill_columns, row_ind); } /// save sort prefix of last row in the range, it's used to generate suffix @@ -780,7 +756,7 @@ void FillingTransform::transform(Chunk & chunk) /// if no data was processed, then need to initialize filling_row if (last_row.empty()) { - filling_row.initWithFrom(); + filling_row.initUsingFrom(); filling_row_inserted = false; } From 7af2e822e7eb486ae95319a09364ea36498bb49b Mon Sep 17 00:00:00 2001 From: 
Mikhail Artemenko Date: Wed, 30 Oct 2024 15:22:45 +0000 Subject: [PATCH 163/566] cleanup --- src/Interpreters/FillingRow.cpp | 37 +++++++++------- src/Interpreters/FillingRow.h | 6 +-- .../Transforms/FillingTransform.cpp | 44 ++++++------------- 3 files changed, 36 insertions(+), 51 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index a87ca418b7b..df93ece2af4 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -1,10 +1,10 @@ #include -#include -#include "Common/Logger.h" -#include "Common/logger_useful.h" -#include -#include "base/defines.h" + #include +#include +#include +#include +#include namespace DB @@ -145,7 +145,7 @@ Field findMin(Field a, Field b, Field c, int dir) return a; } -std::pair FillingRow::next(const FillingRow & next_original_row) +bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) { auto logger = getLogger("FillingRow"); @@ -169,18 +169,18 @@ std::pair FillingRow::next(const FillingRow & next_original_row) LOG_DEBUG(logger, "pos: {}", pos); if (pos == row_size) - return {false, false}; + return false; const auto & pos_descr = getFillDescription(pos); if (!next_original_row[pos].isNull() && less(next_original_row[pos], row[pos], getDirection(pos))) - return {false, false}; + return false; if (!staleness_border[pos].isNull() && !less(row[pos], staleness_border[pos], getDirection(pos))) - return {false, false}; + return false; if (!pos_descr.fill_to.isNull() && !less(row[pos], pos_descr.fill_to, getDirection(pos))) - return {false, false}; + return false; /// If we have any 'fill_to' value at position greater than 'pos' or configured staleness, /// we need to generate rows up to one of this borders. @@ -205,20 +205,22 @@ std::pair FillingRow::next(const FillingRow & next_original_row) row[i] = next_value; initUsingFrom(i + 1); - return {true, true}; + + value_changed = true; + return true; } auto next_value = row[pos]; getFillDescription(pos).step_func(next_value, 1); if (!next_original_row[pos].isNull() && less(next_original_row[pos], next_value, getDirection(pos))) - return {false, false}; + return false; if (!staleness_border[pos].isNull() && !less(next_value, staleness_border[pos], getDirection(pos))) - return {false, false}; + return false; if (!pos_descr.fill_to.isNull() && !less(next_value, pos_descr.fill_to, getDirection(pos))) - return {false, false}; + return false; row[pos] = next_value; if (equals(row[pos], next_original_row[pos])) @@ -239,11 +241,14 @@ std::pair FillingRow::next(const FillingRow & next_original_row) ); } - return {is_less, true}; + value_changed = true; + return is_less; } initUsingFrom(pos + 1); - return {true, true}; + + value_changed = true; + return true; } bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed) diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index d33e3f95541..d4590d7b81c 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -25,10 +25,8 @@ public: explicit FillingRow(const SortDescription & sort_description); /// Generates next row according to fill 'from', 'to' and 'step' values. 
- /// Return pair of boolean - /// apply - true if filling values should be inserted into result set - /// value_changed - true if filling row value was changed - std::pair next(const FillingRow & next_original_row); + /// Returns true if filling values should be inserted into result set + bool next(const FillingRow & next_original_row, bool& value_changed); /// Returns true if need to generate some prefix for to_row bool shift(const FillingRow & next_original_row, bool& value_changed); diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 40650b485f8..f23ffec43de 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -11,7 +11,6 @@ #include #include #include -#include "Interpreters/FillingRow.h" #include @@ -534,9 +533,7 @@ bool FillingTransform::generateSuffixIfNeeded( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row); - filling_row_changed = changed; - if (!apply) + if (!filling_row.next(next_row, filling_row_changed)) break; interpolate(result_columns, interpolate_block); @@ -660,9 +657,7 @@ void FillingTransform::transformRange( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row); - filling_row_changed = changed; - if (!apply) + if (!filling_row.next(next_row, filling_row_changed)) break; interpolate(result_columns, interpolate_block); @@ -670,35 +665,22 @@ void FillingTransform::transformRange( copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); } + /// Initialize staleness border for current row to generate it's prefix + filling_row.initStalenessRow(input_fill_columns, row_ind); + + while (filling_row.shift(next_row, filling_row_changed)) { - filling_row.initStalenessRow(input_fill_columns, row_ind); + logDebug("filling_row after shift", filling_row); - bool shift_apply = filling_row.shift(next_row, filling_row_changed); - logDebug("shift_apply", shift_apply); - logDebug("filling_row_changed", filling_row_changed); - - while (shift_apply) + do { - logDebug("after shift", filling_row); + logDebug("inserting prefix filling_row", filling_row); - while (true) - { - logDebug("filling_row in prefix", filling_row); + interpolate(result_columns, interpolate_block); + insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); + copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); - interpolate(result_columns, interpolate_block); - insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); - copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); - - const auto [apply, changed] = filling_row.next(next_row); - logDebug("filling_row in prefix", filling_row); - - filling_row_changed = changed; - if (!apply) - break; - } - - shift_apply = filling_row.shift(next_row, filling_row_changed); - } + } while (filling_row.next(next_row, filling_row_changed)); } /// new valid filling row was generated but not inserted, will use it during suffix generation From ab5738b9f1e87cf8b49b3d74a3bbd05e53c39850 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 16:11:40 +0000 Subject: [PATCH 164/566] merge constraints --- src/Interpreters/FillingRow.cpp | 92 +++++++------------ src/Interpreters/FillingRow.h | 4 +- .../Transforms/FillingTransform.cpp | 4 +- 3 files changed, 37 insertions(+), 63 
deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index df93ece2af4..67827567e04 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -32,7 +32,10 @@ FillingRow::FillingRow(const SortDescription & sort_description_) : sort_description(sort_description_) { row.resize(sort_description.size()); - staleness_border.resize(sort_description.size()); + + constraints.reserve(sort_description.size()); + for (size_t i = 0; i < size(); ++i) + constraints.push_back(getFillDescription(i).fill_to); } bool FillingRow::operator<(const FillingRow & other) const @@ -96,53 +99,33 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, bool FillingRow::hasSomeConstraints(size_t pos) const { - const auto & descr = getFillDescription(pos); - - if (!descr.fill_to.isNull()) - return true; - - if (!descr.fill_staleness.isNull()) - return true; - - return false; + return !constraints[pos].isNull(); } bool FillingRow::isConstraintsComplete(size_t pos) const { - auto logger = getLogger("FillingRow::isConstraintComplete"); + auto logger = getLogger("FillingRow::isConstraintsComplete"); chassert(!row[pos].isNull()); chassert(hasSomeConstraints(pos)); - const auto & descr = getFillDescription(pos); int direction = getDirection(pos); + LOG_DEBUG(logger, "constraint: {}, row: {}, direction: {}", constraints[pos].dump(), row[pos].dump(), direction); - if (!descr.fill_to.isNull() && !less(row[pos], descr.fill_to, direction)) - { - LOG_DEBUG(logger, "fill to: {}, row: {}, direction: {}", descr.fill_to.dump(), row[pos].dump(), direction); - return false; - } - - if (!descr.fill_staleness.isNull() && !less(row[pos], staleness_border[pos], direction)) - { - LOG_DEBUG(logger, "staleness border: {}, row: {}, direction: {}", staleness_border[pos].dump(), row[pos].dump(), direction); - return false; - } - - return true; + return less(row[pos], constraints[pos], direction); } -Field findMin(Field a, Field b, Field c, int dir) +static const Field & findBorder(const Field & constraint, const Field & next_original, int direction) { - auto logger = getLogger("FillingRow"); - LOG_DEBUG(logger, "a: {} b: {} c: {}", a.dump(), b.dump(), c.dump()); + if (constraint.isNull()) + return next_original; - if (a.isNull() || (!b.isNull() && less(b, a, dir))) - a = b; + if (next_original.isNull()) + return constraint; - if (a.isNull() || (!c.isNull() && less(c, a, dir))) - a = c; + if (less(constraint, next_original, direction)) + return constraint; - return a; + return next_original; } bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) @@ -158,11 +141,10 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) if (row[pos].isNull()) continue; - const auto & descr = getFillDescription(pos); - auto min_constr = findMin(next_original_row[pos], staleness_border[pos], descr.fill_to, getDirection(pos)); - LOG_DEBUG(logger, "min_constr: {}", min_constr); + const Field & border = findBorder(constraints[pos], next_original_row[pos], getDirection(pos)); + LOG_DEBUG(logger, "border: {}", border); - if (!min_constr.isNull() && !equals(row[pos], min_constr)) + if (!border.isNull() && !equals(row[pos], border)) break; } @@ -171,15 +153,10 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) if (pos == row_size) return false; - const auto & pos_descr = getFillDescription(pos); - if (!next_original_row[pos].isNull() && less(next_original_row[pos], row[pos], getDirection(pos))) return 
false; - if (!staleness_border[pos].isNull() && !less(row[pos], staleness_border[pos], getDirection(pos))) - return false; - - if (!pos_descr.fill_to.isNull() && !less(row[pos], pos_descr.fill_to, getDirection(pos))) + if (!constraints[pos].isNull() && !less(row[pos], constraints[pos], getDirection(pos))) return false; /// If we have any 'fill_to' value at position greater than 'pos' or configured staleness, @@ -191,16 +168,13 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) if (row[i].isNull()) continue; - if (fill_column_desc.fill_to.isNull() && staleness_border[i].isNull()) + if (constraints[i].isNull()) continue; Field next_value = row[i]; fill_column_desc.step_func(next_value, 1); - if (!staleness_border[i].isNull() && !less(next_value, staleness_border[i], getDirection(i))) - continue; - - if (!fill_column_desc.fill_to.isNull() && !less(next_value, fill_column_desc.fill_to, getDirection(i))) + if (!less(next_value, constraints[i], getDirection(i))) continue; row[i] = next_value; @@ -216,10 +190,7 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) if (!next_original_row[pos].isNull() && less(next_original_row[pos], next_value, getDirection(pos))) return false; - if (!staleness_border[pos].isNull() && !less(next_value, staleness_border[pos], getDirection(pos))) - return false; - - if (!pos_descr.fill_to.isNull() && !less(next_value, pos_descr.fill_to, getDirection(pos))) + if (!constraints[pos].isNull() && !less(next_value, constraints[pos], getDirection(pos))) return false; row[pos] = next_value; @@ -236,8 +207,7 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) is_less |= ( (next_original_row[i].isNull() || less(row[i], next_original_row[i], getDirection(i))) && - (staleness_border[i].isNull() || less(row[i], staleness_border[i], getDirection(i))) && - (descr.fill_to.isNull() || less(row[i], descr.fill_to, getDirection(i))) + (constraints[i].isNull() || less(row[i], constraints[i], getDirection(i))) ); } @@ -291,8 +261,7 @@ bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed is_less |= ( (next_original_row[i].isNull() || less(row[i], next_original_row[i], getDirection(i))) && - (staleness_border[i].isNull() || less(row[i], staleness_border[i], getDirection(i))) && - (descr.fill_to.isNull() || less(row[i], descr.fill_to, getDirection(i))) + (constraints[i].isNull() || less(row[i], constraints[i], getDirection(i))) ); } @@ -347,15 +316,20 @@ void FillingRow::initUsingTo(size_t from_pos) row[i] = getFillDescription(i).fill_to; } -void FillingRow::initStalenessRow(const Columns& base_row, size_t row_ind) +void FillingRow::updateConstraintsWithStalenessRow(const Columns& base_row, size_t row_ind) { for (size_t i = 0; i < size(); ++i) { const auto& descr = getFillDescription(i); + constraints[i] = descr.fill_to; + if (!descr.fill_staleness.isNull()) { - staleness_border[i] = (*base_row[i])[row_ind]; - descr.staleness_step_func(staleness_border[i], 1); + Field staleness_border = (*base_row[i])[row_ind]; + descr.staleness_step_func(staleness_border, 1); + + if (constraints[i].isNull() || less(staleness_border, constraints[i], getDirection(i))) + constraints[i] = std::move(staleness_border); } } } diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index d4590d7b81c..edcaba02aa7 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -36,7 +36,7 @@ public: void initUsingFrom(size_t from_pos = 0); void 
initUsingTo(size_t from_pos = 0); - void initStalenessRow(const Columns& base_row, size_t row_ind); + void updateConstraintsWithStalenessRow(const Columns& base_row, size_t row_ind); Field & operator[](size_t index) { return row[index]; } const Field & operator[](size_t index) const { return row[index]; } @@ -54,7 +54,7 @@ public: private: Row row; - Row staleness_border; + Row constraints; SortDescription sort_description; }; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index f23ffec43de..407a79efb93 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -628,7 +628,7 @@ void FillingTransform::transformRange( } /// Init staleness first interval - filling_row.initStalenessRow(input_fill_columns, range_begin); + filling_row.updateConstraintsWithStalenessRow(input_fill_columns, range_begin); for (size_t row_ind = range_begin; row_ind < range_end; ++row_ind) { @@ -666,7 +666,7 @@ void FillingTransform::transformRange( } /// Initialize staleness border for current row to generate it's prefix - filling_row.initStalenessRow(input_fill_columns, row_ind); + filling_row.updateConstraintsWithStalenessRow(input_fill_columns, row_ind); while (filling_row.shift(next_row, filling_row_changed)) { From 5b4d55dd3f0ff4393e81a7a36ad092eee46be2c6 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 16:41:02 +0000 Subject: [PATCH 165/566] move logs under flag --- src/Interpreters/FillingRow.cpp | 33 +++++++++---------- .../Transforms/FillingTransform.cpp | 2 +- 2 files changed, 16 insertions(+), 19 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 67827567e04..deb4c765d31 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -10,6 +10,15 @@ namespace DB { +constexpr static bool debug_logging_enabled = true; + +template +static void logDebug(String fmt_str, Args&&... 
args) +{ + if constexpr (debug_logging_enabled) + LOG_DEBUG(getLogger("FillingRow"), "{}", fmt::format(fmt::runtime(fmt_str), std::forward(args)...)); +} + bool less(const Field & lhs, const Field & rhs, int direction) { if (direction == -1) @@ -104,12 +113,11 @@ bool FillingRow::hasSomeConstraints(size_t pos) const bool FillingRow::isConstraintsComplete(size_t pos) const { - auto logger = getLogger("FillingRow::isConstraintsComplete"); chassert(!row[pos].isNull()); chassert(hasSomeConstraints(pos)); int direction = getDirection(pos); - LOG_DEBUG(logger, "constraint: {}, row: {}, direction: {}", constraints[pos].dump(), row[pos].dump(), direction); + logDebug("constraint: {}, row: {}, direction: {}", constraints[pos].dump(), row[pos].dump(), direction); return less(row[pos], constraints[pos], direction); } @@ -130,7 +138,6 @@ static const Field & findBorder(const Field & constraint, const Field & next_ori bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) { - auto logger = getLogger("FillingRow"); const size_t row_size = size(); size_t pos = 0; @@ -142,13 +149,13 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) continue; const Field & border = findBorder(constraints[pos], next_original_row[pos], getDirection(pos)); - LOG_DEBUG(logger, "border: {}", border); + logDebug("border: {}", border); if (!border.isNull() && !equals(row[pos], border)) break; } - LOG_DEBUG(logger, "pos: {}", pos); + logDebug("pos: {}", pos); if (pos == row_size) return false; @@ -223,8 +230,7 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed) { - auto logger = getLogger("FillingRow::shift"); - LOG_DEBUG(logger, "next_original_row: {}, current: {}", next_original_row.dump(), dump()); + logDebug("next_original_row: {}, current: {}", next_original_row.dump(), dump()); for (size_t pos = 0; pos < size(); ++pos) { @@ -235,16 +241,7 @@ bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed return false; std::optional next_value = doLongJump(getFillDescription(pos), pos, next_original_row[pos]); - - if (!next_value.has_value()) - { - LOG_DEBUG(logger, "next value: {}", "None"); - continue; - } - else - { - LOG_DEBUG(logger, "next value: {}", next_value->dump()); - } + logDebug("jumped to next value: {}", next_value.value_or("Did not complete")); row[pos] = std::move(next_value.value()); @@ -265,7 +262,7 @@ bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed ); } - LOG_DEBUG(logger, "is less: {}", is_less); + logDebug("is less: {}", is_less); value_changed = true; return is_less; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 407a79efb93..81d93a6eadb 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -17,7 +17,7 @@ namespace DB { -constexpr bool debug_logging_enabled = true; +constexpr static bool debug_logging_enabled = true; template static void logDebug(String key, const T & value, const char * separator = " : ") From 82783fe020b83425590ab14949d5b5face7c9fd6 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 16:41:38 +0000 Subject: [PATCH 166/566] disable logs --- src/Interpreters/FillingRow.cpp | 2 +- src/Processors/Transforms/FillingTransform.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index deb4c765d31..3b40c2b6cdd 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -10,7 +10,7 @@ namespace DB { -constexpr static bool debug_logging_enabled = true; +constexpr static bool debug_logging_enabled = false; template static void logDebug(String fmt_str, Args&&... args) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 81d93a6eadb..dc0bafba3e3 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -17,7 +17,7 @@ namespace DB { -constexpr static bool debug_logging_enabled = true; +constexpr static bool debug_logging_enabled = false; template static void logDebug(String key, const T & value, const char * separator = " : ") From b6bd776355171896abb3ef95d2dfdb204799a4b1 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 17:09:35 +0000 Subject: [PATCH 167/566] cleanup --- src/Interpreters/FillingRow.cpp | 8 ++++---- src/Interpreters/FillingRow.h | 4 ++-- src/Processors/Transforms/FillingTransform.cpp | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 3b40c2b6cdd..98c18e9b2ae 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -13,7 +13,7 @@ namespace DB constexpr static bool debug_logging_enabled = false; template -static void logDebug(String fmt_str, Args&&... args) +inline static void logDebug(String fmt_str, Args&&... args) { if constexpr (debug_logging_enabled) LOG_DEBUG(getLogger("FillingRow"), "{}", fmt::format(fmt::runtime(fmt_str), std::forward(args)...)); @@ -111,7 +111,7 @@ bool FillingRow::hasSomeConstraints(size_t pos) const return !constraints[pos].isNull(); } -bool FillingRow::isConstraintsComplete(size_t pos) const +bool FillingRow::isConstraintsSatisfied(size_t pos) const { chassert(!row[pos].isNull()); chassert(hasSomeConstraints(pos)); @@ -288,14 +288,14 @@ bool FillingRow::hasSomeConstraints() const return false; } -bool FillingRow::isConstraintsComplete() const +bool FillingRow::isConstraintsSatisfied() const { for (size_t pos = 0; pos < size(); ++pos) { if (row[pos].isNull() || !hasSomeConstraints(pos)) continue; - return isConstraintsComplete(pos); + return isConstraintsSatisfied(pos); } return true; diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index edcaba02aa7..08d624a2405 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -19,7 +19,7 @@ class FillingRow std::optional doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to); bool hasSomeConstraints(size_t pos) const; - bool isConstraintsComplete(size_t pos) const; + bool isConstraintsSatisfied(size_t pos) const; public: explicit FillingRow(const SortDescription & sort_description); @@ -32,7 +32,7 @@ public: bool shift(const FillingRow & next_original_row, bool& value_changed); bool hasSomeConstraints() const; - bool isConstraintsComplete() const; + bool isConstraintsSatisfied() const; void initUsingFrom(size_t from_pos = 0); void initUsingTo(size_t from_pos = 0); diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index dc0bafba3e3..a5c6460db0a 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -20,7 +20,7 @@ namespace DB constexpr 
static bool debug_logging_enabled = false; template -static void logDebug(String key, const T & value, const char * separator = " : ") +inline static void logDebug(String key, const T & value, const char * separator = " : ") { if constexpr (debug_logging_enabled) { @@ -511,7 +511,7 @@ bool FillingTransform::generateSuffixIfNeeded( logDebug("generateSuffixIfNeeded next_row updated", next_row); - if (!filling_row.hasSomeConstraints() || !filling_row.isConstraintsComplete()) + if (!filling_row.hasSomeConstraints() || !filling_row.isConstraintsSatisfied()) { logDebug("generateSuffixIfNeeded", "will not generate suffix"); return false; @@ -647,7 +647,7 @@ void FillingTransform::transformRange( /// The condition is true when filling row is initialized by value(s) in FILL FROM, /// and there are row(s) in current range with value(s) < then in the filling row. /// It can happen only once for a range. - if (should_insert_first && filling_row < next_row && filling_row.isConstraintsComplete()) + if (should_insert_first && filling_row < next_row && filling_row.isConstraintsSatisfied()) { interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); From c8b94a3c61330fb0649ee92ec69ffe6e6059860b Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 17:21:29 +0000 Subject: [PATCH 168/566] fix empty stream filling --- src/Processors/Transforms/FillingTransform.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index a5c6460db0a..4a8965dcfaa 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -503,7 +503,7 @@ bool FillingTransform::generateSuffixIfNeeded( logDebug("generateSuffixIfNeeded next_row", next_row); /// Determines if we should insert filling row before start generating next rows - bool should_insert_first = (next_row < filling_row && !filling_row_inserted) || next_row.isNull(); + bool should_insert_first = (next_row < filling_row && !filling_row_inserted) || (next_row.isNull() && !filling_row.isNull()); logDebug("should_insert_first", should_insert_first); for (size_t i = 0, size = filling_row.size(); i < size; ++i) From a99428fcd9d10da6b6f6fea10d033b485e558b1c Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 17:25:06 +0000 Subject: [PATCH 169/566] add errors test --- .../0_stateless/03266_with_fill_staleness_errors.reference | 0 .../queries/0_stateless/03266_with_fill_staleness_errors.sql | 5 +++++ 2 files changed, 5 insertions(+) create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness_errors.reference create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness_errors.sql diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_errors.reference b/tests/queries/0_stateless/03266_with_fill_staleness_errors.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql b/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql new file mode 100644 index 00000000000..339747e4343 --- /dev/null +++ b/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql @@ -0,0 +1,5 @@ +SET enable_analyzer=1; + +SELECT 1 AS a, 2 AS b ORDER BY a, b WITH FILL FROM 0 TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT 1 AS a, 2 AS b ORDER BY a, b DESC WITH FILL FROM 0 TO 10 
STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT 1 AS a, 2 AS b ORDER BY a, b ASC WITH FILL FROM 0 TO 10 STALENESS -3; -- { serverError INVALID_WITH_FILL_EXPRESSION } From 10088a0947aaf16a3ce1664c422d66daea3324d2 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 17:26:31 +0000 Subject: [PATCH 170/566] extend fuzzer dict with staleness --- tests/fuzz/dictionaries/keywords.dict | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/fuzz/dictionaries/keywords.dict b/tests/fuzz/dictionaries/keywords.dict index abaaf9e53b5..a37675ebcad 100644 --- a/tests/fuzz/dictionaries/keywords.dict +++ b/tests/fuzz/dictionaries/keywords.dict @@ -538,6 +538,7 @@ "WITH ADMIN OPTION" "WITH CHECK" "WITH FILL" +"STALENESS" "WITH GRANT OPTION" "WITH NAME" "WITH REPLACE OPTION" From e50176c62f18a95648c6b65627b17a095bdccbe5 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 17:29:08 +0000 Subject: [PATCH 171/566] improve test --- .../queries/0_stateless/03266_with_fill_staleness_errors.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql b/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql index 339747e4343..fbfaf3743ca 100644 --- a/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql +++ b/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql @@ -1,5 +1,5 @@ SET enable_analyzer=1; SELECT 1 AS a, 2 AS b ORDER BY a, b WITH FILL FROM 0 TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } -SELECT 1 AS a, 2 AS b ORDER BY a, b DESC WITH FILL FROM 0 TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } -SELECT 1 AS a, 2 AS b ORDER BY a, b ASC WITH FILL FROM 0 TO 10 STALENESS -3; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT 1 AS a, 2 AS b ORDER BY a, b DESC WITH FILL TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT 1 AS a, 2 AS b ORDER BY a, b ASC WITH FILL TO 10 STALENESS -3; -- { serverError INVALID_WITH_FILL_EXPRESSION } From 0cfbe95ca69d0bb52578c83570b34f4f40de92df Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 30 Oct 2024 21:20:11 +0100 Subject: [PATCH 172/566] Update 03258_multiple_array_joins.sql --- tests/queries/0_stateless/03258_multiple_array_joins.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03258_multiple_array_joins.sql b/tests/queries/0_stateless/03258_multiple_array_joins.sql index 5afe7725d3f..ddfac1da080 100644 --- a/tests/queries/0_stateless/03258_multiple_array_joins.sql +++ b/tests/queries/0_stateless/03258_multiple_array_joins.sql @@ -1,3 +1,4 @@ +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_multiple_array_join; CREATE TABLE test_multiple_array_join ( From 8245e3d7ef5530d55763700b0c1aeae1697dd26c Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 30 Oct 2024 21:23:29 +0000 Subject: [PATCH 173/566] Fix --- src/Planner/PlannerJoinTree.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index ac05f893cd2..481cb9b8649 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -982,10 +982,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if ( - ClusterProxy::canUseParallelReplicasOnInitiator(query_context) - && planner_context->getGlobalPlannerContext()->parallel_replicas_node - && 
planner_context->getGlobalPlannerContext()->parallel_replicas_node == query_node) + else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); From 4e2693bb466a07ab06d4155a091e6782a495ed45 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 31 Oct 2024 02:01:23 +0000 Subject: [PATCH 174/566] add test --- ...in_order_optimization_with_virtual_row.sql | 5 ++--- ...ization_with_virtual_row_special.reference | 2 ++ ..._optimization_with_virtual_row_special.sql | 21 +++++++++++++++++++ 3 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.reference create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index f66b4be2c69..8826f2c27cf 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -156,7 +156,6 @@ read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge DROP TABLE fixed_prefix; SELECT '========'; --- currently don't support virtual row in this case DROP TABLE IF EXISTS function_pk; CREATE TABLE function_pk @@ -179,7 +178,7 @@ ORDER BY (A,-B) ASC limit 3 SETTINGS max_threads = 1, optimize_read_in_order = 1, -read_in_order_two_level_merge_threshold = 0; --force preliminary merge +read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge DROP TABLE function_pk; @@ -214,4 +213,4 @@ SETTINGS read_in_order_two_level_merge_threshold = 0, optimize_read_in_order = 1, max_threads = 2; -DROP TABLE distinct_in_order; +DROP TABLE distinct_in_order; \ No newline at end of file diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.reference new file mode 100644 index 00000000000..b03759364cf --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.reference @@ -0,0 +1,2 @@ +dist +src diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql new file mode 100644 index 00000000000..ee7336bdf02 --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql @@ -0,0 +1,21 @@ +-- Tags: no-parallel + +-- modified from test_01155_ordinary +DROP DATABASE IF EXISTS test_01155_ordinary; + +SET allow_deprecated_database_ordinary = 1; + +CREATE DATABASE test_01155_ordinary ENGINE = Ordinary; + +USE test_01155_ordinary; + +CREATE TABLE src (s String) ENGINE = MergeTree() ORDER BY s; +INSERT INTO src(s) VALUES ('before moving tables'); +CREATE TABLE dist (s String) ENGINE = Distributed(test_shard_localhost, test_01155_ordinary, src); + +SET enable_analyzer=0; +SELECT _table FROM merge('test_01155_ordinary', '') ORDER BY _table, s; + +DROP TABLE src; +DROP TABLE dist; +DROP DATABASE test_01155_ordinary; \ No newline at end of file From b229fb1664c8ed5b2c19ff569bb94c51e2f8cbec Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Thu, 31 Oct 2024 12:04:24 +0000 Subject: [PATCH 175/566] Check if the mutation 
query is valid. --- src/Interpreters/MutationsInterpreter.cpp | 3 +++ .../03256_invalid_mutation_query.reference | 0 .../03256_invalid_mutation_query.sql | 19 +++++++++++++++++++ 3 files changed, 22 insertions(+) create mode 100644 tests/queries/0_stateless/03256_invalid_mutation_query.reference create mode 100644 tests/queries/0_stateless/03256_invalid_mutation_query.sql diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 0f25d5ac21c..da99b217341 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -1386,6 +1386,9 @@ void MutationsInterpreter::validate() } } + // Make sure the mutations query is valid + prepareQueryAffectedQueryTree(commands, source.getStorage(), context); + QueryPlan plan; initQueryPlan(stages.front(), plan); diff --git a/tests/queries/0_stateless/03256_invalid_mutation_query.reference b/tests/queries/0_stateless/03256_invalid_mutation_query.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03256_invalid_mutation_query.sql b/tests/queries/0_stateless/03256_invalid_mutation_query.sql new file mode 100644 index 00000000000..010f96414d4 --- /dev/null +++ b/tests/queries/0_stateless/03256_invalid_mutation_query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t (x int) ENGINE = MergeTree() ORDER BY (); + +DELETE FROM t WHERE y in (SELECT y FROM t); -- { serverError 47 } +DELETE FROM t WHERE x in (SELECT y FROM t); -- { serverError 47 } +DELETE FROM t WHERE x IN (SELECT * FROM t2); -- { serverError 60 } +ALTER TABLE t DELETE WHERE x in (SELECT y FROM t); -- { serverError 47 } +ALTER TABLE t UPDATE x = 1 WHERE x IN (SELECT y FROM t); -- { serverError 47 } + +ALTER TABLE t ADD COLUMN y int; +DELETE FROM t WHERE y in (SELECT y FROM t); + +CREATE TABLE t2 (x int) ENGINE = MergeTree() ORDER BY (); +DELETE FROM t WHERE x IN (SELECT * FROM t2); + +DROP TABLE t; +DROP TABLE t2; From 0808d7f0fb96e9f6c6536b9033cf2f7499cbb383 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Thu, 31 Oct 2024 12:26:46 +0000 Subject: [PATCH 176/566] Fix FULL JOINs again --- src/Planner/Planner.cpp | 3 ++- src/Planner/Planner.h | 1 + src/Planner/PlannerJoinTree.cpp | 15 ++++++++++++--- src/Planner/findParallelReplicasQuery.cpp | 8 +------- 4 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 17277dfe8cd..260462652fc 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1263,6 +1263,7 @@ Planner::Planner(const QueryTreeNodePtr & query_tree_, findQueryForParallelReplicas(query_tree, select_query_options), findTableForParallelReplicas(query_tree, select_query_options), collectFiltersForAnalysis(query_tree, select_query_options)))) + , root_planner(true) { } @@ -1537,7 +1538,7 @@ void Planner::buildPlanForQueryNode() JoinTreeQueryPlan join_tree_query_plan; if (planner_context->getMutableQueryContext()->canUseTaskBasedParallelReplicas() - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node && !root_planner) { join_tree_query_plan = buildQueryPlanForParallelReplicas(query_node, planner_context, select_query_info.storage_limits); } diff --git a/src/Planner/Planner.h b/src/Planner/Planner.h index ae78f05cbd4..bf11c9ef9cd 100644 --- a/src/Planner/Planner.h +++ b/src/Planner/Planner.h @@ -82,6 +82,7 @@ private: 
StorageLimitsList storage_limits; std::set used_row_policies; QueryNodeToPlanStepMapping query_node_to_plan_step_mapping; + bool root_planner = false; }; } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 481cb9b8649..160d7f07d5b 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -659,6 +659,7 @@ std::unique_ptr createComputeAliasColumnsStep( } JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression, + const QueryNode & parent_query_node, const SelectQueryInfo & select_query_info, const SelectQueryOptions & select_query_options, PlannerContextPtr & planner_context, @@ -982,7 +983,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) + else if ( + ClusterProxy::canUseParallelReplicasOnInitiator(query_context) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); @@ -1815,6 +1819,7 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { + const QueryNode & parent_query_node = query_node->as(); auto table_expressions_stack = buildTableExpressionsStack(query_node->as().getJoinTree()); size_t table_expressions_stack_size = table_expressions_stack.size(); bool is_single_table_expression = table_expressions_stack_size == 1; @@ -1850,7 +1855,9 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, * Examples: Distributed, LiveView, Merge storages. */ auto left_table_expression = table_expressions_stack.front(); - auto left_table_expression_query_plan = buildQueryPlanForTableExpression(left_table_expression, + auto left_table_expression_query_plan = buildQueryPlanForTableExpression( + left_table_expression, + parent_query_node, select_query_info, select_query_options, planner_context, @@ -1923,7 +1930,9 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, * table expression in subquery. */ bool is_remote = planner_context->getTableExpressionDataOrThrow(table_expression).isRemote(); - query_plans_stack.push_back(buildQueryPlanForTableExpression(table_expression, + query_plans_stack.push_back(buildQueryPlanForTableExpression( + table_expression, + parent_query_node, select_query_info, select_query_options, planner_context, diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index d92500e82fc..63c0ce8eb68 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -101,17 +101,11 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre auto join_strictness = join_node.getStrictness(); if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) - { query_tree_node = join_node.getLeftTableExpression().get(); - } else if (join_kind == JoinKind::Right) - { query_tree_node = join_node.getRightTableExpression().get(); - } else - { return {}; - } break; } @@ -275,7 +269,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// We don't have any subquery and storage can process parallel replicas by itself. 
if (stack.top() == query_tree_node.get()) - return nullptr; + return query_node; /// This is needed to avoid infinite recursion. auto mutable_context = Context::createCopy(context); From 1fd66d0472d90bc6da1d0f04dce8140b83fd6bb7 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Thu, 31 Oct 2024 14:58:27 +0100 Subject: [PATCH 177/566] Update SerializationObject.cpp --- src/DataTypes/Serializations/SerializationObject.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index cf63797b0c2..19e12d777e4 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -365,7 +365,7 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationObject::deserializeOb auto structure_state = std::make_shared(serialization_version); if (structure_state->serialization_version.value == ObjectSerializationVersion::Value::V1 || structure_state->serialization_version.value == ObjectSerializationVersion::Value::V2) { - if (structure_state->structure_version.value == ObjectSerializationVersion::Value::V1) + if (structure_state->serialization_version.value == ObjectSerializationVersion::Value::V1) { /// Skip max_dynamic_paths parameter in V1 serialization version. size_t max_dynamic_paths; From fa5010ba181f7251ebcf9ce09ade01c48fdcdebc Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 31 Oct 2024 14:20:47 +0000 Subject: [PATCH 178/566] fix test --- ...der_optimization_with_virtual_row_special.sql | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql index ee7336bdf02..3d6f9ad391b 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql @@ -1,21 +1,19 @@ -- Tags: no-parallel --- modified from test_01155_ordinary -DROP DATABASE IF EXISTS test_01155_ordinary; +-- modified from test_01155_ordinary, to test special optimization path for virtual row +DROP DATABASE IF EXISTS test_03031; -SET allow_deprecated_database_ordinary = 1; +CREATE DATABASE test_03031; -CREATE DATABASE test_01155_ordinary ENGINE = Ordinary; - -USE test_01155_ordinary; +USE test_03031; CREATE TABLE src (s String) ENGINE = MergeTree() ORDER BY s; INSERT INTO src(s) VALUES ('before moving tables'); -CREATE TABLE dist (s String) ENGINE = Distributed(test_shard_localhost, test_01155_ordinary, src); +CREATE TABLE dist (s String) ENGINE = Distributed(test_shard_localhost, test_03031, src); SET enable_analyzer=0; -SELECT _table FROM merge('test_01155_ordinary', '') ORDER BY _table, s; +SELECT _table FROM merge('test_03031', '') ORDER BY _table, s; DROP TABLE src; DROP TABLE dist; -DROP DATABASE test_01155_ordinary; \ No newline at end of file +DROP DATABASE test_03031; \ No newline at end of file From 83f434dffb6bad82abdc791179196b32e1a7f347 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Thu, 31 Oct 2024 16:25:17 +0000 Subject: [PATCH 179/566] fix simple path --- src/Processors/Transforms/FillingTransform.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 
4a8965dcfaa..dd116a9972a 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -608,9 +608,6 @@ void FillingTransform::transformRange( const auto current_value = (*input_fill_columns[i])[range_begin]; const auto & fill_from = filling_row.getFillDescription(i).fill_from; - logDebug("current value", current_value.dump()); - logDebug("fill from", fill_from.dump()); - if (!fill_from.isNull() && !equals(current_value, fill_from)) { filling_row.initUsingFrom(i); @@ -663,6 +660,7 @@ void FillingTransform::transformRange( interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); + filling_row_changed = false; } /// Initialize staleness border for current row to generate it's prefix @@ -679,6 +677,7 @@ void FillingTransform::transformRange( interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); + filling_row_changed = false; } while (filling_row.next(next_row, filling_row_changed)); } From 1000ef0e022516536cbd680fa6a206bf5401295c Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Thu, 31 Oct 2024 16:39:31 +0000 Subject: [PATCH 180/566] some improves --- src/Interpreters/FillingRow.cpp | 20 ++++++++----- .../Transforms/FillingTransform.cpp | 30 +++++++++++-------- src/Processors/Transforms/FillingTransform.h | 1 + 3 files changed, 31 insertions(+), 20 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 98c18e9b2ae..384ad669206 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -13,7 +13,7 @@ namespace DB constexpr static bool debug_logging_enabled = false; template -inline static void logDebug(String fmt_str, Args&&... args) +inline static void logDebug(const char * fmt_str, Args&&... 
args) { if constexpr (debug_logging_enabled) LOG_DEBUG(getLogger("FillingRow"), "{}", fmt::format(fmt::runtime(fmt_str), std::forward(args)...)); @@ -117,7 +117,7 @@ bool FillingRow::isConstraintsSatisfied(size_t pos) const chassert(hasSomeConstraints(pos)); int direction = getDirection(pos); - logDebug("constraint: {}, row: {}, direction: {}", constraints[pos].dump(), row[pos].dump(), direction); + logDebug("constraint: {}, row: {}, direction: {}", constraints[pos], row[pos], direction); return less(row[pos], constraints[pos], direction); } @@ -230,7 +230,7 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed) { - logDebug("next_original_row: {}, current: {}", next_original_row.dump(), dump()); + logDebug("next_original_row: {}, current: {}", next_original_row, *this); for (size_t pos = 0; pos < size(); ++pos) { @@ -318,15 +318,12 @@ void FillingRow::updateConstraintsWithStalenessRow(const Columns& base_row, size for (size_t i = 0; i < size(); ++i) { const auto& descr = getFillDescription(i); - constraints[i] = descr.fill_to; if (!descr.fill_staleness.isNull()) { Field staleness_border = (*base_row[i])[row_ind]; descr.staleness_step_func(staleness_border, 1); - - if (constraints[i].isNull() || less(staleness_border, constraints[i], getDirection(i))) - constraints[i] = std::move(staleness_border); + constraints[i] = findBorder(descr.fill_to, staleness_border, getDirection(i)); } } } @@ -350,3 +347,12 @@ WriteBuffer & operator<<(WriteBuffer & out, const FillingRow & row) } } + +template <> +struct fmt::formatter : fmt::formatter +{ + constexpr auto format(const DB::FillingRow & row, format_context & ctx) const + { + return fmt::format_to(ctx.out(), "{}", row.dump()); + } +}; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index dd116a9972a..ab782f3e521 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -20,7 +20,7 @@ namespace DB constexpr static bool debug_logging_enabled = false; template -inline static void logDebug(String key, const T & value, const char * separator = " : ") +inline static void logDebug(const char * key, const T & value, const char * separator = " : ") { if constexpr (debug_logging_enabled) { @@ -235,6 +235,7 @@ FillingTransform::FillingTransform( fill_column_positions.push_back(block_position); auto & descr = filling_row.getFillDescription(i); + running_with_staleness |= !descr.fill_staleness.isNull(); const Block & output_header = getOutputPort().getHeader(); const DataTypePtr & type = removeNullable(output_header.getByPosition(block_position).type); @@ -663,23 +664,26 @@ void FillingTransform::transformRange( filling_row_changed = false; } - /// Initialize staleness border for current row to generate it's prefix - filling_row.updateConstraintsWithStalenessRow(input_fill_columns, row_ind); - - while (filling_row.shift(next_row, filling_row_changed)) + if (running_with_staleness) { - logDebug("filling_row after shift", filling_row); + /// Initialize staleness border for current row to generate it's prefix + filling_row.updateConstraintsWithStalenessRow(input_fill_columns, row_ind); - do + while (filling_row.shift(next_row, filling_row_changed)) { - logDebug("inserting prefix filling_row", filling_row); + logDebug("filling_row after shift", filling_row); - interpolate(result_columns, interpolate_block); - 
insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); - copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); - filling_row_changed = false; + do + { + logDebug("inserting prefix filling_row", filling_row); - } while (filling_row.next(next_row, filling_row_changed)); + interpolate(result_columns, interpolate_block); + insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); + copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); + filling_row_changed = false; + + } while (filling_row.next(next_row, filling_row_changed)); + } } /// new valid filling row was generated but not inserted, will use it during suffix generation diff --git a/src/Processors/Transforms/FillingTransform.h b/src/Processors/Transforms/FillingTransform.h index a8866a97103..92ca4fe6c9e 100644 --- a/src/Processors/Transforms/FillingTransform.h +++ b/src/Processors/Transforms/FillingTransform.h @@ -84,6 +84,7 @@ private: SortDescription sort_prefix; const InterpolateDescriptionPtr interpolate_description; /// Contains INTERPOLATE columns + bool running_with_staleness = false; /// True if STALENESS clause was used. FillingRow filling_row; /// Current row, which is used to fill gaps. FillingRow next_row; /// Row to which we need to generate filling rows. bool filling_row_inserted = false; From 77298ef479befda70073216255658f656bf5fba5 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 31 Oct 2024 18:23:06 +0000 Subject: [PATCH 181/566] add setting --- src/Core/Settings.cpp | 3 +++ src/Core/SettingsChangesHistory.cpp | 1 + src/Processors/QueryPlan/ReadFromMergeTree.cpp | 3 ++- tests/queries/0_stateless/01786_explain_merge_tree.sh | 2 +- tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql | 1 + .../03031_read_in_order_optimization_with_virtual_row.sql | 2 ++ ...031_read_in_order_optimization_with_virtual_row_explain.sql | 2 +- ...031_read_in_order_optimization_with_virtual_row_special.sql | 2 ++ 8 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 0aecb7cf941..37646dc86cb 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2863,6 +2863,9 @@ Possible values: **See Also** - [ORDER BY Clause](../../sql-reference/statements/select/order-by.md/#optimize_read_in_order) +)", 0) \ + DECLARE(Bool, read_in_order_use_virtual_row, false, R"( +Use virtual row while reading in order of primary key or its monotonic function fashion. It is useful when searching over multiple parts as only relevant ones are touched. )", 0) \ DECLARE(Bool, optimize_read_in_window_order, true, R"( Enable ORDER BY optimization in window clause for reading data in corresponding order in MergeTree tables. 
diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 88d39d6d393..4b014e141ac 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -64,6 +64,7 @@ static std::initializer_listgetSettingsRef()[Setting::read_in_order_use_virtual_row]) virtual_row_conversion = std::make_shared(std::move(*virtual_row_conversion_)); updateSortDescription(); diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.sh b/tests/queries/0_stateless/01786_explain_merge_tree.sh index 828012f56bc..9fb764dcd38 100755 --- a/tests/queries/0_stateless/01786_explain_merge_tree.sh +++ b/tests/queries/0_stateless/01786_explain_merge_tree.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) for i in $(seq 0 1) do - CH_CLIENT="$CLICKHOUSE_CLIENT --optimize_move_to_prewhere=1 --convert_query_to_cnf=0 --optimize_read_in_order=1 --enable_analyzer=$i" + CH_CLIENT="$CLICKHOUSE_CLIENT --optimize_move_to_prewhere=1 --convert_query_to_cnf=0 --optimize_read_in_order=1 --read_in_order_use_virtual_row=1 --enable_analyzer=$i" $CH_CLIENT -q "drop table if exists test_index" $CH_CLIENT -q "drop table if exists idx" diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql index 7bbdecf5501..4cc05203b6a 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql @@ -2,6 +2,7 @@ SET max_threads=0; SET optimize_read_in_order=1; SET optimize_trivial_insert_select = 1; SET read_in_order_two_level_merge_threshold=100; +SET read_in_order_use_virtual_row = 1; DROP TABLE IF EXISTS t_read_in_order; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 8826f2c27cf..0f100287815 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -1,4 +1,6 @@ +SET read_in_order_use_virtual_row = 1; + DROP TABLE IF EXISTS t; CREATE TABLE t diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql index 8cdcb4628ec..8e3f37b37b8 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql @@ -1,6 +1,6 @@ -- Tags: no-random-merge-tree-settings, no-object-storage -SET optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 1000; +SET optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 1000, read_in_order_use_virtual_row = 1; DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql index 3d6f9ad391b..52aa71437db 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql @@ -7,6 +7,8 @@ CREATE DATABASE test_03031; USE test_03031; +SET read_in_order_use_virtual_row = 1; + CREATE TABLE src (s String) ENGINE = MergeTree() ORDER BY s; INSERT INTO src(s) VALUES ('before moving 
tables'); CREATE TABLE dist (s String) ENGINE = Distributed(test_shard_localhost, test_03031, src); From b9232c20063054525f0c192f528d77d85e1af9ff Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Fri, 1 Nov 2024 10:09:54 +0800 Subject: [PATCH 182/566] add uts --- .../0_stateless/03258_quantile_exact_weighted_issue.reference | 2 ++ .../queries/0_stateless/03258_quantile_exact_weighted_issue.sql | 2 ++ 2 files changed, 4 insertions(+) create mode 100644 tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference create mode 100644 tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql diff --git a/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference new file mode 100644 index 00000000000..69afec5d545 --- /dev/null +++ b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference @@ -0,0 +1,2 @@ +AggregateFunction(quantilesExactWeighted(0.2, 0.4, 0.6, 0.8), UInt64, UInt8) +AggregateFunction(quantilesExactWeightedInterpolated(0.2, 0.4, 0.6, 0.8), UInt64, UInt8) diff --git a/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql new file mode 100644 index 00000000000..3069389f4e2 --- /dev/null +++ b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql @@ -0,0 +1,2 @@ +SELECT toTypeName(quantilesExactWeightedState(0.2, 0.4, 0.6, 0.8)(number + 1, 1) AS x) FROM numbers(49999); +SELECT toTypeName(quantilesExactWeightedInterpolatedState(0.2, 0.4, 0.6, 0.8)(number + 1, 1) AS x) FROM numbers(49999); From a77caf42149ab864a3c96df09d7fc8771362adaa Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 1 Nov 2024 03:41:03 +0000 Subject: [PATCH 183/566] Exempt refreshable materialized views from ignore_empty_sql_security_in_create_view_query --- src/Interpreters/InterpreterCreateQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index a38a7ab45d1..f6586f8bfc2 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1467,7 +1467,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) bool is_secondary_query = getContext()->getZooKeeperMetadataTransaction() && !getContext()->getZooKeeperMetadataTransaction()->isInitialQuery(); auto mode = getLoadingStrictnessLevel(create.attach, /*force_attach*/ false, /*has_force_restore_data_flag*/ false, is_secondary_query || is_restore_from_backup); - if (!create.sql_security && create.supportSQLSecurity() && !getContext()->getServerSettings()[ServerSetting::ignore_empty_sql_security_in_create_view_query]) + if (!create.sql_security && create.supportSQLSecurity() && (create.refresh_strategy || !getContext()->getServerSettings()[ServerSetting::ignore_empty_sql_security_in_create_view_query])) create.sql_security = std::make_shared(); if (create.sql_security) From 9015454b37627712eac4eae5126378ae68d8e98c Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Fri, 1 Nov 2024 11:06:21 +0000 Subject: [PATCH 184/566] Add setting --- src/Core/Settings.cpp | 5 +++++ src/Core/SettingsChangesHistory.cpp | 1 + src/Interpreters/MutationsInterpreter.cpp | 6 ++++-- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 6c269e22c35..17e2e1cc599 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ 
-3640,6 +3640,11 @@ Given that, for example, dictionaries, can be out of sync across nodes, mutation ``` +)", 0) \ + DECLARE(Bool, validate_mutation_query, true, R"( +Validate mutation queries before accepting them. Mutations are executed in the background, and running an invalid query will cause mutations to get stuck, requiring manual intervention. + +Only change this setting if you encounter a backward-incompatible bug. )", 0) \ DECLARE(Seconds, lock_acquire_timeout, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, R"( Defines how many seconds a locking request waits before failing. diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 3fe3e960dc6..613b9e2281a 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -64,6 +64,7 @@ static std::initializer_listgetSettingsRef()[Setting::validate_mutation_query]) + // Make sure the mutation query is valid + prepareQueryAffectedQueryTree(commands, source.getStorage(), context); QueryPlan plan; From 7691b7dd4435d1df5cd43cdf9169277aa9e81996 Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Fri, 1 Nov 2024 11:06:49 +0000 Subject: [PATCH 185/566] Fix test --- .../integration/test_failed_mutations/test.py | 32 +++++++------------ .../03256_invalid_mutation_query.sql | 2 ++ 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/tests/integration/test_failed_mutations/test.py b/tests/integration/test_failed_mutations/test.py index 5a2bf874da2..8d2ee46e748 100644 --- a/tests/integration/test_failed_mutations/test.py +++ b/tests/integration/test_failed_mutations/test.py @@ -27,6 +27,9 @@ REPLICATED_POSTPONE_MUTATION_LOG = ( POSTPONE_MUTATION_LOG = ( "According to exponential backoff policy, do not perform mutations for the part" ) +FAILING_MUTATION_QUERY = ( + "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT throwIf(1))" +) all_nodes = [node_with_backoff, node_no_backoff] @@ -83,17 +86,13 @@ def test_exponential_backoff_with_merge_tree(started_cluster, node, found_in_log assert not node.contains_in_log(POSTPONE_MUTATION_LOG) # Executing incorrect mutation. - node.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1" - ) + node.query(FAILING_MUTATION_QUERY) check_logs() node.query("KILL MUTATION WHERE table='test_mutations'") # Check that after kill new parts mutations are postponing. - node.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1" - ) + node.query(FAILING_MUTATION_QUERY) check_logs() @@ -101,9 +100,7 @@ def test_exponential_backoff_with_merge_tree(started_cluster, node, found_in_log def test_exponential_backoff_with_replicated_tree(started_cluster): prepare_cluster(True) - node_with_backoff.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1" - ) + node_with_backoff.query(FAILING_MUTATION_QUERY) assert node_with_backoff.wait_for_log_line(REPLICATED_POSTPONE_MUTATION_LOG) assert not node_no_backoff.contains_in_log(REPLICATED_POSTPONE_MUTATION_LOG) @@ -114,7 +111,7 @@ def test_exponential_backoff_create_dependent_table(started_cluster): # Executing incorrect mutation. 
node_with_backoff.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1" + "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS validate_mutation_query = 0" ) # Creating dependent table for mutation. @@ -148,9 +145,7 @@ def test_exponential_backoff_setting_override(started_cluster): node.query("INSERT INTO test_mutations SELECT * FROM system.numbers LIMIT 10") # Executing incorrect mutation. - node.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1" - ) + node.query(FAILING_MUTATION_QUERY) assert not node.contains_in_log(POSTPONE_MUTATION_LOG) @@ -166,9 +161,7 @@ def test_backoff_clickhouse_restart(started_cluster, replicated_table): node = node_with_backoff # Executing incorrect mutation. - node.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1" - ) + node.query(FAILING_MUTATION_QUERY) assert node.wait_for_log_line( REPLICATED_POSTPONE_MUTATION_LOG if replicated_table else POSTPONE_MUTATION_LOG ) @@ -193,11 +186,10 @@ def test_no_backoff_after_killing_mutation(started_cluster, replicated_table): node = node_with_backoff # Executing incorrect mutation. - node.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1" - ) + node.query(FAILING_MUTATION_QUERY) + # Executing correct mutation. - node.query("ALTER TABLE test_mutations DELETE WHERE x=1") + node.query("ALTER TABLE test_mutations DELETE WHERE x=1") assert node.wait_for_log_line( REPLICATED_POSTPONE_MUTATION_LOG if replicated_table else POSTPONE_MUTATION_LOG ) diff --git a/tests/queries/0_stateless/03256_invalid_mutation_query.sql b/tests/queries/0_stateless/03256_invalid_mutation_query.sql index 010f96414d4..2c554cabb9e 100644 --- a/tests/queries/0_stateless/03256_invalid_mutation_query.sql +++ b/tests/queries/0_stateless/03256_invalid_mutation_query.sql @@ -9,6 +9,8 @@ DELETE FROM t WHERE x IN (SELECT * FROM t2); -- { serverError 60 } ALTER TABLE t DELETE WHERE x in (SELECT y FROM t); -- { serverError 47 } ALTER TABLE t UPDATE x = 1 WHERE x IN (SELECT y FROM t); -- { serverError 47 } +DELETE FROM t WHERE x IN (SELECT foo FROM bar) SETTINGS validate_mutation_query = 0; + ALTER TABLE t ADD COLUMN y int; DELETE FROM t WHERE y in (SELECT y FROM t); From ce12f652c728df9513f5e8a940462558413bd58a Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 1 Nov 2024 11:25:21 +0000 Subject: [PATCH 186/566] Fix test flakiness --- .../queries/0_stateless/03246_alter_from_string_to_json.sql.j2 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 index e8760b659dc..2ccf2153699 100644 --- a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 @@ -1,3 +1,6 @@ +-- Random settings limits: index_granularity=(None, 60000) +-- Tags: long + set allow_experimental_json_type = 1; set max_block_size = 20000; From 752dfead2c5fc686b64d062b7f032196657295ff Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 13:06:29 +0000 Subject: [PATCH 187/566] Only RIGHT JOINs test --- src/Planner/PlannerJoinTree.cpp | 17 +++-- src/Planner/findParallelReplicasQuery.cpp | 11 ++- .../03254_pr_join_on_dups.reference | 72 ------------------- 
.../0_stateless/03254_pr_join_on_dups.sql | 28 +------- 4 files changed, 26 insertions(+), 102 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 160d7f07d5b..d79aa626d5e 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -659,7 +659,7 @@ std::unique_ptr createComputeAliasColumnsStep( } JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression, - const QueryNode & parent_query_node, + [[maybe_unused]] const QueryNode & parent_query_node, const SelectQueryInfo & select_query_info, const SelectQueryOptions & select_query_options, PlannerContextPtr & planner_context, @@ -958,6 +958,14 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres return true; }; + LOG_DEBUG( + getLogger(__PRETTY_FUNCTION__), + "parallel_replicas_node={} parent_query_node={}", + UInt64(planner_context->getGlobalPlannerContext()->parallel_replicas_node), + UInt64(&parent_query_node)); + + // const JoinNode * table_join_node = parent_query_node.getJoinTree()->as(); + /// query_plan can be empty if there is nothing to read if (query_plan.isInitialized() && parallel_replicas_enabled_for_storage(storage, settings)) { @@ -984,9 +992,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres } } else if ( - ClusterProxy::canUseParallelReplicasOnInitiator(query_context) - && planner_context->getGlobalPlannerContext()->parallel_replicas_node - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node) + ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) + // && (!table_join_node + // || (table_join_node && planner_context->getGlobalPlannerContext()->parallel_replicas_node + // && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node))) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 63c0ce8eb68..8d818daa575 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -265,11 +265,17 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr auto stack = getSupportingParallelReplicasQuery(query_tree_node.get()); /// Empty stack means that storage does not support parallel replicas. if (stack.empty()) + { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); return nullptr; + } /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) + { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); return query_node; + } /// This is needed to avoid infinite recursion. 
auto mutable_context = Context::createCopy(context); @@ -303,7 +309,10 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr new_stack.pop(); } } - + if (!res) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); + else + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); return res; } diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.reference b/tests/queries/0_stateless/03254_pr_join_on_dups.reference index 58602bafb5d..95cb0d8cae2 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.reference +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.reference @@ -88,34 +88,6 @@ right subs 4 l5 \N 4 r6 nr6 4 l6 \N 4 r6 nr6 9 l9 \N 9 r9 nr9 -full -0 \N 6 r7 nr7 -0 \N 7 r8 nr8 -1 l1 1 1 r1 \N -1 l1 1 1 r2 \N -2 l2 2 2 r3 \N -2 l3 3 2 r3 \N -3 l4 4 3 r4 \N -3 l4 4 3 r5 \N -4 l5 \N 4 r6 nr6 -4 l6 \N 4 r6 nr6 -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 9 r9 nr9 -full subs -0 \N 6 r7 nr7 -0 \N 7 r8 nr8 -1 l1 1 1 r1 \N -1 l1 1 1 r2 \N -2 l2 2 2 r3 \N -2 l3 3 2 r3 \N -3 l4 4 3 r4 \N -3 l4 4 3 r5 \N -4 l5 \N 4 r6 nr6 -4 l6 \N 4 r6 nr6 -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 9 r9 nr9 self inner 1 l1 1 1 l1 1 2 l2 2 2 l2 2 @@ -227,47 +199,3 @@ self right nullable vs not nullable 3 l4 4 2 l3 3 4 l5 \N 3 l4 4 4 l6 \N 3 l4 4 -self full -1 l1 1 1 l1 1 -2 l2 2 2 l2 2 -2 l2 2 2 l3 3 -2 l3 3 2 l2 2 -2 l3 3 2 l3 3 -3 l4 4 3 l4 4 -4 l5 \N 4 l5 \N -4 l5 \N 4 l6 \N -4 l6 \N 4 l5 \N -4 l6 \N 4 l6 \N -5 l7 \N 5 l7 \N -8 l8 \N 8 l8 \N -9 l9 \N 9 l9 \N -self full nullable -0 \N 4 l5 \N -0 \N 4 l6 \N -0 \N 5 l7 \N -0 \N 8 l8 \N -0 \N 9 l9 \N -1 l1 1 1 l1 1 -2 l2 2 2 l2 2 -2 l3 3 2 l3 3 -3 l4 4 3 l4 4 -4 l5 \N 0 \N -4 l6 \N 0 \N -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 0 \N -self full nullable vs not nullable -0 \N 4 l5 \N -0 \N 4 l6 \N -0 \N 5 l7 \N -0 \N 8 l8 \N -0 \N 9 l9 \N -1 l1 1 1 l1 1 -2 l2 2 2 l2 2 -2 l3 3 2 l2 2 -3 l4 4 2 l3 3 -4 l5 \N 3 l4 4 -4 l6 \N 3 l4 4 -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 0 \N diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 71695c0d486..22e94507c83 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -9,7 +9,7 @@ insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); -set enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; +set enable_parallel_replicas = 1, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; @@ -29,15 +29,6 @@ select 'right'; select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; select 'right subs'; select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; ---select 'right expr'; ---select X.*, Y.* from X right join Y on (X.id + 1) = (Y.id + 1) order by id; - -select 'full'; -select 
X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; -select 'full subs'; -select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; ---select 'full expr'; ---select X.*, Y.* from X full join Y on (X.id + 1) = (Y.id + 1) order by id; select 'self inner'; select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; @@ -45,7 +36,6 @@ select 'self inner nullable'; select X.*, s.* from X inner join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self inner nullable vs not nullable'; select X.*, s.* from X inner join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; --- TODO: s.y_b == '' instead of NULL select 'self inner nullable vs not nullable 2'; select Y.*, s.* from Y inner join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; @@ -55,7 +45,6 @@ select 'self left nullable'; select X.*, s.* from X left join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self left nullable vs not nullable'; select X.*, s.* from X left join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; --- TODO: s.y_b == '' instead of NULL select 'self left nullable vs not nullable 2'; select Y.*, s.* from Y left join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; @@ -65,17 +54,6 @@ select 'self right nullable'; select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self right nullable vs not nullable'; select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; ---select 'self right nullable vs not nullable 2'; ---select Y.*, s.* from Y right join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; -select 'self full'; -select X.*, s.* from X full join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; -select 'self full nullable'; -select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; -select 'self full nullable vs not nullable'; -select X.*, s.* from X full join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; ---select 'self full nullable vs not nullable 2'; ---select Y.*, s.* from Y full join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; - --- drop table X; --- drop table Y; +drop table X sync; +drop table Y sync; From 47ddd7fb6b230e0d9b0d2341e118bd88ba871d07 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 1 Nov 2024 14:33:03 +0000 Subject: [PATCH 188/566] Check suspicious and experimental types in JSON type hints --- src/DataTypes/DataTypeObject.cpp | 9 +++++++++ src/DataTypes/DataTypeObject.h | 2 ++ .../0_stateless/03261_json_hints_types_check.reference | 0 .../queries/0_stateless/03261_json_hints_types_check.sql | 9 +++++++++ 4 files changed, 20 insertions(+) create mode 100644 tests/queries/0_stateless/03261_json_hints_types_check.reference create mode 100644 tests/queries/0_stateless/03261_json_hints_types_check.sql diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 18bfed9c5c3..69ae9b8e906 100644 --- 
a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -230,6 +230,15 @@ MutableColumnPtr DataTypeObject::createColumn() const return ColumnObject::create(std::move(typed_path_columns), max_dynamic_paths, max_dynamic_types); } +void DataTypeObject::forEachChild(const ChildCallback & callback) const +{ + for (const auto & [path, type] : typed_paths) + { + callback(*type); + type->forEachChild(callback); + } +} + namespace { diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 7eb2e7729de..9321570fb75 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -50,6 +50,8 @@ public: bool equals(const IDataType & rhs) const override; + void forEachChild(const ChildCallback &) const override; + bool hasDynamicSubcolumnsData() const override { return true; } std::unique_ptr getDynamicSubcolumnData(std::string_view subcolumn_name, const SubstreamData & data, bool throw_if_null) const override; diff --git a/tests/queries/0_stateless/03261_json_hints_types_check.reference b/tests/queries/0_stateless/03261_json_hints_types_check.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03261_json_hints_types_check.sql b/tests/queries/0_stateless/03261_json_hints_types_check.sql new file mode 100644 index 00000000000..a407aa9474b --- /dev/null +++ b/tests/queries/0_stateless/03261_json_hints_types_check.sql @@ -0,0 +1,9 @@ +set allow_experimental_json_type=1; +set allow_experimental_variant_type=0; +set allow_experimental_object_type=0; + +select '{}'::JSON(a LowCardinality(Int128)); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select '{}'::JSON(a FixedString(100000)); -- {serverError ILLEGAL_COLUMN} +select '{}'::JSON(a Variant(Int32)); -- {serverError ILLEGAL_COLUMN} +select '{}'::JSON(a Object('json')); -- {serverError ILLEGAL_COLUMN} + From 31f761508875de1fdc678429b316e19556538eb4 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 14:52:42 +0000 Subject: [PATCH 189/566] Fix --- src/Planner/Planner.cpp | 4 ++-- src/Planner/Planner.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 260462652fc..4b5a2b903c0 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1263,7 +1263,7 @@ Planner::Planner(const QueryTreeNodePtr & query_tree_, findQueryForParallelReplicas(query_tree, select_query_options), findTableForParallelReplicas(query_tree, select_query_options), collectFiltersForAnalysis(query_tree, select_query_options)))) - , root_planner(true) + // , root_planner(true) { } @@ -1538,7 +1538,7 @@ void Planner::buildPlanForQueryNode() JoinTreeQueryPlan join_tree_query_plan; if (planner_context->getMutableQueryContext()->canUseTaskBasedParallelReplicas() - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node && !root_planner) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node /* && !root_planner*/) { join_tree_query_plan = buildQueryPlanForParallelReplicas(query_node, planner_context, select_query_info.storage_limits); } diff --git a/src/Planner/Planner.h b/src/Planner/Planner.h index bf11c9ef9cd..8d771c343c3 100644 --- a/src/Planner/Planner.h +++ b/src/Planner/Planner.h @@ -82,7 +82,7 @@ private: StorageLimitsList storage_limits; std::set used_row_policies; QueryNodeToPlanStepMapping query_node_to_plan_step_mapping; - bool root_planner = false; + // bool root_planner = false; }; } From 
8f86168c65ad74e6203c59620f4667d0083e3c9e Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Fri, 1 Nov 2024 14:53:06 +0000 Subject: [PATCH 190/566] Fix test --- tests/integration/test_failed_mutations/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_failed_mutations/test.py b/tests/integration/test_failed_mutations/test.py index 8d2ee46e748..c7e571ae171 100644 --- a/tests/integration/test_failed_mutations/test.py +++ b/tests/integration/test_failed_mutations/test.py @@ -28,7 +28,7 @@ POSTPONE_MUTATION_LOG = ( "According to exponential backoff policy, do not perform mutations for the part" ) FAILING_MUTATION_QUERY = ( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT throwIf(1))" + "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT throwIf(1)) SETTINGS allow_nondeterministic_mutations = 1" ) all_nodes = [node_with_backoff, node_no_backoff] @@ -111,7 +111,7 @@ def test_exponential_backoff_create_dependent_table(started_cluster): # Executing incorrect mutation. node_with_backoff.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS validate_mutation_query = 0" + "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations = 1, validate_mutation_query = 0" ) # Creating dependent table for mutation. From ac0902b08820dcd64cb41ba6bd34e4957fe8eadf Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 15:57:44 +0000 Subject: [PATCH 191/566] Fix --- src/Planner/findParallelReplicasQuery.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 8d818daa575..e89f06d6cc3 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -274,7 +274,8 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr if (stack.top() == query_tree_node.get()) { LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); - return query_node; + return nullptr; + // return query_node; } /// This is needed to avoid infinite recursion. 
From 67b773dcddc61c01d603b16ac59632e9a8cc4f26 Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Fri, 1 Nov 2024 16:01:17 +0000 Subject: [PATCH 192/566] Fix style --- tests/integration/test_failed_mutations/test.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration/test_failed_mutations/test.py b/tests/integration/test_failed_mutations/test.py index c7e571ae171..24b67ff86e5 100644 --- a/tests/integration/test_failed_mutations/test.py +++ b/tests/integration/test_failed_mutations/test.py @@ -27,9 +27,7 @@ REPLICATED_POSTPONE_MUTATION_LOG = ( POSTPONE_MUTATION_LOG = ( "According to exponential backoff policy, do not perform mutations for the part" ) -FAILING_MUTATION_QUERY = ( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT throwIf(1)) SETTINGS allow_nondeterministic_mutations = 1" -) +FAILING_MUTATION_QUERY = "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT throwIf(1)) SETTINGS allow_nondeterministic_mutations = 1" all_nodes = [node_with_backoff, node_no_backoff] From 52fe2f18b08fdaff1d93abf2730096676eb55228 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 1 Nov 2024 16:42:01 +0000 Subject: [PATCH 193/566] rm metadata_version znode creation from restarting thread --- .../ReplicatedMergeTreeRestartingThread.cpp | 25 ++++--------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 93124e634bd..c73c9f6d048 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -31,6 +31,7 @@ namespace ErrorCodes extern const int REPLICA_IS_ALREADY_ACTIVE; extern const int REPLICA_STATUS_CHANGED; extern const int LOGICAL_ERROR; + extern const int SUPPORT_IS_DISABLED; } namespace FailPoints @@ -217,26 +218,10 @@ bool ReplicatedMergeTreeRestartingThread::tryStartup() } else { - /// Table was created before 20.4 and was never altered, - /// let's initialize replica metadata version from global metadata version. - - const String & zookeeper_path = storage.zookeeper_path, & replica_path = storage.replica_path; - - Coordination::Stat table_metadata_version_stat; - zookeeper->get(zookeeper_path + "/metadata", &table_metadata_version_stat); - - Coordination::Requests ops; - ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/metadata", table_metadata_version_stat.version)); - ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/metadata_version", toString(table_metadata_version_stat.version), zkutil::CreateMode::Persistent)); - - Coordination::Responses res; - auto code = zookeeper->tryMulti(ops, res); - - if (code == Coordination::Error::ZBADVERSION) - throw Exception(ErrorCodes::REPLICA_STATUS_CHANGED, "Failed to initialize metadata_version " - "because table was concurrently altered, will retry"); - - zkutil::KeeperMultiException::check(code, ops, res); + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "It seems you have upgraded from a version earlier than 20.4 straight to one later than 24.10. " + "ClickHouse does not support upgrades that span more than a year. 
" + "Please update gradually (through intermediate versions)."); } storage.queue.removeCurrentPartsFromMutations(); From 7e476b62d286326445d1a720f483e64fd8eae9d7 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 1 Nov 2024 17:09:00 +0000 Subject: [PATCH 194/566] Fix tests --- tests/queries/0_stateless/03214_json_typed_dynamic_path.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql b/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql index 1f6a025825a..eee3d70b8da 100644 --- a/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql +++ b/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql @@ -1,6 +1,7 @@ -- Tags: no-fasttest set allow_experimental_json_type = 1; +set allow_experimental_dynamic_type = 1; drop table if exists test; create table test (json JSON(a Dynamic)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; insert into test select '{"a" : 42}'; From 38a3c6707525fba84c190e6a7e42f791b2da5659 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 1 Nov 2024 18:17:08 +0000 Subject: [PATCH 195/566] revert unnecessary changes --- .../Algorithms/CollapsingSortedAlgorithm.cpp | 6 +- .../Algorithms/CollapsingSortedAlgorithm.h | 6 +- .../Algorithms/MergingSortedAlgorithm.cpp | 3 +- .../Algorithms/MergingSortedAlgorithm.h | 6 +- .../Algorithms/ReplacingSortedAlgorithm.cpp | 6 +- .../Algorithms/ReplacingSortedAlgorithm.h | 6 +- .../VersionedCollapsingAlgorithm.cpp | 6 +- .../Algorithms/VersionedCollapsingAlgorithm.h | 6 +- .../Merges/CollapsingSortedTransform.h | 2 +- .../Merges/MergingSortedTransform.cpp | 2 +- .../Merges/MergingSortedTransform.h | 2 +- .../Merges/ReplacingSortedTransform.h | 4 +- .../Merges/VersionedCollapsingTransform.h | 4 +- .../QueryPlan/BuildQueryPipelineSettings.h | 1 + src/QueryPipeline/QueryPipelineBuilder.h | 6 - src/QueryPipeline/QueryPlanResourceHolder.h | 2 - src/Storages/MergeTree/MergeTask.cpp | 129 +++++++++++++++--- src/Storages/MergeTree/MergeTask.h | 5 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 5 +- 19 files changed, 134 insertions(+), 73 deletions(-) diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp index 1560e88ffef..07ee8f4ddef 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include @@ -30,18 +29,17 @@ CollapsingSortedAlgorithm::CollapsingSortedAlgorithm( size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - std::shared_ptr temp_data_buffer_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) : IMergingAlgorithmWithSharedChunks( header_, num_inputs, std::move(description_), - temp_data_buffer_.get(), + out_row_sources_buf_, max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) , sign_column_number(header_.getPositionByName(sign_column)) , only_positive_sign(only_positive_sign_) - , temp_data_buffer(temp_data_buffer_) , log(log_) { } diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h index b7bb9914cf8..99fd95d82d9 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h @@ -11,8 +11,6 @@ namespace Poco namespace DB { -class 
TemporaryDataBuffer; - /** Merges several sorted inputs to one. * For each group of consecutive identical values of the primary key (the columns by which the data is sorted), * keeps no more than one row with the value of the column `sign_column = -1` ("negative row") @@ -37,7 +35,7 @@ public: size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "CollapsingSortedAlgorithm"; } @@ -64,8 +62,6 @@ private: PODArray current_row_sources; /// Sources of rows with the current primary key size_t count_incorrect_data = 0; /// To prevent too many error messages from writing to the log. - std::shared_ptr temp_data_buffer = nullptr; - LoggerPtr log; void reportIncorrectData(); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index d4e4ba6aa5f..3a9cf7ee141 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -3,7 +3,6 @@ #include #include #include -#include namespace DB { @@ -16,7 +15,7 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_, - std::shared_ptr out_row_sources_buf_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) : header(std::move(header_)) , merged_data(use_average_block_sizes, max_block_size_, max_block_size_bytes_) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index fc300e41026..c889668a38e 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -9,8 +9,6 @@ namespace DB { -class TemporaryDataBuffer; - /// Merges several sorted inputs into one sorted output. class MergingSortedAlgorithm final : public IMergingAlgorithm { @@ -23,7 +21,7 @@ public: size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_ = 0, - std::shared_ptr out_row_sources_buf_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); void addInput(); @@ -47,7 +45,7 @@ private: /// Used in Vertical merge algorithm to gather non-PK/non-index columns (on next step) /// If it is not nullptr then it should be populated during execution - std::shared_ptr out_row_sources_buf = nullptr; + WriteBuffer * out_row_sources_buf = nullptr; /// Chunks currently being merged. 
Inputs current_inputs; diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp index a3a33080f52..cd347d371d9 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp @@ -5,7 +5,6 @@ #include #include #include -#include namespace DB { @@ -38,13 +37,12 @@ ReplacingSortedAlgorithm::ReplacingSortedAlgorithm( const String & version_column, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes, bool cleanup_, bool enable_vertical_final_) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) , cleanup(cleanup_), enable_vertical_final(enable_vertical_final_) - , temp_data_buffer(temp_data_buffer_) { if (!is_deleted_column.empty()) is_deleted_column_number = header_.getPositionByName(is_deleted_column); diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h index d3b9837a253..2f23f2a5c4d 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h @@ -24,8 +24,6 @@ struct ChunkSelectFinalIndices : public ChunkInfoCloneable temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final_ = false); @@ -61,8 +59,6 @@ private: RowRef selected_row; /// Last row with maximum version for current primary key, may extend lifetime of chunk in input source size_t max_pos = 0; /// The position (into current_row_sources) of the row with the highest version. - std::shared_ptr temp_data_buffer = nullptr; - /// Sources of rows with the current primary key. PODArray current_row_sources; diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp index 1ceb1f46234..9f124c6ba18 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp @@ -1,7 +1,6 @@ #include #include #include -#include namespace DB { @@ -15,13 +14,12 @@ VersionedCollapsingAlgorithm::VersionedCollapsingAlgorithm( const String & sign_column_, size_t max_block_size_rows_, size_t max_block_size_bytes_, - std::shared_ptr temp_data_buffer_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) /// -1 for +1 in FixedSizeDequeWithGaps's internal buffer. 3 is a reasonable minimum size to collapse anything. 
, max_rows_in_queue(std::min(std::max(3, max_block_size_rows_), MAX_ROWS_IN_MULTIVERSION_QUEUE) - 1) , current_keys(max_rows_in_queue) - , temp_data_buffer(temp_data_buffer_) { sign_column_number = header_.getPositionByName(sign_column_); } diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h index 6f877459147..e6d20ddac75 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h @@ -8,8 +8,6 @@ namespace DB { -class TemporaryDataBuffer; - /** Merges several sorted inputs to one. * For each group of consecutive identical values of the sorting key * (the columns by which the data is sorted, including specially specified version column), @@ -24,7 +22,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "VersionedCollapsingAlgorithm"; } @@ -39,8 +37,6 @@ private: FixedSizeDequeWithGaps current_keys; Int8 sign_in_queue = 0; - std::shared_ptr temp_data_buffer = nullptr; - std::queue current_row_sources; /// Sources of rows with the current primary key void insertGap(size_t gap_size); diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index 9b09c802783..99fb700abf1 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -23,7 +23,7 @@ public: bool only_positive_sign, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr out_row_sources_buf_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index 13330dcff6d..d2895a2a2e9 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -20,7 +20,7 @@ MergingSortedTransform::MergingSortedTransform( SortingQueueStrategy sorting_queue_strategy, UInt64 limit_, bool always_read_till_end_, - std::shared_ptr out_row_sources_buf_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes, bool have_all_inputs_) : IMergingTransform( diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index fb8e5ce74e3..6e52450efa7 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -20,7 +20,7 @@ public: SortingQueueStrategy sorting_queue_strategy, UInt64 limit_ = 0, bool always_read_till_end_ = false, - std::shared_ptr out_row_sources_buf_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool have_all_inputs_ = true); diff --git a/src/Processors/Merges/ReplacingSortedTransform.h b/src/Processors/Merges/ReplacingSortedTransform.h index a9d9f4fb619..dc262aab9ee 100644 --- a/src/Processors/Merges/ReplacingSortedTransform.h +++ b/src/Processors/Merges/ReplacingSortedTransform.h @@ -21,7 +21,7 @@ public: const String & is_deleted_column, const String & version_column, size_t 
max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final = false) @@ -34,7 +34,7 @@ public: version_column, max_block_size_rows, max_block_size_bytes, - temp_data_buffer_, + out_row_sources_buf_, use_average_block_sizes, cleanup, enable_vertical_final) diff --git a/src/Processors/Merges/VersionedCollapsingTransform.h b/src/Processors/Merges/VersionedCollapsingTransform.h index 0bdccd4795d..32b5d7bf343 100644 --- a/src/Processors/Merges/VersionedCollapsingTransform.h +++ b/src/Processors/Merges/VersionedCollapsingTransform.h @@ -21,7 +21,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, @@ -31,7 +31,7 @@ public: sign_column_, max_block_size_rows, max_block_size_bytes, - temp_data_buffer_, + out_row_sources_buf_, use_average_block_sizes) { } diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h index 1c03a4d74cd..d99f9a7d1f1 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h @@ -20,6 +20,7 @@ struct BuildQueryPipelineSettings ExpressionActionsSettings actions_settings; QueryStatusPtr process_list_element; ProgressCallback progress_callback = nullptr; + TemporaryFileLookupPtr temporary_file_lookup; const ExpressionActionsSettings & getActionsSettings() const { return actions_settings; } static BuildQueryPipelineSettings fromContext(ContextPtr from); diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index 1e274a97a08..a9e5b1535c0 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -197,12 +197,6 @@ public: void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } - template - void addResource(Resource resource, std::vector QueryPlanResourceHolder::*field) - { - (resources.*field).push_back(std::move(resource)); - } - /// Convert query pipeline to pipe. 
static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); static QueryPipeline getPipeline(QueryPipelineBuilder builder); diff --git a/src/QueryPipeline/QueryPlanResourceHolder.h b/src/QueryPipeline/QueryPlanResourceHolder.h index ee2ecc25cd5..10f7f39ab09 100644 --- a/src/QueryPipeline/QueryPlanResourceHolder.h +++ b/src/QueryPipeline/QueryPlanResourceHolder.h @@ -13,7 +13,6 @@ class QueryPlan; class Context; struct QueryIdHolder; -class TemporaryDataBuffer; struct QueryPlanResourceHolder { @@ -34,7 +33,6 @@ struct QueryPlanResourceHolder std::vector storage_holders; std::vector table_locks; std::vector> query_id_holders; - std::vector> rows_sources_temporary_file; }; } diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 9c2bd59e7cb..e73bc18557c 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -65,6 +65,11 @@ namespace ProfileEvents extern const Event MergeProjectionStageExecuteMilliseconds; } +namespace CurrentMetrics +{ + extern const Metric TemporaryFilesForMerge; +} + namespace DB { namespace Setting @@ -124,6 +129,66 @@ static ColumnsStatistics getStatisticsForColumns( return all_statistics; } + +/// Manages the "rows_sources" temporary file that is used during vertical merge. +class RowsSourcesTemporaryFile : public ITemporaryFileLookup +{ +public: + /// A logical name of the temporary file under which it will be known to the plan steps that use it. + static constexpr auto FILE_ID = "rows_sources"; + + explicit RowsSourcesTemporaryFile(TemporaryDataOnDiskScopePtr temporary_data_on_disk_) + : temporary_data_on_disk(temporary_data_on_disk_->childScope(CurrentMetrics::TemporaryFilesForMerge)) + { + } + + WriteBuffer & getTemporaryFileForWriting(const String & name) override + { + if (name != FILE_ID) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name); + + if (tmp_data_buffer) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was already requested for writing, there must be only one writer"); + + tmp_data_buffer = std::make_unique(temporary_data_on_disk.get()); + return *tmp_data_buffer; + } + + std::unique_ptr getTemporaryFileForReading(const String & name) override + { + if (name != FILE_ID) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name); + + if (!finalized) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file is not finalized yet"); + + /// tmp_disk might not create real file if no data was written to it. + if (final_size == 0) + return std::make_unique(); + + /// Reopen the file for each read so that multiple reads can be performed in parallel and there is no need to seek to the beginning.
+ return tmp_data_buffer->read(); + } + + /// Returns written data size in bytes + size_t finalizeWriting() + { + if (!tmp_data_buffer) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was not requested for writing"); + + auto stat = tmp_data_buffer->finishWriting(); + finalized = true; + final_size = stat.uncompressed_size; + return final_size; + } + +private: + std::unique_ptr tmp_data_buffer; + TemporaryDataOnDiskScopePtr temporary_data_on_disk; + bool finalized = false; + size_t final_size = 0; +}; + static void addMissedColumnsToSerializationInfos( size_t num_rows_in_parts, const Names & part_columns, @@ -425,7 +490,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const } case MergeAlgorithm::Vertical: { - ctx->rows_sources_temporary_file = std::make_unique(global_ctx->context->getTempDataOnDisk().get()); + ctx->rows_sources_temporary_file = std::make_shared(global_ctx->context->getTempDataOnDisk()); std::map local_merged_column_to_size; for (const auto & part : global_ctx->future_part->parts) @@ -802,11 +867,24 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const if (global_ctx->chosen_merge_algorithm != MergeAlgorithm::Vertical) return false; + size_t sum_input_rows_exact = global_ctx->merge_list_element_ptr->rows_read; + size_t input_rows_filtered = *global_ctx->input_rows_filtered; global_ctx->merge_list_element_ptr->columns_written = global_ctx->merging_columns.size(); global_ctx->merge_list_element_ptr->progress.store(ctx->column_sizes->keyColumnsWeight(), std::memory_order_relaxed); /// Ensure data has written to disk. - ctx->rows_sources_temporary_file->finishWriting(); + size_t rows_sources_count = ctx->rows_sources_temporary_file->finalizeWriting(); + /// In the special case when there is only one source part and no rows were skipped, we may have + /// skipped writing the rows_sources file. Otherwise rows_sources_count must be equal to the total + /// number of input rows. + /// Note that only one byte index is written for each row, so the number of rows is equal to the number of bytes written. + if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count + input_rows_filtered) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Number of rows in source parts ({}) excluding filtered rows ({}) differs from number " "of bytes written to rows_sources file ({}). 
It is a bug.", + sum_input_rows_exact, input_rows_filtered, rows_sources_count); + ctx->it_name_and_type = global_ctx->gathering_columns.cbegin(); @@ -838,12 +916,12 @@ class ColumnGathererStep : public ITransformingStep public: ColumnGathererStep( const Header & input_header_, - std::unique_ptr rows_sources_read_buf_, + const String & rows_sources_temporary_file_name_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool is_result_sparse_) : ITransformingStep(input_header_, input_header_, getTraits()) - , rows_sources_read_buf(std::move(rows_sources_read_buf_)) + , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , is_result_sparse(is_result_sparse_) @@ -851,13 +929,15 @@ public: String getName() const override { return "ColumnGatherer"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override { - const auto & header = pipeline.getHeader(); + const auto &header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); - if (!rows_sources_read_buf) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary data buffer for rows sources is not set"); + if (!pipeline_settings.temporary_file_lookup) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); + + auto rows_sources_read_buf = pipeline_settings.temporary_file_lookup->getTemporaryFileForReading(rows_sources_temporary_file_name); auto transform = std::make_unique( header, @@ -892,7 +972,7 @@ private: } MergeTreeData::MergingParams merging_params{}; - std::unique_ptr rows_sources_read_buf; + const String rows_sources_temporary_file_name; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool is_result_sparse; @@ -943,7 +1023,7 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic const auto data_settings = global_ctx->data->getSettings(); auto merge_step = std::make_unique( merge_column_query_plan.getCurrentHeader(), - ctx->rows_sources_temporary_file->read(), + RowsSourcesTemporaryFile::FILE_ID, (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], is_result_sparse); @@ -972,9 +1052,9 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic } auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); + pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_column_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); - builder->addResource(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); return {QueryPipelineBuilder::getPipeline(std::move(*builder)), std::move(indexes_to_recalc)}; } @@ -1347,7 +1427,7 @@ public: const SortDescription & sort_description_, const Names partition_key_columns_, const MergeTreeData::MergingParams & merging_params_, - std::shared_ptr rows_sources_temporary_file_, + const String & rows_sources_temporary_file_name_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool blocks_are_granules_size_, @@ -1357,7 +1437,7 @@ 
public: , sort_description(sort_description_) , partition_key_columns(partition_key_columns_) , merging_params(merging_params_) - , rows_sources_temporary_file(rows_sources_temporary_file_) + , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , blocks_are_granules_size(blocks_are_granules_size_) @@ -1367,7 +1447,7 @@ public: String getName() const override { return "MergeParts"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override { /// The order of the streams is important: when the key is matched, the elements go in the order of the source stream number. /// In the merged part, the lines with the same key must be in the ascending order of the identifier of original part, @@ -1377,6 +1457,14 @@ public: const auto & header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); + WriteBuffer * rows_sources_write_buf = nullptr; + if (!rows_sources_temporary_file_name.empty()) + { + if (!pipeline_settings.temporary_file_lookup) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); + rows_sources_write_buf = &pipeline_settings.temporary_file_lookup->getTemporaryFileForWriting(rows_sources_temporary_file_name); + } + switch (merging_params.mode) { case MergeTreeData::MergingParams::Ordinary: @@ -1389,14 +1477,14 @@ public: SortingQueueStrategy::Default, /* limit_= */0, /* always_read_till_end_= */false, - rows_sources_temporary_file, + rows_sources_write_buf, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Collapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, false, - merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Summing: @@ -1411,7 +1499,7 @@ public: case MergeTreeData::MergingParams::Replacing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.is_deleted_column, merging_params.version_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size, + merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size, cleanup); break; @@ -1424,7 +1512,7 @@ public: case MergeTreeData::MergingParams::VersionedCollapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); break; } @@ -1466,7 +1554,7 @@ private: const SortDescription sort_description; const Names partition_key_columns; const MergeTreeData::MergingParams merging_params{}; - std::shared_ptr rows_sources_temporary_file; + const String rows_sources_temporary_file_name; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool blocks_are_granules_size; @@ -1635,7 +1723,7 @@ void 
MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const sort_description, partition_key_columns, global_ctx->merging_params, - (is_vertical_merge ? ctx->rows_sources_temporary_file : nullptr), /// rows_sources' temporary file is used only for vertical merge + (is_vertical_merge ? RowsSourcesTemporaryFile::FILE_ID : ""), /// rows_sources' temporary file is used only for vertical merge (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], ctx->blocks_are_granules_size, @@ -1700,6 +1788,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const { auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); + pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_parts_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index a6969e3aa48..53792165987 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -42,6 +42,7 @@ namespace DB class MergeTask; using MergeTaskPtr = std::shared_ptr; +class RowsSourcesTemporaryFile; /** * Overview of the merge algorithm @@ -243,7 +244,7 @@ private: bool force_ttl{false}; CompressionCodecPtr compression_codec{nullptr}; size_t sum_input_rows_upper_bound{0}; - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes{}; /// For projections to rebuild @@ -322,7 +323,7 @@ private: struct VerticalMergeRuntimeContext : public IStageRuntimeContext { /// Begin dependencies from previous stage - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes; CompressionCodecPtr compression_codec; std::list::const_iterator it_name_and_type; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 5e9674fb5d6..5efd33ce09a 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -111,11 +111,10 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( } if (!prewhere_actions.steps.empty()) - LOG_TRACE(log, "PREWHERE condition was split into {} steps", prewhere_actions.steps.size()); + LOG_TRACE(log, "PREWHERE condition was split into {} steps: {}", prewhere_actions.steps.size(), prewhere_actions.dumpConditions()); if (prewhere_info) - LOG_TEST(log, "Original PREWHERE DAG:{}\n{}\nPREWHERE actions:\n{}", - prewhere_actions.dumpConditions(), + LOG_TEST(log, "Original PREWHERE DAG:\n{}\nPREWHERE actions:\n{}", prewhere_info->prewhere_actions.dumpDAG(), (!prewhere_actions.steps.empty() ? 
prewhere_actions.dump() : std::string(""))); } From 2cc2f31d9aebcf170b771be4d21cda63efcaf34e Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 1 Nov 2024 18:18:12 +0000 Subject: [PATCH 196/566] Fix error Invalid number of rows in Chunk with Variant column --- src/Columns/ColumnVariant.cpp | 2 +- .../0_stateless/03261_variant_permutation_bug.reference | 0 tests/queries/0_stateless/03261_variant_permutation_bug.sql | 6 ++++++ 3 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03261_variant_permutation_bug.reference create mode 100644 tests/queries/0_stateless/03261_variant_permutation_bug.sql diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index 564b60e1c1d..d5c8386d35f 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -952,7 +952,7 @@ ColumnPtr ColumnVariant::permute(const Permutation & perm, size_t limit) const if (hasOnlyNulls()) { if (limit) - return cloneResized(limit); + return cloneResized(limit ? std::min(size(), limit) : size()); /// If no limit, we can just return current immutable column. return this->getPtr(); diff --git a/tests/queries/0_stateless/03261_variant_permutation_bug.reference b/tests/queries/0_stateless/03261_variant_permutation_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03261_variant_permutation_bug.sql b/tests/queries/0_stateless/03261_variant_permutation_bug.sql new file mode 100644 index 00000000000..373dd9e19fa --- /dev/null +++ b/tests/queries/0_stateless/03261_variant_permutation_bug.sql @@ -0,0 +1,6 @@ +set allow_experimental_variant_type=1; +create table test (x UInt64, d Variant(UInt64)) engine=Memory; +insert into test select number, null from numbers(200000); +select d from test order by d::String limit 32213 format Null; +drop table test; + From 6d5c707d2cfc029528ba1a32ceb4cd313e198147 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 21:32:07 +0000 Subject: [PATCH 197/566] Cleanup --- src/Planner/findParallelReplicasQuery.cpp | 14 +++++++------- .../02771_parallel_replicas_analyzer.sql | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index e89f06d6cc3..5db67d7c793 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -97,8 +97,8 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre case QueryTreeNodeType::JOIN: { const auto & join_node = query_tree_node->as(); - auto join_kind = join_node.getKind(); - auto join_strictness = join_node.getStrictness(); + const auto join_kind = join_node.getKind(); + const auto join_strictness = join_node.getStrictness(); if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) query_tree_node = join_node.getLeftTableExpression().get(); @@ -266,7 +266,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// Empty stack means that storage does not support parallel replicas. 
if (stack.empty()) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); return nullptr; } @@ -310,10 +310,10 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr new_stack.pop(); } } - if (!res) - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); - else - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); + // if (!res) + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); + // else + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); return res; } diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql index 081077ba460..a2d26a8fc78 100644 --- a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql @@ -1,5 +1,5 @@ -- Tags: zookeeper -DROP TABLE IF EXISTS join_inner_table__fuzz_146_replicated; +DROP TABLE IF EXISTS join_inner_table__fuzz_146_replicated SYNC; CREATE TABLE join_inner_table__fuzz_146_replicated ( `id` UUID, @@ -52,4 +52,4 @@ WHERE GROUP BY is_initial_query, query ORDER BY is_initial_query DESC, c, query; -DROP TABLE join_inner_table__fuzz_146_replicated; +DROP TABLE join_inner_table__fuzz_146_replicated SYNC; From a4e576924b16ed199e3726313f96c241b604d4b6 Mon Sep 17 00:00:00 2001 From: 0xMihalich Date: Sat, 2 Nov 2024 18:48:57 +1000 Subject: [PATCH 198/566] Fix: ERROR: column "attgenerated" does not exist for old PostgreSQL databases Restore support for GreenPlum and older versions of PostgreSQL without affecting existing functionality. --- .../PostgreSQL/fetchPostgreSQLTableStructure.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 45fd52f27ab..5268dbcb59f 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -307,6 +307,13 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( if (!columns.empty()) columns_part = fmt::format(" AND attname IN ('{}')", boost::algorithm::join(columns, "','")); + /// Bypassing the error of the missing column `attgenerated` in the system table `pg_attribute` for PostgreSQL versions below 12. + /// This trick involves executing a special query to the DBMS in advance to obtain the correct line with comment /// if column has GENERATED. + /// The result of the query will be the name of the column `attgenerated` or an empty string declaration for PostgreSQL version 11 and below. + /// This change does not degrade the function's performance but restores support for older versions and fixes ERROR: column "attgenerated" does not exist.
+ pqxx::result gen_result{tx.exec("select case when current_setting('server_version_num')::int < 120000 then '''''' else 'attgenerated' end as generated")}; + std::string generated = gen_result[0][0].as(); + std::string query = fmt::format( "SELECT attname AS name, " /// column name "format_type(atttypid, atttypmod) AS type, " /// data type @@ -315,11 +322,11 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "atttypid as type_id, " "atttypmod as type_modifier, " "attnum as att_num, " - "attgenerated as generated " /// if column has GENERATED + "{} as generated " /// if column has GENERATED "FROM pg_attribute " "WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) {}" "AND NOT attisdropped AND attnum > 0 " - "ORDER BY attnum ASC", where, columns_part); + "ORDER BY attnum ASC", generated, where, columns_part); /// Now we use variable `generated` to form query string. End of trick. auto postgres_table_with_schema = postgres_schema.empty() ? postgres_table : doubleQuoteString(postgres_schema) + '.' + doubleQuoteString(postgres_table); table.physical_columns = readNamesAndTypesList(tx, postgres_table_with_schema, query, use_nulls, false); From 1e3f08ab3e48d666cd5e3b02cfecf50915738377 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sat, 2 Nov 2024 19:44:03 +0000 Subject: [PATCH 199/566] Only with analyzer --- tests/queries/0_stateless/03254_pr_join_on_dups.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 22e94507c83..5f2f209d0b0 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -9,7 +9,7 @@ insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); -set enable_parallel_replicas = 1, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; +set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; From 2530fd233f3c4d81ff7ad6f18ec0e3a73320c8d0 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sat, 2 Nov 2024 21:36:02 +0000 Subject: [PATCH 200/566] Added 03261_pr_semi_anti_join --- .../03261_pr_semi_anti_join.reference | 16 +++++++++++ .../0_stateless/03261_pr_semi_anti_join.sql | 27 +++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 tests/queries/0_stateless/03261_pr_semi_anti_join.reference create mode 100644 tests/queries/0_stateless/03261_pr_semi_anti_join.sql diff --git a/tests/queries/0_stateless/03261_pr_semi_anti_join.reference b/tests/queries/0_stateless/03261_pr_semi_anti_join.reference new file mode 100644 index 00000000000..782147f1f6f --- /dev/null +++ b/tests/queries/0_stateless/03261_pr_semi_anti_join.reference @@ -0,0 +1,16 @@ +semi left +2 a3 2 b1 +2 a6 2 b1 +4 a5 4 b3 +semi right +2 a3 2 b1 +2 a3 2 b2 +4 a5 4 b3 +4 a5 4 b4 +4 a5 4 b5 +anti left +0 a1 0 +1 a2 1 +3 a4 3 +anti right +0 5 b6 diff --git a/tests/queries/0_stateless/03261_pr_semi_anti_join.sql 
b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql new file mode 100644 index 00000000000..d2ea3725d6b --- /dev/null +++ b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1 SYNC; +DROP TABLE IF EXISTS t2 SYNC; +create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); + +CREATE TABLE t1 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t1', '1') order by tuple(); +CREATE TABLE t2 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t2', '1') order by tuple(); + +INSERT INTO t1 (x, s) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'), (2, 'a6'); +INSERT INTO t2 (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +SET join_use_nulls = 0; +set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; + +SELECT 'semi left'; +SELECT t1.*, t2.* FROM t1 SEMI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'semi right'; +SELECT t1.*, t2.* FROM t1 SEMI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'anti left'; +SELECT t1.*, t2.* FROM t1 ANTI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'anti right'; +SELECT t1.*, t2.* FROM t1 ANTI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +DROP TABLE t1 SYNC; +DROP TABLE t2 SYNC; From c7f970405885d6dae54c9eb94201c662528ab965 Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Mon, 4 Nov 2024 09:45:26 +0000 Subject: [PATCH 201/566] Try fix integration test --- tests/integration/test_quorum_inserts/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index eefc4882e8e..66f96d61b3e 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -366,7 +366,7 @@ def test_insert_quorum_with_ttl(started_cluster): zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") -def test_insert_quorum_with_keeper_loss_connection(): +def test_insert_quorum_with_keeper_loss_connection(started_cluster): zero.query( "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" ) From 24a7e0f4ee52e47cadd00a41bff80eb3ac614960 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 4 Nov 2024 13:44:36 +0100 Subject: [PATCH 202/566] Fix missing cluster startup for test_quorum_inserts::test_insert_quorum_with_keeper_fail def test_insert_quorum_with_keeper_loss_connection(): > zero.query( "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" ) def query( > return self.client.query( E AttributeError: 'NoneType' object has no attribute 'query' CI: https://s3.amazonaws.com/clickhouse-test-reports/71406/8b3ce129456a1f85839a48538780639e2e3c3020/integration_tests__asan__old_analyzer__[6_6]//home/ubuntu/actions-runner/_work/_temp/test/output_dir/integration_run_parallel3_0.log Signed-off-by: Azat Khuzhin --- tests/integration/test_quorum_inserts/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index eefc4882e8e..66f96d61b3e 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -366,7 +366,7 @@ def 
test_insert_quorum_with_ttl(started_cluster): zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") -def test_insert_quorum_with_keeper_loss_connection(): +def test_insert_quorum_with_keeper_loss_connection(started_cluster): zero.query( "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" ) From 935a29485c60038b14e4e8c87c8e021fc05f7928 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 4 Nov 2024 14:32:54 +0000 Subject: [PATCH 203/566] Fix logs --- src/Planner/findParallelReplicasQuery.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 5db67d7c793..314a7f06137 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -273,7 +273,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); return nullptr; // return query_node; } @@ -427,10 +427,10 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr return nullptr; const auto * res = findTableForParallelReplicas(query_tree_node.get()); - if (res) - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); - else - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); + // if (res) + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); + // else + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); return res; } From 47b1b2c1584babf53eadf062a8421e1ce481580c Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Mon, 4 Nov 2024 14:51:43 +0000 Subject: [PATCH 204/566] Try fix integration test - second attempt --- tests/integration/test_quorum_inserts/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index 66f96d61b3e..f64864185c5 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -368,7 +368,7 @@ def test_insert_quorum_with_ttl(started_cluster): def test_insert_quorum_with_keeper_loss_connection(started_cluster): zero.query( - "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" + "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_loss ON CLUSTER cluster" ) create_query = ( "CREATE TABLE test_insert_quorum_with_keeper_loss" From 876158672c07361f54574c2eefabff5de9e0a48f Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Mon, 4 Nov 2024 17:53:48 +0000 Subject: [PATCH 205/566] Fix integration test: Sync all drop table calls --- tests/integration/test_quorum_inserts/test.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index f64864185c5..350da822c80 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -46,7 +46,7 @@ def started_cluster(): def test_simple_add_replica(started_cluster): - zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER 
cluster") + zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster SYNC") create_query = ( "CREATE TABLE test_simple " @@ -82,12 +82,12 @@ def test_simple_add_replica(started_cluster): assert "1\t2011-01-01\n" == first.query("SELECT * from test_simple") assert "1\t2011-01-01\n" == second.query("SELECT * from test_simple") - zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster") + zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster SYNC") def test_drop_replica_and_achieve_quorum(started_cluster): zero.query( - "DROP TABLE IF EXISTS test_drop_replica_and_achieve_quorum ON CLUSTER cluster" + "DROP TABLE IF EXISTS test_drop_replica_and_achieve_quorum ON CLUSTER cluster SYNC" ) create_query = ( @@ -156,7 +156,7 @@ def test_insert_quorum_with_drop_partition(started_cluster, add_new_data): if add_new_data else "test_quorum_insert_with_drop_partition" ) - zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster SYNC") create_query = ( f"CREATE TABLE {table_name} ON CLUSTER cluster " @@ -208,7 +208,7 @@ def test_insert_quorum_with_drop_partition(started_cluster, add_new_data): assert TSV("") == TSV(zero.query(f"SELECT * FROM {table_name}")) assert TSV("") == TSV(second.query(f"SELECT * FROM {table_name}")) - zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster SYNC") @pytest.mark.parametrize(("add_new_data"), [False, True]) @@ -224,8 +224,8 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): if add_new_data else "test_insert_quorum_with_move_partition_destination" ) - zero.query(f"DROP TABLE IF EXISTS {source_table_name} ON CLUSTER cluster") - zero.query(f"DROP TABLE IF EXISTS {destination_table_name} ON CLUSTER cluster") + zero.query(f"DROP TABLE IF EXISTS {source_table_name} ON CLUSTER cluster SYNC") + zero.query(f"DROP TABLE IF EXISTS {destination_table_name} ON CLUSTER cluster SYNC") create_source = ( f"CREATE TABLE {source_table_name} ON CLUSTER cluster " @@ -291,12 +291,12 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): assert TSV("") == TSV(zero.query(f"SELECT * FROM {source_table_name}")) assert TSV("") == TSV(second.query(f"SELECT * FROM {source_table_name}")) - zero.query(f"DROP TABLE IF EXISTS {source_table_name} ON CLUSTER cluster") - zero.query(f"DROP TABLE IF EXISTS {destination_table_name} ON CLUSTER cluster") + zero.query(f"DROP TABLE IF EXISTS {source_table_name} ON CLUSTER cluster SYNC") + zero.query(f"DROP TABLE IF EXISTS {destination_table_name} ON CLUSTER cluster SYNC") def test_insert_quorum_with_ttl(started_cluster): - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") + zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster SYNC") create_query = ( "CREATE TABLE test_insert_quorum_with_ttl " @@ -363,12 +363,12 @@ def test_insert_quorum_with_ttl(started_cluster): ) ) - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") + zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster SYNC") def test_insert_quorum_with_keeper_loss_connection(started_cluster): zero.query( - "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_loss ON CLUSTER cluster" + "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_loss ON CLUSTER cluster SYNC" ) create_query = ( "CREATE TABLE test_insert_quorum_with_keeper_loss" From 
64fbc9eb8d328db7013525fd6bb34fe0939b7c68 Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Mon, 4 Nov 2024 18:06:08 +0000 Subject: [PATCH 206/566] Style --- tests/integration/test_quorum_inserts/test.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index 350da822c80..0809d2c003f 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -296,7 +296,9 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): def test_insert_quorum_with_ttl(started_cluster): - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster SYNC") + zero.query( + "DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster SYNC" + ) create_query = ( "CREATE TABLE test_insert_quorum_with_ttl " @@ -363,7 +365,9 @@ def test_insert_quorum_with_ttl(started_cluster): ) ) - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster SYNC") + zero.query( + "DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster SYNC" + ) def test_insert_quorum_with_keeper_loss_connection(started_cluster): From a6b55563c73ff10b42569d17ee24457ffff91e4e Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 4 Nov 2024 18:32:05 +0000 Subject: [PATCH 207/566] Fix FULL joins --- src/Planner/PlannerJoinTree.cpp | 37 +++++----- .../03254_pr_join_on_dups.reference | 72 +++++++++++++++++++ .../0_stateless/03254_pr_join_on_dups.sql | 12 ++++ 3 files changed, 103 insertions(+), 18 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index d79aa626d5e..c2acbd661c8 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -659,7 +659,7 @@ std::unique_ptr createComputeAliasColumnsStep( } JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression, - [[maybe_unused]] const QueryNode & parent_query_node, + const QueryTreeNodePtr & parent_join_tree, const SelectQueryInfo & select_query_info, const SelectQueryOptions & select_query_options, PlannerContextPtr & planner_context, @@ -958,17 +958,22 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres return true; }; - LOG_DEBUG( - getLogger(__PRETTY_FUNCTION__), - "parallel_replicas_node={} parent_query_node={}", - UInt64(planner_context->getGlobalPlannerContext()->parallel_replicas_node), - UInt64(&parent_query_node)); - - // const JoinNode * table_join_node = parent_query_node.getJoinTree()->as(); - /// query_plan can be empty if there is nothing to read if (query_plan.isInitialized() && parallel_replicas_enabled_for_storage(storage, settings)) { + const bool allow_parallel_replicas_for_table_expression = [](const QueryTreeNodePtr & join_tree_node) + { + const JoinNode * join_node = join_tree_node->as(); + if (!join_node) + return true; + + const auto join_kind = join_node->getKind(); + if (join_kind == JoinKind::Left || join_kind == JoinKind::Right || join_kind == JoinKind::Inner) + return true; + + return false; + }(parent_join_tree); + if (query_context->canUseParallelReplicasCustomKey() && query_context->getClientInfo().distributed_depth == 0) { if (auto cluster = query_context->getClusterForParallelReplicas(); @@ -991,11 +996,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if ( - 
ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) - // && (!table_join_node - // || (table_join_node && planner_context->getGlobalPlannerContext()->parallel_replicas_node - // && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node))) + else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context) && allow_parallel_replicas_for_table_expression) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); @@ -1828,8 +1829,8 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { - const QueryNode & parent_query_node = query_node->as(); - auto table_expressions_stack = buildTableExpressionsStack(query_node->as().getJoinTree()); + const QueryTreeNodePtr & join_tree_node = query_node->as().getJoinTree(); + auto table_expressions_stack = buildTableExpressionsStack(join_tree_node); size_t table_expressions_stack_size = table_expressions_stack.size(); bool is_single_table_expression = table_expressions_stack_size == 1; @@ -1866,7 +1867,7 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, auto left_table_expression = table_expressions_stack.front(); auto left_table_expression_query_plan = buildQueryPlanForTableExpression( left_table_expression, - parent_query_node, + join_tree_node, select_query_info, select_query_options, planner_context, @@ -1941,7 +1942,7 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, bool is_remote = planner_context->getTableExpressionDataOrThrow(table_expression).isRemote(); query_plans_stack.push_back(buildQueryPlanForTableExpression( table_expression, - parent_query_node, + join_tree_node, select_query_info, select_query_options, planner_context, diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.reference b/tests/queries/0_stateless/03254_pr_join_on_dups.reference index 95cb0d8cae2..58602bafb5d 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.reference +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.reference @@ -88,6 +88,34 @@ right subs 4 l5 \N 4 r6 nr6 4 l6 \N 4 r6 nr6 9 l9 \N 9 r9 nr9 +full +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +full subs +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 self inner 1 l1 1 1 l1 1 2 l2 2 2 l2 2 @@ -199,3 +227,47 @@ self right nullable vs not nullable 3 l4 4 2 l3 3 4 l5 \N 3 l4 4 4 l6 \N 3 l4 4 +self full +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self full nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 0 \N +4 l6 \N 0 \N +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self full nullable vs not nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql 
b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 5f2f209d0b0..222f7693090 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -30,6 +30,11 @@ select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, select 'right subs'; select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'full'; +select X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'full subs'; +select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; + select 'self inner'; select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self inner nullable'; @@ -55,5 +60,12 @@ select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order select 'self right nullable vs not nullable'; select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable'; +select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable vs not nullable'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; + drop table X sync; drop table Y sync; From 157e1695d5f8d8dd0962f89a782317a5249ad8eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 4 Nov 2024 20:02:57 +0100 Subject: [PATCH 208/566] Fix ExecuteScalarSubqueriesMatcher visiting join elements --- src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index d4da038c089..c80852e9ae7 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -63,7 +63,7 @@ bool ExecuteScalarSubqueriesMatcher::needChildVisit(ASTPtr & node, const ASTPtr if (node->as()) { /// Do not go to FROM, JOIN, UNION. 
- if (child->as() || child->as()) + if (child->as() || child->as() || child->as()) return false; } From b4a3f6d3709b87f5b1a30316b60f042fc1c0f2ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 4 Nov 2024 20:11:33 +0100 Subject: [PATCH 209/566] Make sure to update table_join children properly --- src/Analyzer/JoinNode.cpp | 10 ++++++++-- src/Interpreters/QueryNormalizer.cpp | 6 ++++++ .../TimeSeries/PrometheusRemoteReadProtocol.cpp | 1 + 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/src/Analyzer/JoinNode.cpp b/src/Analyzer/JoinNode.cpp index bf99c014826..722c1e19b7e 100644 --- a/src/Analyzer/JoinNode.cpp +++ b/src/Analyzer/JoinNode.cpp @@ -48,9 +48,15 @@ ASTPtr JoinNode::toASTTableJoin() const auto join_expression_ast = children[join_expression_child_index]->toAST(); if (is_using_join_expression) - join_ast->using_expression_list = std::move(join_expression_ast); + { + join_ast->using_expression_list = join_expression_ast; + join_ast->children.push_back(join_ast->using_expression_list); + } else - join_ast->on_expression = std::move(join_expression_ast); + { + join_ast->on_expression = join_expression_ast; + join_ast->children.push_back(join_ast->on_expression); + } } return join_ast; diff --git a/src/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp index a8639906aad..bba30fb5194 100644 --- a/src/Interpreters/QueryNormalizer.cpp +++ b/src/Interpreters/QueryNormalizer.cpp @@ -161,7 +161,13 @@ void QueryNormalizer::visit(ASTTablesInSelectQueryElement & node, const ASTPtr & { auto & join = node.table_join->as(); if (join.on_expression) + { + ASTPtr original_on_expression = join.on_expression; visit(join.on_expression, data); + if (join.on_expression != original_on_expression) + join.children = { join.on_expression }; + } + } } diff --git a/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp b/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp index df0f6b8bc5c..b8a3b2911b9 100644 --- a/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp +++ b/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp @@ -245,6 +245,7 @@ namespace table_join->strictness = JoinStrictness::Semi; table_join->on_expression = makeASTFunction("equals", makeASTColumn(data_table_id, TimeSeriesColumnNames::ID), makeASTColumn(tags_table_id, TimeSeriesColumnNames::ID)); + table_join->children.push_back(table_join->on_expression); table->table_join = table_join; auto table_exp = std::make_shared(); From 35a0d08a32302247b3689e887e9a3b72bb9152e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 4 Nov 2024 20:12:34 +0100 Subject: [PATCH 210/566] RewriteArrayExistsFunctionVisitor: Assert proper child on join expression --- .../RewriteArrayExistsFunctionVisitor.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp b/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp index 22ce91d8c67..60bac2fb7a3 100644 --- a/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp +++ b/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp @@ -20,21 +20,21 @@ void RewriteArrayExistsFunctionMatcher::visit(ASTPtr & ast, Data & data) if (join->using_expression_list) { auto * it = std::find(join->children.begin(), join->children.end(), join->using_expression_list); + if (it == join->children.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Could not find join->using_expression_list in '{}'", join->formatForLogging()); 
visit(join->using_expression_list, data); - - if (it && *it != join->using_expression_list) - *it = join->using_expression_list; + *it = join->using_expression_list; } if (join->on_expression) { auto * it = std::find(join->children.begin(), join->children.end(), join->on_expression); + if (it == join->children.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Could not find join->on_expression in '{}'", join->formatForLogging()); visit(join->on_expression, data); - - if (it && *it != join->on_expression) - *it = join->on_expression; + *it = join->on_expression; } } } From 389fdd80d36b5073698b87b7a7d24dcc4c6560bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 4 Nov 2024 20:15:29 +0100 Subject: [PATCH 211/566] Add test for crasher --- ...ptimize_rewrite_array_exists_to_has_crash.reference | 0 ...3261_optimize_rewrite_array_exists_to_has_crash.sql | 10 ++++++++++ 2 files changed, 10 insertions(+) create mode 100644 tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.reference create mode 100644 tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql diff --git a/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.reference b/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql b/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql new file mode 100644 index 00000000000..5a54d86f339 --- /dev/null +++ b/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql @@ -0,0 +1,10 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/71382 +DROP TABLE IF EXISTS rewrite; +CREATE TABLE rewrite (c0 Int) ENGINE = Memory(); +SELECT 1 +FROM rewrite +INNER JOIN rewrite AS y ON ( + SELECT 1 +) +INNER JOIN rewrite AS z ON 1 +SETTINGS allow_experimental_analyzer=0, optimize_rewrite_array_exists_to_has=1; From 19422e75b0fbe7fbbe68bef98f10f22ee046db4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 4 Nov 2024 20:24:06 +0100 Subject: [PATCH 212/566] Style --- src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp b/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp index 60bac2fb7a3..2890357494d 100644 --- a/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp +++ b/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp @@ -6,6 +6,12 @@ namespace DB { + +namespace ErrorCode +{ +extern const int LOGICAL_ERROR; +} + void RewriteArrayExistsFunctionMatcher::visit(ASTPtr & ast, Data & data) { if (auto * func = ast->as()) From 8c5ab63345f385a75caa943f4b50169a13e3b470 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 4 Nov 2024 22:37:57 +0000 Subject: [PATCH 213/566] Cleanup --- src/Planner/Planner.cpp | 3 +-- src/Planner/Planner.h | 1 - src/Planner/PlannerJoinTree.cpp | 6 ------ src/Planner/findParallelReplicasQuery.cpp | 22 +--------------------- 4 files changed, 2 insertions(+), 30 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 4b5a2b903c0..17277dfe8cd 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1263,7 +1263,6 @@ Planner::Planner(const QueryTreeNodePtr & query_tree_, findQueryForParallelReplicas(query_tree, select_query_options), findTableForParallelReplicas(query_tree, 
select_query_options), collectFiltersForAnalysis(query_tree, select_query_options)))) - // , root_planner(true) { } @@ -1538,7 +1537,7 @@ void Planner::buildPlanForQueryNode() JoinTreeQueryPlan join_tree_query_plan; if (planner_context->getMutableQueryContext()->canUseTaskBasedParallelReplicas() - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node /* && !root_planner*/) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node) { join_tree_query_plan = buildQueryPlanForParallelReplicas(query_node, planner_context, select_query_info.storage_limits); } diff --git a/src/Planner/Planner.h b/src/Planner/Planner.h index 8d771c343c3..ae78f05cbd4 100644 --- a/src/Planner/Planner.h +++ b/src/Planner/Planner.h @@ -82,7 +82,6 @@ private: StorageLimitsList storage_limits; std::set used_row_policies; QueryNodeToPlanStepMapping query_node_to_plan_step_mapping; - // bool root_planner = false; }; } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index c2acbd661c8..c1b8f999f22 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -669,12 +669,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres auto query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); - LOG_DEBUG( - getLogger(__PRETTY_FUNCTION__), - "pr_enabled={} table_expression:\n{}", - settings[Setting::allow_experimental_parallel_reading_from_replicas].toString(), - table_expression->dumpTree()); - auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression); QueryProcessingStage::Enum from_stage = QueryProcessingStage::Enum::FetchColumns; diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 314a7f06137..bda96f0c31f 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -265,18 +265,11 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr auto stack = getSupportingParallelReplicasQuery(query_tree_node.get()); /// Empty stack means that storage does not support parallel replicas. if (stack.empty()) - { - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); return nullptr; - } /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) - { - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); return nullptr; - // return query_node; - } /// This is needed to avoid infinite recursion. 
auto mutable_context = Context::createCopy(context); @@ -310,17 +303,11 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr new_stack.pop(); } } - // if (!res) - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); - // else - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); return res; } static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * query_tree_node) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); - std::stack join_nodes; while (query_tree_node || !join_nodes.empty()) { @@ -426,12 +413,7 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr if (!context->canUseParallelReplicasOnFollower()) return nullptr; - const auto * res = findTableForParallelReplicas(query_tree_node.get()); - // if (res) - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); - // else - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); - return res; + return findTableForParallelReplicas(query_tree_node.get()); } JoinTreeQueryPlan buildQueryPlanForParallelReplicas( @@ -439,8 +421,6 @@ JoinTreeQueryPlan buildQueryPlanForParallelReplicas( const PlannerContextPtr & planner_context, std::shared_ptr storage_limits) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); - auto processed_stage = QueryProcessingStage::WithMergeableState; auto context = planner_context->getQueryContext(); From 8c2d1ec7f8ef625c7bfb914a551af183520a3119 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 5 Nov 2024 12:35:23 +0100 Subject: [PATCH 214/566] Allow ExecuteScalarSubqueriesVisitor on ARRAY JOIN --- .../ExecuteScalarSubqueriesVisitor.cpp | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index c80852e9ae7..943febf4b0e 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -63,10 +63,22 @@ bool ExecuteScalarSubqueriesMatcher::needChildVisit(ASTPtr & node, const ASTPtr if (node->as()) { /// Do not go to FROM, JOIN, UNION. - if (child->as() || child->as() || child->as()) + if (child->as() || child->as()) return false; } + if (auto tables = node->as()) + { + /// Contrary to what's said in the code block above, ARRAY JOIN needs to resolve the subquery if possible + /// and assign an alias for 02367_optimize_trivial_count_with_array_join to pass. Otherwise it will fail in + /// ArrayJoinedColumnsVisitor (`No alias for non-trivial value in ARRAY JOIN: _a`) + /// This looks 100% as a incomplete code working on top of a bug, but this code has already been made obsolete + /// by the new analyzer, so it's an inconvenience we can live with until we deprecate it. 
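    /// [Editor annotation, not part of the original patch] Net effect of the block below: for a
    /// tables element node only its array_join child is visited (returns true); any other child,
    /// including FROM/JOIN subqueries inside the same element, is still skipped (returns false).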
+ if (child == tables->array_join) + return true; + return false; + } + return true; } From 996773b205121f55d6f066826dab95b38b49dbbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 5 Nov 2024 12:39:36 +0100 Subject: [PATCH 215/566] Test with both analyzers --- .../03261_optimize_rewrite_array_exists_to_has_crash.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql b/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql index 5a54d86f339..e0018632be4 100644 --- a/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql +++ b/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql @@ -7,4 +7,4 @@ INNER JOIN rewrite AS y ON ( SELECT 1 ) INNER JOIN rewrite AS z ON 1 -SETTINGS allow_experimental_analyzer=0, optimize_rewrite_array_exists_to_has=1; +SETTINGS optimize_rewrite_array_exists_to_has=1; From e198b205092dcb0bec14b8a3a08763cc68a4a1b9 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 29 Oct 2024 21:09:03 +0000 Subject: [PATCH 216/566] CI: Stateless Tests with praktika --- .github/workflows/pr.yaml | 168 ++++++++++++++++++-- ci/__init__.py | 0 ci/docker/stateless-test/Dockerfile | 107 +++++++++++++ ci/docker/stateless-test/requirements.txt | 5 + ci/jobs/__init__.py | 0 ci/jobs/build_clickhouse.py | 65 ++++++-- ci/jobs/fast_test.py | 117 +------------- ci/jobs/functional_stateless_tests.py | 119 +++++++++++++- ci/jobs/scripts/__init__.py | 0 ci/jobs/scripts/clickhouse_proc.py | 144 +++++++++++++++++ ci/jobs/scripts/functional_tests_results.py | 3 + ci/praktika/_settings.py | 3 + ci/praktika/hook_html.py | 110 ++++++++++--- ci/praktika/job.py | 46 +++++- ci/praktika/json.html | 156 +++++++++++------- ci/praktika/mangle.py | 1 - ci/praktika/native_jobs.py | 5 +- ci/praktika/param.py | 8 + ci/praktika/result.py | 19 ++- ci/praktika/runner.py | 20 ++- ci/praktika/s3.py | 2 +- ci/praktika/workflow.py | 1 + ci/praktika/yaml_generator.py | 3 + ci/settings/definitions.py | 38 +++-- ci/workflows/pull_request.py | 53 ++++-- tests/clickhouse-test | 11 +- tests/config/config.d/ssl_certs.xml | 4 +- tests/config/install.sh | 24 ++- tests/docker_scripts/setup_minio.sh | 28 ++-- 29 files changed, 955 insertions(+), 305 deletions(-) create mode 100644 ci/__init__.py create mode 100644 ci/docker/stateless-test/Dockerfile create mode 100644 ci/docker/stateless-test/requirements.txt create mode 100644 ci/jobs/__init__.py create mode 100644 ci/jobs/scripts/__init__.py create mode 100644 ci/jobs/scripts/clickhouse_proc.py create mode 100644 ci/praktika/param.py diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 34c794f6088..0c3f74aeac8 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -30,6 +30,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -68,6 +71,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -106,6 +112,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -144,6 +153,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + 
ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -172,16 +184,19 @@ jobs: python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - build_amd64_debug: + build_amd_debug: runs-on: [builder] needs: [config_workflow, docker_builds] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgYW1kNjQgZGVidWc=') }} - name: "Build amd64 debug" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} + name: "Build (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -205,21 +220,24 @@ jobs: . /tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_debug)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_debug)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug: + build_amd_release: runs-on: [builder] - needs: [config_workflow, docker_builds, build_amd64_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKQ==') }} - name: "Stateless tests (amd, debug)" + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} + name: "Build (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -243,14 +261,137 @@ jobs: . 
/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug_parallel_1_2: + runs-on: [builder] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMS8yKQ==') }} + name: "Stateless tests (amd, debug) (parallel 1/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug_parallel_2_2: + runs-on: [builder] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMi8yKQ==') }} + name: "Stateless tests (amd, debug) (parallel 2/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug_non_parallel: + runs-on: [style-checker] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAobm9uLXBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd, debug) (non-parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi finish_workflow: runs-on: [ci_services] - needs: [config_workflow, docker_builds, style_check, fast_test, build_amd64_debug, stateless_tests_amd_debug] + needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, stateless_tests_amd_debug_parallel_1_2, stateless_tests_amd_debug_parallel_2_2, stateless_tests_amd_debug_non_parallel] if: ${{ !cancelled() }} name: "Finish Workflow" outputs: @@ -258,6 +399,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | diff --git a/ci/__init__.py b/ci/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile new file mode 100644 index 00000000000..4abd8204f1d --- /dev/null +++ b/ci/docker/stateless-test/Dockerfile @@ -0,0 +1,107 @@ +# docker build -t clickhouse/stateless-test . 
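# [Editor annotation, not part of the original patch] This image bundles the external tools the
# stateless runner shells out to directly: the minio server and mc client, hadoop 3.3.1, azurite,
# and the ClickHouse ODBC driver, so the praktika job can bring up its own object storage locally
# instead of depending on side containers.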
+FROM ubuntu:22.04 + +# ARG for quick switch to a given ubuntu mirror +ARG apt_archive="http://archive.ubuntu.com" +RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list + +ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz" + +# moreutils - provides ts fo FT +# expect, bzip2 - requried by FT +# bsdmainutils - provides hexdump for FT + +# golang version 1.13 on Ubuntu 20 is enough for tests +RUN apt-get update -y \ + && env DEBIAN_FRONTEND=noninteractive \ + apt-get install --yes --no-install-recommends \ + awscli \ + brotli \ + lz4 \ + expect \ + moreutils \ + bzip2 \ + bsdmainutils \ + golang \ + lsof \ + mysql-client=8.0* \ + ncdu \ + netcat-openbsd \ + nodejs \ + npm \ + odbcinst \ + openjdk-11-jre-headless \ + openssl \ + postgresql-client \ + python3 \ + python3-pip \ + qemu-user-static \ + sqlite3 \ + sudo \ + tree \ + unixodbc \ + rustc \ + cargo \ + zstd \ + file \ + jq \ + pv \ + zip \ + unzip \ + p7zip-full \ + curl \ + wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* + +ARG PROTOC_VERSION=25.1 +RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip \ + && unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d /usr/local \ + && rm protoc-${PROTOC_VERSION}-linux-x86_64.zip + +COPY requirements.txt / +RUN pip3 install --no-cache-dir -r /requirements.txt + +RUN mkdir -p /tmp/clickhouse-odbc-tmp \ + && cd /tmp/clickhouse-odbc-tmp \ + && curl -L ${odbc_driver_url} | tar --strip-components=1 -xz clickhouse-odbc-1.1.6-Linux \ + && mkdir /usr/local/lib64 -p \ + && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib64/ \ + && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \ + && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \ + && sed -i 's"=libclickhouseodbc"=/usr/local/lib64/libclickhouseodbc"' /etc/odbcinst.ini \ + && rm -rf /tmp/clickhouse-odbc-tmp + +ENV TZ=Europe/Amsterdam +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +ENV NUM_TRIES=1 + +# Unrelated to vars in setup_minio.sh, but should be the same there +# to have the same binaries for local running scenario +ARG MINIO_SERVER_VERSION=2024-08-03T04-33-23Z +ARG MINIO_CLIENT_VERSION=2024-07-31T15-58-33Z +ARG TARGETARCH + +# Download Minio-related binaries +RUN arch=${TARGETARCH:-amd64} \ + && curl -L "https://dl.min.io/server/minio/release/linux-${arch}/archive/minio.RELEASE.${MINIO_SERVER_VERSION}" -o /minio \ + && curl -L "https://dl.min.io/client/mc/release/linux-${arch}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -o /mc \ + && chmod +x /mc /minio + +ENV MINIO_ROOT_USER="clickhouse" +ENV MINIO_ROOT_PASSWORD="clickhouse" + +# for minio to work without root +RUN chmod 777 /home +ENV HOME="/home" +ENV TEMP_DIR="/tmp/praktika" +ENV PATH="/wd/tests:/tmp/praktika/input:$PATH" + +RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \ + && tar -xvf hadoop-3.3.1.tar.gz \ + && rm -rf hadoop-3.3.1.tar.gz + + +RUN npm install -g azurite@3.30.0 \ + && npm install -g tslib && npm install -g node diff --git a/ci/docker/stateless-test/requirements.txt b/ci/docker/stateless-test/requirements.txt new file mode 100644 index 00000000000..d556d23485f --- /dev/null +++ b/ci/docker/stateless-test/requirements.txt @@ -0,0 +1,5 @@ +Jinja2==3.1.3 
+numpy==1.26.4 +requests==2.32.3 +pandas==1.5.3 +scipy==1.12.0 diff --git a/ci/jobs/__init__.py b/ci/jobs/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index cfa358b4059..3db88938f23 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -1,5 +1,6 @@ import argparse +from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils @@ -14,7 +15,9 @@ class JobStages(metaclass=MetaClasses.WithIter): def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") parser.add_argument( - "BUILD_TYPE", help="Type: __" + "--build-type", + help="Type: __", + default=None, ) parser.add_argument( "--param", @@ -24,6 +27,18 @@ def parse_args(): return parser.parse_args() +CMAKE_CMD = """cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA \ +-DCMAKE_BUILD_TYPE={BUILD_TYPE} \ +-DSANITIZE={SANITIZER} \ +-DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \ +-DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \ +-DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \ +{AUX_DEFS} \ +-DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 \ +-DCOMPILER_CACHE={CACHE_TYPE} \ +-DENABLE_BUILD_PROFILING=1 {DIR}""" + + def main(): args = parse_args() @@ -42,20 +57,45 @@ def main(): cmake_build_type = "Release" sanitizer = "" - if "debug" in args.BUILD_TYPE.lower(): - print("Build type set: debug") - cmake_build_type = "Debug" + if args.build_type and get_param(): + assert ( + False + ), "Build type must provided via job parameter (CI case) or via --build-type input argument not both" - if "asan" in args.BUILD_TYPE.lower(): - print("Sanitizer set: address") - sanitizer = "address" + build_type = args.build_type or get_param() + assert ( + build_type + ), "build_type must be provided either as input argument or as a parameter of parametrized job in CI" + build_type = build_type.lower() # if Environment.is_local_run(): # build_cache_type = "disabled" # else: - build_cache_type = "sccache" + CACHE_TYPE = "sccache" + + if "debug" in build_type: + print("Build type set: debug") + BUILD_TYPE = "Debug" + AUX_DEFS = " -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " + elif "release" in build_type: + print("Build type set: release") + BUILD_TYPE = "None" + AUX_DEFS = " -DENABLE_TESTS=1 " + + if "asan" in build_type: + print("Sanitizer set: address") + SANITIZER = "address" + else: + SANITIZER = "" + + cmake_cmd = CMAKE_CMD.format( + BUILD_TYPE=BUILD_TYPE, + CACHE_TYPE=CACHE_TYPE, + SANITIZER=SANITIZER, + AUX_DEFS=AUX_DEFS, + DIR=Utils.cwd(), + ) - current_directory = Utils.cwd() build_dir = f"{Settings.TEMP_DIR}/build" res = True @@ -75,12 +115,7 @@ def main(): results.append( Result.create_from_command_execution( name="Cmake configuration", - command=f"cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE={cmake_build_type} \ - -DSANITIZE={sanitizer} -DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \ - -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \ - -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \ - -DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 -DCOMPILER_CACHE={build_cache_type} 
-DENABLE_TESTS=1 \ - -DENABLE_BUILD_PROFILING=1 {current_directory}", + command=cmake_cmd, workdir=build_dir, with_log=True, ) diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index dc5e1c975a6..cb7d925fead 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -1,120 +1,13 @@ import argparse -import threading -from pathlib import Path from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils +from ci.jobs.scripts.clickhouse_proc import ClickHouseProc from ci.jobs.scripts.functional_tests_results import FTResultsProcessor -class ClickHouseProc: - def __init__(self): - self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server" - self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid" - self.config_file = f"{self.ch_config_dir}/config.xml" - self.user_files_path = f"{self.ch_config_dir}/user_files" - self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" - self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination" - self.proc = None - self.pid = 0 - nproc = int(Utils.cpu_count() / 2) - self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \ - | tee -a \"{self.test_output_file}\"" - # TODO: store info in case of failure - self.info = "" - self.info_file = "" - - Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir) - Utils.set_env("CLICKHOUSE_CONFIG", self.config_file) - Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path) - Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas") - - def start(self): - print("Starting ClickHouse server") - Shell.check(f"rm {self.pid_file}") - - def run_clickhouse(): - self.proc = Shell.run_async( - self.command, verbose=True, suppress_output=True - ) - - thread = threading.Thread(target=run_clickhouse) - thread.daemon = True # Allow program to exit even if thread is still running - thread.start() - - # self.proc = Shell.run_async(self.command, verbose=True) - - started = False - try: - for _ in range(5): - pid = Shell.get_output(f"cat {self.pid_file}").strip() - if not pid: - Utils.sleep(1) - continue - started = True - print(f"Got pid from fs [{pid}]") - _ = int(pid) - break - except Exception: - pass - - if not started: - stdout = self.proc.stdout.read().strip() if self.proc.stdout else "" - stderr = self.proc.stderr.read().strip() if self.proc.stderr else "" - Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr) - return False - - print(f"ClickHouse server started successfully, pid [{pid}]") - return True - - def wait_ready(self): - res, out, err = 0, "", "" - attempts = 30 - delay = 2 - for attempt in range(attempts): - res, out, err = Shell.get_res_stdout_stderr( - 'clickhouse-client --query "select 1"', verbose=True - ) - if out.strip() == "1": - print("Server ready") - break - else: - print(f"Server not ready, wait") - Utils.sleep(delay) - else: - Utils.print_formatted_error( - f"Server not ready after [{attempts*delay}s]", out, err - ) - return False - return True - - def run_fast_test(self): - if 
Path(self.test_output_file).exists(): - Path(self.test_output_file).unlink() - exit_code = Shell.run(self.fast_test_command) - return exit_code == 0 - - def terminate(self): - print("Terminate ClickHouse process") - timeout = 10 - if self.proc: - Utils.terminate_process_group(self.proc.pid) - - self.proc.terminate() - try: - self.proc.wait(timeout=10) - print(f"Process {self.proc.pid} terminated gracefully.") - except Exception: - print( - f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..." - ) - Utils.terminate_process_group(self.proc.pid, force=True) - self.proc.wait() # Wait for the process to be fully killed - print(f"Process {self.proc} was killed.") - - def clone_submodules(): submodules_to_update = [ "contrib/sysroot", @@ -240,7 +133,7 @@ def main(): Shell.check(f"rm -rf {build_dir} && mkdir -p {build_dir}") results.append( Result.create_from_command_execution( - name="Checkout Submodules for Minimal Build", + name="Checkout Submodules", command=clone_submodules, ) ) @@ -295,8 +188,8 @@ def main(): if res and JobStages.CONFIG in stages: commands = [ f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server", - f"cp {current_directory}/programs/server/config.xml {current_directory}/programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", - f"{current_directory}/tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client", + f"cp ./programs/server/config.xml ./programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", + f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --fast-test", # f"cp -a {current_directory}/programs/server/config.d/log_to_console.xml {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/", f"rm -f {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/secure_ports.xml", update_path_ch_config, @@ -310,7 +203,7 @@ def main(): ) res = results[-1].is_ok() - CH = ClickHouseProc() + CH = ClickHouseProc(fast_test=True) if res and JobStages.TEST in stages: stop_watch_ = Utils.Stopwatch() step_name = "Start ClickHouse Server" diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py index dfdd5821a19..d77522ed73a 100644 --- a/ci/jobs/functional_stateless_tests.py +++ b/ci/jobs/functional_stateless_tests.py @@ -1,31 +1,78 @@ import argparse +import os +from pathlib import Path +from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils +from ci.jobs.scripts.clickhouse_proc import ClickHouseProc +from ci.jobs.scripts.functional_tests_results import FTResultsProcessor +from ci.settings.definitions import azure_secret + class JobStages(metaclass=MetaClasses.WithIter): - CHECKOUT_SUBMODULES = "checkout" - CMAKE = "cmake" - BUILD = "build" + INSTALL_CLICKHOUSE = "install" + START = "start" + TEST = "test" def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") - parser.add_argument("BUILD_TYPE", help="Type: ") + parser.add_argument( + "BUILD_TYPE", help="Type: __" + ) parser.add_argument("--param", help="Optional custom job start stage", default=None) return parser.parse_args() +def run_stateless_test( + no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int +): + assert not (no_parallel and no_sequiential) + test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" 
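    # [Editor annotation, not part of the original patch] The job parameter carries 1-based batch
    # numbers ("parallel 1/2", "parallel 2/2"), while clickhouse-test's --run-by-hash-num appears to
    # be 0-based, hence the `batch_num - 1` below; e.g. "parallel 2/2" becomes
    # "--run-by-hash-total 2 --run-by-hash-num 1".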
+ aux = "" + nproc = int(Utils.cpu_count() / 2) + if batch_num and batch_total: + aux = f"--run-by-hash-total {batch_total} --run-by-hash-num {batch_num-1}" + statless_test_command = f"clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \ + --no-drop-if-fail --capture-client-stacktrace --queries /repo/tests/queries --test-runs 1 --hung-check \ + {'--no-parallel' if no_parallel else ''} {'--no-sequential' if no_sequiential else ''} \ + --print-time --jobs {nproc} --report-coverage --report-logs-stats {aux} \ + --queries ./tests/queries -- '' | ts '%Y-%m-%d %H:%M:%S' \ + | tee -a \"{test_output_file}\"" + if Path(test_output_file).exists(): + Path(test_output_file).unlink() + Shell.run(statless_test_command, verbose=True) + + def main(): args = parse_args() + params = get_param().split(" ") + parallel_or_sequential = None + no_parallel = False + no_sequential = False + if params: + parallel_or_sequential = params[0] + if len(params) > 1: + batch_num, total_batches = map(int, params[1].split("/")) + else: + batch_num, total_batches = 0, 0 + if parallel_or_sequential: + no_parallel = parallel_or_sequential == "non-parallel" + no_sequential = parallel_or_sequential == "parallel" + + os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( + f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", + verbose=True, + ) stop_watch = Utils.Stopwatch() stages = list(JobStages) - stage = args.param or JobStages.CHECKOUT_SUBMODULES + stage = args.param or JobStages.INSTALL_CLICKHOUSE if stage: assert stage in JobStages, f"--param must be one of [{list(JobStages)}]" print(f"Job will start from stage [{stage}]") @@ -36,9 +83,65 @@ def main(): res = True results = [] - if res and JobStages.CHECKOUT_SUBMODULES in stages: - info = Shell.get_output(f"ls -l {Settings.INPUT_DIR}") - results.append(Result(name="TEST", status=Result.Status.SUCCESS, info=info)) + Utils.add_to_PATH(f"{Settings.INPUT_DIR}:tests") + + if res and JobStages.INSTALL_CLICKHOUSE in stages: + commands = [ + f"chmod +x {Settings.INPUT_DIR}/clickhouse", + f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-server", + f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-client", + f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server", + f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", + f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage", + # update_path_ch_config, + f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml", + f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml", + f"clickhouse-server --version", + ] + results.append( + Result.create_from_command_execution( + name="Install ClickHouse", command=commands, with_log=True + ) + ) + res = results[-1].is_ok() + + CH = ClickHouseProc() + if res and JobStages.START in stages: + stop_watch_ = Utils.Stopwatch() + step_name = "Start ClickHouse Server" + print(step_name) + res = res and CH.start_minio() + res = res and CH.start() + res = res and CH.wait_ready() + results.append( + Result.create_from( + name=step_name, + status=res, + stopwatch=stop_watch_, + files=( + [ + 
"/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log", + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log", + ] + if not res + else [] + ), + ) + ) + res = results[-1].is_ok() + + if res and JobStages.TEST in stages: + stop_watch_ = Utils.Stopwatch() + step_name = "Tests" + print(step_name) + run_stateless_test( + no_parallel=no_parallel, + no_sequiential=no_sequential, + batch_num=batch_num, + batch_total=total_batches, + ) + results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run()) + results[-1].set_timing(stopwatch=stop_watch_) res = results[-1].is_ok() Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/scripts/__init__.py b/ci/jobs/scripts/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py new file mode 100644 index 00000000000..cc822eab693 --- /dev/null +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -0,0 +1,144 @@ +import threading +import time +from pathlib import Path + +from praktika.settings import Settings +from praktika.utils import Shell, Utils + + +class ClickHouseProc: + BACKUPS_XML = """ + + + local + {CH_RUNTIME_DIR}/var/lib/clickhouse/disks/backups/ + + +""" + + def __init__(self, fast_test=False): + self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server" + self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid" + self.config_file = f"{self.ch_config_dir}/config.xml" + self.user_files_path = f"{self.ch_config_dir}/user_files" + self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" + self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination" + self.proc = None + self.pid = 0 + nproc = int(Utils.cpu_count() / 2) + self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \ + | tee -a \"{self.test_output_file}\"" + # TODO: store info in case of failure + self.info = "" + self.info_file = "" + + self.minio_cmd = f"tests/docker_scripts/setup_minio.sh stateless 2>&1 > {Settings.OUTPUT_DIR}/minio.log" + + Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir) + Utils.set_env("CLICKHOUSE_CONFIG", self.config_file) + Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path) + Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas") + + if not fast_test: + with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file: + file.write(self.BACKUPS_XML) + + self.minio_proc = None + + def start_minio(self): + print("Starting minio") + + def run_minio(): + self.minio_proc = Shell.run_async( + self.minio_cmd, verbose=True, suppress_output=True + ) + + thread = threading.Thread(target=run_minio) + thread.daemon = True # Allow program to exit even if thread is still running + thread.start() + time.sleep(5) + return thread.is_alive() + + def start(self): + print("Starting ClickHouse server") + Shell.check(f"rm {self.pid_file}") + + def run_clickhouse(): + self.proc = Shell.run_async( + self.command, verbose=True, suppress_output=False + ) + + thread = threading.Thread(target=run_clickhouse) + 
thread.daemon = True # Allow program to exit even if thread is still running + thread.start() + + started = False + try: + for _ in range(5): + pid = Shell.get_output(f"cat {self.pid_file}").strip() + if not pid: + Utils.sleep(1) + continue + started = True + print(f"Got pid from fs [{pid}]") + _ = int(pid) + break + except Exception: + pass + + if not started: + stdout = self.proc.stdout.read().strip() if self.proc.stdout else "" + stderr = self.proc.stderr.read().strip() if self.proc.stderr else "" + Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr) + return False + + print(f"ClickHouse server started successfully, pid [{pid}]") + return True + + def wait_ready(self): + res, out, err = 0, "", "" + attempts = 30 + delay = 2 + for attempt in range(attempts): + res, out, err = Shell.get_res_stdout_stderr( + 'clickhouse-client --query "select 1"', verbose=True + ) + if out.strip() == "1": + print("Server ready") + break + else: + print(f"Server not ready, wait") + Utils.sleep(delay) + else: + Utils.print_formatted_error( + f"Server not ready after [{attempts*delay}s]", out, err + ) + return False + return True + + def run_fast_test(self): + if Path(self.test_output_file).exists(): + Path(self.test_output_file).unlink() + exit_code = Shell.run(self.fast_test_command) + return exit_code == 0 + + def terminate(self): + print("Terminate ClickHouse process") + timeout = 10 + if self.proc: + Utils.terminate_process_group(self.proc.pid) + + self.proc.terminate() + try: + self.proc.wait(timeout=10) + print(f"Process {self.proc.pid} terminated gracefully.") + except Exception: + print( + f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..." + ) + Utils.terminate_process_group(self.proc.pid, force=True) + self.proc.wait() # Wait for the process to be fully killed + print(f"Process {self.proc} was killed.") + + if self.minio_proc: + Utils.terminate_process_group(self.minio_proc.pid) diff --git a/ci/jobs/scripts/functional_tests_results.py b/ci/jobs/scripts/functional_tests_results.py index aba3e4f7f5b..06989fb0a44 100755 --- a/ci/jobs/scripts/functional_tests_results.py +++ b/ci/jobs/scripts/functional_tests_results.py @@ -232,6 +232,8 @@ class FTResultsProcessor: else: pass + info = f"Total: {s.total - s.skipped}, Failed: {s.failed}" + # TODO: !!! 
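        # [Editor annotation, not part of the original patch] The `info` string computed above is
        # attached to the aggregated Result below (info=info), presumably so the report can show a
        # one-line "Total/Failed" summary next to the per-test results.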
# def test_result_comparator(item): # # sort by status then by check name @@ -253,6 +255,7 @@ class FTResultsProcessor: results=test_results, status=state, files=[self.tests_output_file], + info=info, with_info_from_results=False, ) diff --git a/ci/praktika/_settings.py b/ci/praktika/_settings.py index 1777257f484..17da1519e37 100644 --- a/ci/praktika/_settings.py +++ b/ci/praktika/_settings.py @@ -80,6 +80,8 @@ class _Settings: CI_DB_TABLE_NAME = "" CI_DB_INSERT_TIMEOUT_SEC = 5 + DISABLE_MERGE_COMMIT = True + _USER_DEFINED_SETTINGS = [ "S3_ARTIFACT_PATH", @@ -112,6 +114,7 @@ _USER_DEFINED_SETTINGS = [ "SECRET_GH_APP_PEM_KEY", "SECRET_GH_APP_ID", "MAIN_BRANCH", + "DISABLE_MERGE_COMMIT", ] diff --git a/ci/praktika/hook_html.py b/ci/praktika/hook_html.py index f4bd4435511..cea84192d0d 100644 --- a/ci/praktika/hook_html.py +++ b/ci/praktika/hook_html.py @@ -11,50 +11,112 @@ from praktika.result import Result, ResultInfo from praktika.runtime import RunConfig from praktika.s3 import S3 from praktika.settings import Settings -from praktika.utils import Shell, Utils +from praktika.utils import Utils @dataclasses.dataclass class GitCommit: - date: str - message: str + # date: str + # message: str sha: str @staticmethod - def from_json(json_data: str) -> List["GitCommit"]: + def from_json(file) -> List["GitCommit"]: commits = [] + json_data = None try: - data = json.loads(json_data) - + with open(file, "r", encoding="utf-8") as f: + json_data = json.load(f) commits = [ GitCommit( - message=commit["messageHeadline"], - sha=commit["oid"], - date=commit["committedDate"], + # message=commit["messageHeadline"], + sha=commit["sha"], + # date=commit["committedDate"], ) - for commit in data.get("commits", []) + for commit in json_data ] except Exception as e: print( - f"ERROR: Failed to deserialize commit's data: [{json_data}], ex: [{e}]" + f"ERROR: Failed to deserialize commit's data [{json_data}], ex: [{e}]" ) return commits + @classmethod + def update_s3_data(cls): + env = _Environment.get() + sha = env.SHA + if not sha: + print("WARNING: Failed to retrieve commit sha") + return + commits = cls.pull_from_s3() + for commit in commits: + if sha == commit.sha: + print( + f"INFO: Sha already present in commits data [{sha}] - skip data update" + ) + return + commits.append(GitCommit(sha=sha)) + cls.push_to_s3(commits) + return + + @classmethod + def dump(cls, commits): + commits_ = [] + for commit in commits: + commits_.append(dataclasses.asdict(commit)) + with open(cls.file_name(), "w", encoding="utf8") as f: + json.dump(commits_, f) + + @classmethod + def pull_from_s3(cls): + local_path = Path(cls.file_name()) + file_name = local_path.name + env = _Environment.get() + s3_path = f"{Settings.HTML_S3_PATH}/{cls.get_s3_prefix(pr_number=env.PR_NUMBER, branch=env.BRANCH)}/{file_name}" + if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path): + print(f"WARNING: failed to cp file [{s3_path}] from s3") + return [] + return cls.from_json(local_path) + + @classmethod + def push_to_s3(cls, commits): + print(f"INFO: push commits data to s3, commits num [{len(commits)}]") + cls.dump(commits) + local_path = Path(cls.file_name()) + file_name = local_path.name + env = _Environment.get() + s3_path = f"{Settings.HTML_S3_PATH}/{cls.get_s3_prefix(pr_number=env.PR_NUMBER, branch=env.BRANCH)}/{file_name}" + if not S3.copy_file_to_s3(s3_path=s3_path, local_path=local_path, text=True): + print(f"WARNING: failed to cp file [{local_path}] to s3") + + @classmethod + def get_s3_prefix(cls, pr_number, branch): + prefix = 
"" + assert pr_number or branch + if pr_number and pr_number > 0: + prefix += f"{pr_number}" + else: + prefix += f"{branch}" + return prefix + + @classmethod + def file_name(cls): + return f"{Settings.TEMP_DIR}/commits.json" + + # def _get_pr_commits(pr_number): + # res = [] + # if not pr_number: + # return res + # output = Shell.get_output(f"gh pr view {pr_number} --json commits") + # if output: + # res = GitCommit.from_json(output) + # return res + class HtmlRunnerHooks: @classmethod def configure(cls, _workflow): - - def _get_pr_commits(pr_number): - res = [] - if not pr_number: - return res - output = Shell.get_output(f"gh pr view {pr_number} --json commits") - if output: - res = GitCommit.from_json(output) - return res - # generate pending Results for all jobs in the workflow if _workflow.enable_cache: skip_jobs = RunConfig.from_fs(_workflow.name).cache_success @@ -106,11 +168,9 @@ class HtmlRunnerHooks: Utils.raise_with_error( "Failed to set both GH commit status and PR comment with Workflow Status, cannot proceed" ) - if env.PR_NUMBER: - commits = _get_pr_commits(env.PR_NUMBER) - # TODO: upload commits data to s3 to visualise it on a report page - print(commits) + # TODO: enable for branch, add commit number limiting + GitCommit.update_s3_data() @classmethod def pre_run(cls, _workflow, _job): diff --git a/ci/praktika/job.py b/ci/praktika/job.py index d0d4232cfa2..99eb08938b8 100644 --- a/ci/praktika/job.py +++ b/ci/praktika/job.py @@ -52,30 +52,57 @@ class Job: self, parameter: Optional[List[Any]] = None, runs_on: Optional[List[List[str]]] = None, + provides: Optional[List[List[str]]] = None, + requires: Optional[List[List[str]]] = None, timeout: Optional[List[int]] = None, ): assert ( parameter or runs_on ), "Either :parameter or :runs_on must be non empty list for parametrisation" + if runs_on: + assert isinstance(runs_on, list) and isinstance(runs_on[0], list) if not parameter: parameter = [None] * len(runs_on) if not runs_on: runs_on = [None] * len(parameter) if not timeout: timeout = [None] * len(parameter) + if not provides: + provides = [None] * len(parameter) + if not requires: + requires = [None] * len(parameter) assert ( - len(parameter) == len(runs_on) == len(timeout) - ), "Parametrization lists must be of the same size" + len(parameter) + == len(runs_on) + == len(timeout) + == len(provides) + == len(requires) + ), f"Parametrization lists must be of the same size [{len(parameter)}, {len(runs_on)}, {len(timeout)}, {len(provides)}, {len(requires)}]" res = [] - for parameter_, runs_on_, timeout_ in zip(parameter, runs_on, timeout): + for parameter_, runs_on_, timeout_, provides_, requires_ in zip( + parameter, runs_on, timeout, provides, requires + ): obj = copy.deepcopy(self) + assert ( + not obj.provides + ), "Job.Config.provides must be empty for parametrized jobs" if parameter_: obj.parameter = parameter_ if runs_on_: obj.runs_on = runs_on_ if timeout_: obj.timeout = timeout_ + if provides_: + assert ( + not obj.provides + ), "Job.Config.provides must be empty for parametrized jobs" + obj.provides = provides_ + if requires_: + assert ( + not obj.requires + ), "Job.Config.requires and parametrize(requires=...) 
are both set" + obj.requires = requires_ obj.name = obj.get_job_name_with_parameter() res.append(obj) return res @@ -84,13 +111,16 @@ class Job: name, parameter, runs_on = self.name, self.parameter, self.runs_on res = name name_params = [] - if isinstance(parameter, list) or isinstance(parameter, dict): - name_params.append(json.dumps(parameter)) - elif parameter is not None: - name_params.append(parameter) - if runs_on: + if parameter: + if isinstance(parameter, list) or isinstance(parameter, dict): + name_params.append(json.dumps(parameter)) + else: + name_params.append(parameter) + elif runs_on: assert isinstance(runs_on, list) name_params.append(json.dumps(runs_on)) + else: + assert False if name_params: name_params = [str(param) for param in name_params] res += f" ({', '.join(name_params)})" diff --git a/ci/praktika/json.html b/ci/praktika/json.html index af03ed702f8..f86a7b27ecb 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -200,10 +200,7 @@ } th.name-column, td.name-column { - max-width: 400px; /* Set the maximum width for the column */ - white-space: nowrap; /* Prevent text from wrapping */ - overflow: hidden; /* Hide the overflowed text */ - text-overflow: ellipsis; /* Show ellipsis (...) for overflowed text */ + min-width: 350px; } th.status-column, td.status-column { @@ -364,7 +361,6 @@ } function addKeyValueToStatus(key, value, options = null) { - const statusContainer = document.getElementById('status-container'); let keyValuePair = document.createElement('div'); @@ -374,27 +370,40 @@ keyElement.className = 'json-key'; keyElement.textContent = key + ':'; - let valueElement - if (value) { - valueElement = document.createElement('div'); - valueElement.className = 'json-value'; - valueElement.textContent = value; - } else if (options) { + let valueElement; + + if (options) { + // Create dropdown if options are provided valueElement = document.createElement('select'); valueElement.className = 'dropdown-value'; + + options.forEach(optionValue => { + const option = document.createElement('option'); + option.value = optionValue; + option.textContent = optionValue.slice(0, 10); + + // Set the initially selected option + if (optionValue === value) { + option.selected = true; + } + + valueElement.appendChild(option); + }); + + // Update the URL parameter when the selected value changes valueElement.addEventListener('change', (event) => { const selectedValue = event.target.value; updateUrlParameter(key, selectedValue); }); - options.forEach(optionValue => { - const option = document.createElement('option'); - option.value = optionValue; - option.textContent = optionValue; - valueElement.appendChild(option); - }); + } else { + // Create a simple text display if no options are provided + valueElement = document.createElement('div'); + valueElement.className = 'json-value'; + valueElement.textContent = value || 'N/A'; // Display 'N/A' if value is null } - keyValuePair.appendChild(keyElement) - keyValuePair.appendChild(valueElement) + + keyValuePair.appendChild(keyElement); + keyValuePair.appendChild(valueElement); statusContainer.appendChild(keyValuePair); } @@ -518,12 +527,12 @@ const columns = ['name', 'status', 'start_time', 'duration', 'info']; const columnSymbols = { - name: '📂', - status: '⏯️', + name: '🗂️', + status: '🧾', start_time: '🕒', duration: '⏳', - info: 'ℹ️', - files: '📄' + info: '📝', + files: '📎' }; function createResultsTable(results, nest_level) { @@ -532,16 +541,14 @@ const thead = document.createElement('thead'); const tbody = 
document.createElement('tbody'); - // Get the current URL parameters - const currentUrl = new URL(window.location.href); - // Create table headers based on the fixed columns const headerRow = document.createElement('tr'); columns.forEach(column => { const th = document.createElement('th'); - th.textContent = th.textContent = columnSymbols[column] || column; + th.textContent = columnSymbols[column] || column; th.style.cursor = 'pointer'; // Make headers clickable - th.addEventListener('click', () => sortTable(results, column, tbody, nest_level)); // Add click event to sort the table + th.setAttribute('data-sort-direction', 'asc'); // Default sort direction + th.addEventListener('click', () => sortTable(results, column, columnSymbols[column] || column, tbody, nest_level, columns)); // Add click event to sort the table headerRow.appendChild(th); }); thead.appendChild(headerRow); @@ -605,39 +612,33 @@ }); } - function sortTable(results, key, tbody, nest_level) { + function sortTable(results, column, key, tbody, nest_level, columns) { // Find the table header element for the given key - let th = null; - const tableHeaders = document.querySelectorAll('th'); // Select all table headers - tableHeaders.forEach(header => { - if (header.textContent.trim().toLowerCase() === key.toLowerCase()) { - th = header; - } - }); + const tableHeaders = document.querySelectorAll('th'); + let th = Array.from(tableHeaders).find(header => header.textContent === key); if (!th) { console.error(`No table header found for key: ${key}`); return; } - // Determine the current sort direction - let ascending = th.getAttribute('data-sort-direction') === 'asc' ? false : true; + const ascending = th.getAttribute('data-sort-direction') === 'asc'; + th.setAttribute('data-sort-direction', ascending ? 'desc' : 'asc'); - // Toggle the sort direction for the next click - th.setAttribute('data-sort-direction', ascending ? 'asc' : 'desc'); - - // Sort the results array by the given key results.sort((a, b) => { - if (a[key] < b[key]) return ascending ? -1 : 1; - if (a[key] > b[key]) return ascending ? 1 : -1; + if (a[column] < b[column]) return ascending ? -1 : 1; + if (a[column] > b[column]) return ascending ? 1 : -1; return 0; }); + // Clear the existing rows in tbody + tbody.innerHTML = ''; + // Re-populate the table with sorted data populateTableRows(tbody, results, columns, nest_level); } - function loadJSON(PR, sha, nameParams) { + function loadResultsJSON(PR, sha, nameParams) { const infoElement = document.getElementById('info-container'); let lastModifiedTime = null; const task = nameParams[0].toLowerCase(); @@ -753,22 +754,61 @@ } }); - if (PR) { - addKeyValueToStatus("PR", PR) - } else { - console.error("TODO") - } - addKeyValueToStatus("sha", null, [sha, 'lala']); - if (nameParams[1]) { - addKeyValueToStatus("job", nameParams[1]); - } - addKeyValueToStatus("workflow", nameParams[0]); + let path_commits_json = ''; + let commitsArray = []; - if (PR && sha && root_name) { - loadJSON(PR, sha, nameParams); + if (PR) { + addKeyValueToStatus("PR", PR); + const baseUrl = window.location.origin + window.location.pathname.replace('/json.html', ''); + path_commits_json = `${baseUrl}/${encodeURIComponent(PR)}/commits.json`; } else { - document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0'; + // Placeholder for a different path when PR is missing + console.error("PR parameter is missing. 
Setting alternate commits path."); + path_commits_json = '/path/to/alternative/commits.json'; } + + function loadCommitsArray(path) { + return fetch(path, { cache: "no-cache" }) + .then(response => { + if (!response.ok) { + console.error(`HTTP error! status: ${response.status}`) + return []; + } + return response.json(); + }) + .then(data => { + if (Array.isArray(data) && data.every(item => typeof item === 'object' && item.hasOwnProperty('sha'))) { + return data.map(item => item.sha); + } else { + throw new Error('Invalid data format: expected array of objects with a "sha" key'); + } + }) + .catch(error => { + console.error('Error loading commits JSON:', error); + return []; // Return an empty array if an error occurs + }); + } + + loadCommitsArray(path_commits_json) + .then(data => { + commitsArray = data; + }) + .finally(() => { + // Proceed with the rest of the initialization + addKeyValueToStatus("sha", sha || "latest", commitsArray.concat(["latest"])); + + if (nameParams[1]) { + addKeyValueToStatus("job", nameParams[1]); + } + addKeyValueToStatus("workflow", nameParams[0]); + + // Check if all required parameters are present to load JSON + if (PR && sha && root_name) { + loadResultsJSON(PR, sha, nameParams); + } else { + document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0'; + } + }); } window.onload = init; diff --git a/ci/praktika/mangle.py b/ci/praktika/mangle.py index 89fc52cf849..bca33f9e660 100644 --- a/ci/praktika/mangle.py +++ b/ci/praktika/mangle.py @@ -58,7 +58,6 @@ def _update_workflow_artifacts(workflow): artifact_job = {} for job in workflow.jobs: for artifact_name in job.provides: - assert artifact_name not in artifact_job artifact_job[artifact_name] = job.name for artifact in workflow.artifacts: artifact._provided_by = artifact_job[artifact.name] diff --git a/ci/praktika/native_jobs.py b/ci/praktika/native_jobs.py index f7fd4ca190b..16ffa9056e9 100644 --- a/ci/praktika/native_jobs.py +++ b/ci/praktika/native_jobs.py @@ -151,7 +151,7 @@ def _config_workflow(workflow: Workflow.Config, job_name): status = Result.Status.ERROR print("ERROR: ", info) else: - Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika --generate") + assert Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika yaml") exit_code, output, err = Shell.get_res_stdout_stderr( f"git diff-index HEAD -- {Settings.WORKFLOW_PATH_PREFIX}" ) @@ -250,6 +250,9 @@ def _config_workflow(workflow: Workflow.Config, job_name): info_lines.append(job_name + ": " + info) results.append(result_) + if workflow.enable_merge_commit: + assert False, "NOT implemented" + # config: if workflow.dockers: print("Calculate docker's digests") diff --git a/ci/praktika/param.py b/ci/praktika/param.py new file mode 100644 index 00000000000..f5727198e0d --- /dev/null +++ b/ci/praktika/param.py @@ -0,0 +1,8 @@ +from praktika._environment import _Environment + + +# TODO: find better place and/or right storage for parameter +def get_param(): + env = _Environment.get() + assert env.PARAMETER + return env.PARAMETER diff --git a/ci/praktika/result.py b/ci/praktika/result.py index 2ba8309ad60..f473cf3ed05 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -1,7 +1,6 @@ import dataclasses import datetime import sys -from collections.abc import Container from pathlib import Path from typing import Any, Dict, List, Optional @@ -68,8 +67,9 @@ class Result(MetaClasses.Serializable): if isinstance(status, bool): status = Result.Status.SUCCESS if status else Result.Status.FAILED if 
not results and not status: - print("ERROR: Either .results or .status must be provided") - raise + Utils.raise_with_error( + f"Either .results ({results}) or .status ({status}) must be provided" + ) if not name: name = _Environment.get().JOB_NAME if not name: @@ -78,10 +78,10 @@ class Result(MetaClasses.Serializable): result_status = status or Result.Status.SUCCESS infos = [] if info: - if isinstance(info, Container): - infos += info + if isinstance(info, str): + infos += [info] else: - infos.append(info) + infos += info if results and not status: for result in results: if result.status not in (Result.Status.SUCCESS, Result.Status.FAILED): @@ -112,7 +112,7 @@ class Result(MetaClasses.Serializable): return self.status not in (Result.Status.PENDING, Result.Status.RUNNING) def is_running(self): - return self.status not in (Result.Status.RUNNING,) + return self.status in (Result.Status.RUNNING,) def is_ok(self): return self.status in (Result.Status.SKIPPED, Result.Status.SUCCESS) @@ -180,6 +180,11 @@ class Result(MetaClasses.Serializable): ) return self + def set_timing(self, stopwatch: Utils.Stopwatch): + self.start_time = stopwatch.start_time + self.duration = stopwatch.duration + return self + def update_sub_result(self, result: "Result"): assert self.results, "BUG?" for i, result_ in enumerate(self.results): diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 823c7e0f36d..5db1a89ce99 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -125,15 +125,24 @@ class Runner: return 0 def _run(self, workflow, job, docker="", no_docker=False, param=None): + # re-set envs for local run + env = _Environment.get() + env.JOB_NAME = job.name + env.PARAMETER = job.parameter + env.dump() + if param: if not isinstance(param, str): Utils.raise_with_error( f"Custom param for local tests must be of type str, got [{type(param)}]" ) - env = _Environment.get() - env.dump() if job.run_in_docker and not no_docker: + job.run_in_docker, docker_settings = ( + job.run_in_docker.split("+")[0], + job.run_in_docker.split("+")[1:], + ) + from_root = "root" in docker_settings if ":" in job.run_in_docker: docker_name, docker_tag = job.run_in_docker.split(":") print( @@ -145,7 +154,7 @@ class Runner: RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker], ) docker = docker or f"{docker_name}:{docker_tag}" - cmd = f"docker run --rm --user \"$(id -u):$(id -g)\" -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" + cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" else: cmd = job.command @@ -226,7 +235,8 @@ class Runner: print(info) result.set_info(info).set_status(Result.Status.ERROR).dump() - result.set_files(files=[Settings.RUN_LOG]) + if not result.is_ok(): + result.set_files(files=[Settings.RUN_LOG]) result.update_duration().dump() if result.info and result.status != Result.Status.SUCCESS: @@ -329,7 +339,7 @@ class Runner: workflow, job, pr=pr, branch=branch, sha=sha ) - if res: + if res and (not local_run or pr or sha or branch): res = False print(f"=== Pre run script [{job.name}], workflow [{workflow.name}] ===") try: diff --git a/ci/praktika/s3.py b/ci/praktika/s3.py index 8cfb70a9076..04a08622dcd 
100644 --- a/ci/praktika/s3.py +++ b/ci/praktika/s3.py @@ -52,7 +52,7 @@ class S3: cmd += " --content-type text/plain" res = cls.run_command_with_retries(cmd) if not res: - raise + raise RuntimeError() bucket = s3_path.split("/")[0] endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket] assert endpoint diff --git a/ci/praktika/workflow.py b/ci/praktika/workflow.py index 41e8056f9ef..8c5ec12440f 100644 --- a/ci/praktika/workflow.py +++ b/ci/praktika/workflow.py @@ -31,6 +31,7 @@ class Workflow: enable_report: bool = False enable_merge_ready_status: bool = False enable_cidb: bool = False + enable_merge_commit: bool = False def is_event_pull_request(self): return self.event == Workflow.Event.PULL_REQUEST diff --git a/ci/praktika/yaml_generator.py b/ci/praktika/yaml_generator.py index fb918b4ddba..1422a835663 100644 --- a/ci/praktika/yaml_generator.py +++ b/ci/praktika/yaml_generator.py @@ -80,6 +80,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{{{ github.event.pull_reguest.head.sha }}}} {JOB_ADDONS} - name: Prepare env script run: | diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py index c67bdee015b..33173756924 100644 --- a/ci/settings/definitions.py +++ b/ci/settings/definitions.py @@ -8,23 +8,30 @@ class RunnerLabels: CI_SERVICES = "ci_services" CI_SERVICES_EBS = "ci_services_ebs" BUILDER = "builder" + STYLE_CHECKER = "style-checker" BASE_BRANCH = "master" +azure_secret = Secret.Config( + name="azure_connection_string", + type=Secret.Type.AWS_SSM_VAR, +) + SECRETS = [ Secret.Config( name="dockerhub_robot_password", type=Secret.Type.AWS_SSM_VAR, ), - Secret.Config( - name="woolenwolf_gh_app.clickhouse-app-id", - type=Secret.Type.AWS_SSM_SECRET, - ), - Secret.Config( - name="woolenwolf_gh_app.clickhouse-app-key", - type=Secret.Type.AWS_SSM_SECRET, - ), + azure_secret, + # Secret.Config( + # name="woolenwolf_gh_app.clickhouse-app-id", + # type=Secret.Type.AWS_SSM_SECRET, + # ), + # Secret.Config( + # name="woolenwolf_gh_app.clickhouse-app-key", + # type=Secret.Type.AWS_SSM_SECRET, + # ), ] DOCKERS = [ @@ -118,12 +125,12 @@ DOCKERS = [ # platforms=Docker.Platforms.arm_amd, # depends_on=["clickhouse/test-base"], # ), - # Docker.Config( - # name="clickhouse/stateless-test", - # path="./ci/docker/test/stateless", - # platforms=Docker.Platforms.arm_amd, - # depends_on=["clickhouse/test-base"], - # ), + Docker.Config( + name="clickhouse/stateless-test", + path="./ci/docker/stateless-test", + platforms=Docker.Platforms.arm_amd, + depends_on=[], + ), # Docker.Config( # name="clickhouse/stateful-test", # path="./ci/docker/test/stateful", @@ -230,5 +237,6 @@ DOCKERS = [ class JobNames: STYLE_CHECK = "Style Check" FAST_TEST = "Fast test" - BUILD_AMD_DEBUG = "Build amd64 debug" + BUILD = "Build" + BUILD_AMD_DEBUG = "Build (amd, debug)" STATELESS_TESTS = "Stateless tests (amd, debug)" diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index c7715b40fca..10dd77a0414 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -13,7 +13,8 @@ from ci.settings.definitions import ( class ArtifactNames: - ch_debug_binary = "clickhouse_debug_binary" + CH_AMD_DEBUG = "CH_AMD_DEBUG" + CH_AMD_RELEASE = "CH_AMD_RELEASE" style_check_job = Job.Config( @@ -37,10 +38,10 @@ fast_test_job = Job.Config( ), ) -job_build_amd_debug = Job.Config( - name=JobNames.BUILD_AMD_DEBUG, +amd_build_jobs = Job.Config( + name=JobNames.BUILD, runs_on=[RunnerLabels.BUILDER], - command="python3 
./ci/jobs/build_clickhouse.py amd_debug", + command="python3 ./ci/jobs/build_clickhouse.py", run_in_docker="clickhouse/fasttest", digest_config=Job.CacheDigestConfig( include_paths=[ @@ -56,20 +57,30 @@ job_build_amd_debug = Job.Config( "./tests/ci/version_helper.py", ], ), - provides=[ArtifactNames.ch_debug_binary], +).parametrize( + parameter=["amd_debug", "amd_release"], + provides=[[ArtifactNames.CH_AMD_DEBUG], [ArtifactNames.CH_AMD_RELEASE]], ) -stateless_tests_job = Job.Config( +statless_batch_num = 2 +stateless_tests_amd_debug_jobs = Job.Config( name=JobNames.STATELESS_TESTS, runs_on=[RunnerLabels.BUILDER], command="python3 ./ci/jobs/functional_stateless_tests.py amd_debug", - run_in_docker="clickhouse/fasttest:latest", + run_in_docker="clickhouse/stateless-test", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_stateless_tests.py", ], ), - requires=[ArtifactNames.ch_debug_binary], + requires=[ArtifactNames.CH_AMD_DEBUG], +).parametrize( + parameter=[ + f"parallel {i+1}/{statless_batch_num}" for i in range(statless_batch_num) + ] + + ["non-parallel"], + runs_on=[[RunnerLabels.BUILDER] for _ in range(statless_batch_num)] + + [[RunnerLabels.STYLE_CHECKER]], ) workflow = Workflow.Config( @@ -79,15 +90,20 @@ workflow = Workflow.Config( jobs=[ style_check_job, fast_test_job, - job_build_amd_debug, - stateless_tests_job, + *amd_build_jobs, + *stateless_tests_amd_debug_jobs, ], artifacts=[ Artifact.Config( - name=ArtifactNames.ch_debug_binary, + name=ArtifactNames.CH_AMD_DEBUG, type=Artifact.Type.S3, path=f"{Settings.TEMP_DIR}/build/programs/clickhouse", - ) + ), + Artifact.Config( + name=ArtifactNames.CH_AMD_RELEASE, + type=Artifact.Type.S3, + path=f"{Settings.TEMP_DIR}/build/programs/clickhouse", + ), ], dockers=DOCKERS, secrets=SECRETS, @@ -101,8 +117,11 @@ WORKFLOWS = [ ] # type: List[Workflow.Config] -if __name__ == "__main__": - # local job test inside praktika environment - from praktika.runner import Runner - - Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True) +# if __name__ == "__main__": +# # local job test inside praktika environment +# from praktika.runner import Runner +# from praktika.digest import Digest +# +# print(Digest().calc_job_digest(amd_debug_build_job)) +# +# Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 100a6358dcf..3396b10814a 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -2153,9 +2153,9 @@ class TestSuite: self.sequential_tests = [] self.parallel_tests = [] for test_name in self.all_tests: - if self.is_sequential_test(test_name): + if self.is_sequential_test(test_name) and not args.no_sequential: self.sequential_tests.append(test_name) - else: + elif not args.no_parallel: self.parallel_tests.append(test_name) def is_sequential_test(self, test_name): @@ -3290,7 +3290,10 @@ def parse_args(): help='Replace random database name with "default" in stderr', ) parser.add_argument( - "--parallel", default="1/1", help="One parallel test run number/total" + "--no-sequential", action="store_true", help="Not run no-parallel" + ) + parser.add_argument( + "--no-parallel", action="store_true", help="Run only no-parallel" ) parser.add_argument( "-j", "--jobs", default=1, nargs="?", type=int, help="Run all tests in parallel" @@ -3339,7 +3342,7 @@ def parse_args(): parser.add_argument( "--sequential", nargs="+", - help="Run these tests sequentially even if --parallel specified", + help="Run all tests sequentially", ) 
parser.add_argument( "--no-long", action="store_true", dest="no_long", help="Do not run long tests" diff --git a/tests/config/config.d/ssl_certs.xml b/tests/config/config.d/ssl_certs.xml index 26b679f39df..c20fef89e00 100644 --- a/tests/config/config.d/ssl_certs.xml +++ b/tests/config/config.d/ssl_certs.xml @@ -1,8 +1,8 @@ - /etc/clickhouse-server/server.crt - /etc/clickhouse-server/server.key + /tmp/praktika/etc/clickhouse-server/server.crt + /tmp/praktika/etc/clickhouse-server/server.key diff --git a/tests/config/install.sh b/tests/config/install.sh index be47298f6a4..cdae5741fce 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -9,6 +9,21 @@ DEST_SERVER_PATH="${1:-/etc/clickhouse-server}" DEST_CLIENT_PATH="${2:-/etc/clickhouse-client}" SRC_PATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +shift # DEST_SERVER_PATH +shift # DEST_CLIENT_PATH + +FAST_TEST=0 +S3_STORAGE=0 + +while [[ "$#" -gt 0 ]]; do + case $1 in + --fast-test) FAST_TEST=1 ;; + --s3-storage) S3_STORAGE=1 ;; + *) echo "Unknown option: $1" ; exit 1 ;; + esac + shift +done + echo "Going to install test configs from $SRC_PATH into $DEST_SERVER_PATH" mkdir -p $DEST_SERVER_PATH/config.d/ @@ -72,9 +87,8 @@ ln -sf $SRC_PATH/config.d/serverwide_trace_collector.xml $DEST_SERVER_PATH/confi ln -sf $SRC_PATH/config.d/rocksdb.xml $DEST_SERVER_PATH/config.d/ # Not supported with fasttest. -if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ] -then - ln -sf $SRC_PATH/config.d/legacy_geobase.xml $DEST_SERVER_PATH/config.d/ +if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ] || [ "$FAST_TEST" != "1" ]; then + ln -sf "$SRC_PATH/config.d/legacy_geobase.xml" "$DEST_SERVER_PATH/config.d/" fi ln -sf $SRC_PATH/users.d/log_queries.xml $DEST_SERVER_PATH/users.d/ @@ -185,7 +199,7 @@ elif [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" == "1" ]]; then ln -sf $SRC_PATH/config.d/azure_storage_policy_by_default.xml $DEST_SERVER_PATH/config.d/ fi -if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then +if [[ "$EXPORT_S3_STORAGE_POLICIES" == "1" ]] || [[ "$S3_STORAGE" = "1" ]]; then ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02944.xml $DEST_SERVER_PATH/config.d/ @@ -195,7 +209,7 @@ if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then ln -sf $SRC_PATH/users.d/s3_cache_new.xml $DEST_SERVER_PATH/users.d/ fi -if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then +if [[ "$USE_DATABASE_REPLICATED" == "1" ]]; then ln -sf $SRC_PATH/users.d/database_replicated.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/config.d/database_replicated.xml $DEST_SERVER_PATH/config.d/ rm /etc/clickhouse-server/config.d/zookeeper.xml diff --git a/tests/docker_scripts/setup_minio.sh b/tests/docker_scripts/setup_minio.sh index 40e93e713a1..837c05a9c5d 100755 --- a/tests/docker_scripts/setup_minio.sh +++ b/tests/docker_scripts/setup_minio.sh @@ -5,6 +5,12 @@ set -euxf -o pipefail export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse} export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse} +if [ -d "$TEMP_DIR" ]; then + cd "$TEMP_DIR" + # add / for minio mc in docker + PATH="/:.:$PATH" +fi + usage() { echo $"Usage: $0 (default path: /usr/share/clickhouse-test)" exit 1 @@ -70,9 +76,10 @@ download_minio() { } start_minio() { + pwd mkdir -p ./minio_data - ./minio --version - ./minio server --address ":11111" ./minio_data & + minio --version + minio server --address ":11111" ./minio_data & 
wait_for_it lsof -i :11111 sleep 5 @@ -80,12 +87,14 @@ start_minio() { setup_minio() { local test_type=$1 - ./mc alias set clickminio http://localhost:11111 clickhouse clickhouse - ./mc admin user add clickminio test testtest - ./mc admin policy attach clickminio readwrite --user=test - ./mc mb --ignore-existing clickminio/test + echo "setup_minio(), test_type=$test_type" + mc alias set clickminio http://localhost:11111 clickhouse clickhouse + mc admin user add clickminio test testtest + mc admin policy attach clickminio readwrite --user=test ||: + mc mb --ignore-existing clickminio/test if [ "$test_type" = "stateless" ]; then - ./mc anonymous set public clickminio/test + echo "Create @test bucket in minio" + mc anonymous set public clickminio/test fi } @@ -95,12 +104,13 @@ upload_data() { local query_dir=$1 local test_path=$2 local data_path=${test_path}/queries/${query_dir}/data_minio + echo "upload_data() data_path=$data_path" # iterating over globs will cause redundant file variable to be # a path to a file, not a filename # shellcheck disable=SC2045 if [ -d "${data_path}" ]; then - ./mc cp --recursive "${data_path}"/ clickminio/test/ + mc cp --recursive "${data_path}"/ clickminio/test/ fi } @@ -138,7 +148,7 @@ wait_for_it() { main() { local query_dir query_dir=$(check_arg "$@") - if [ ! -f ./minio ]; then + if ! (minio --version && mc --version); then download_minio fi start_minio From ead7630d04b5aab7ff28a0e99710a8b6ce17800c Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Tue, 5 Nov 2024 14:28:28 +0100 Subject: [PATCH 217/566] Missing tests in several tests in 24.10 Added corner cases for tests for: to_utc_timestamp and from_utc_timestamp (more timezones, special timezones, epoch corners do not look right, raising a bug over that) arrayUnion (empty and big arrays) quantilesExactWeightedInterpolated (more data types) --- .../02812_from_to_utc_timestamp.reference | 5 +++ .../02812_from_to_utc_timestamp.sh | 8 +++- .../0_stateless/03224_arrayUnion.reference | 10 +++++ .../queries/0_stateless/03224_arrayUnion.sql | 21 ++++++++- ...tile_exact_weighted_interpolated.reference | 13 +++--- ...0_quantile_exact_weighted_interpolated.sql | 45 ++++++++++++++++--- 6 files changed, 88 insertions(+), 14 deletions(-) diff --git a/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference b/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference index 4da8a9784dd..bdce849e069 100644 --- a/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference +++ b/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference @@ -3,3 +3,8 @@ 3 2023-03-16 12:22:33 2023-03-16 10:22:33.000 2023-03-16 03:22:33 2023-03-16 19:22:33.123 2024-02-24 10:22:33 2024-02-24 12:22:33 2024-10-24 09:22:33 2024-10-24 13:22:33 +2024-10-24 16:22:33 2024-10-24 06:22:33 +leap year: 2024-02-29 16:22:33 2024-02-29 06:22:33 +non-leap year: 2023-03-01 16:22:33 2023-03-01 06:22:33 +timezone with half-hour offset: 2024-02-29 00:52:33 2024-02-29 21:52:33 +jump over a year: 2024-01-01 04:01:01 2023-12-31 20:01:01 diff --git a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh index 835dab8af57..441fc254256 100755 --- a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh +++ b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh @@ -15,4 +15,10 @@ $CLICKHOUSE_CLIENT -q "select x, to_utc_timestamp(toDateTime('2023-03-16 11:22:3 # timestamp convert between DST timezone and UTC $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-02-24 
11:22:33'), 'Europe/Madrid'), from_utc_timestamp(toDateTime('2024-02-24 11:22:33'), 'Europe/Madrid')" $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'Europe/Madrid'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'Europe/Madrid')" -$CLICKHOUSE_CLIENT -q "drop table test_tbl" \ No newline at end of file +$CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST')" +$CLICKHOUSE_CLIENT -q "select 'leap year:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST')" +$CLICKHOUSE_CLIENT -q "select 'non-leap year:', to_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST')" +$CLICKHOUSE_CLIENT -q "select 'timezone with half-hour offset:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'Australia/Adelaide'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'Australia/Adelaide')" +$CLICKHOUSE_CLIENT -q "select 'jump over a year:', to_utc_timestamp(toDateTime('2023-12-31 23:01:01'), 'EST'), from_utc_timestamp(toDateTime('2024-01-01 01:01:01'), 'EST')" + +$CLICKHOUSE_CLIENT -q "drop table test_tbl" diff --git a/tests/queries/0_stateless/03224_arrayUnion.reference b/tests/queries/0_stateless/03224_arrayUnion.reference index b900b6cdb0a..9b871234d27 100644 --- a/tests/queries/0_stateless/03224_arrayUnion.reference +++ b/tests/queries/0_stateless/03224_arrayUnion.reference @@ -41,3 +41,13 @@ [1,2,3,4,5,10,20] ------- [1,2,3] +------- +[10,-2,1] ['hello','hi'] [3,2,1,NULL] +------- +------- +[1] +------- +[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256] +199999 +------- +[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19] diff --git a/tests/queries/0_stateless/03224_arrayUnion.sql b/tests/queries/0_stateless/03224_arrayUnion.sql index dedbacad906..14a9192f39a 100644 --- a/tests/queries/0_stateless/03224_arrayUnion.sql +++ b/tests/queries/0_stateless/03224_arrayUnion.sql @@ -35,4 +35,23 @@ SELECT arraySort(arrayUnion([NULL, NULL, NULL, 1], [1, NULL, NULL], [1, 2, 3, NU select '-------'; SELECT arraySort(arrayUnion([1, 1, 1, 2, 3], [2, 2, 4], [5, 10, 20])); select '-------'; -SELECT arraySort(arrayUnion([1, 2], [1, 3], [])), +SELECT arraySort(arrayUnion([1, 2], [1, 3], [])); +select '-------'; +-- example from docs +SELECT + arrayUnion([-2, 1], [10, 1], [-2], []) as num_example, + arrayUnion(['hi'], [], ['hello', 'hi']) as str_example, + arrayUnion([1, 3, NULL], [2, 3, NULL]) as null_example; +select '-------'; +--mix of types +SELECT arrayUnion([1], [-2], [1.1, 'hi'], [NULL, 'hello', 
[]]); -- {serverError NO_COMMON_TYPE} +select '-------'; +SELECT arrayUnion([1]); +SELECT arrayUnion(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select '-------'; +--bigger arrays +SELECT arraySort(arrayUnion(range(1, 256), range(2, 257))); +SELECT length(arrayUnion(range(1, 100000), range(9999, 200000))); +select '-------'; +--bigger number of arguments +SELECT arraySort(arrayUnion([1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [1, 8], [1, 9], [1, 10], [1, 11], [1, 12], [1, 13], [1, 14], [1, 15], [1, 16], [1, 17], [1, 18], [1, 19])); diff --git a/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.reference b/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.reference index 23cbe2bfdec..ccb315b8305 100644 --- a/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.reference +++ b/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.reference @@ -1,6 +1,6 @@ quantileExactWeightedInterpolated -0 0 0 Decimal(38, 8) --25.5 -8.49999999 -5.1 Decimal(38, 8) +0 0 0 25 2024-02-20 Decimal(38, 8) +-25.5 -8.49999999 -5.1 12.25 2024-01-25 Decimal(38, 8) 0 0 0 10 3.33333333 2 20 6.66666666 4 @@ -10,11 +10,14 @@ quantileExactWeightedInterpolated [-50,-40,-30,-20,-10,0,10,20,30,40,50] [-16.66666666,-13.33333333,-10,-6.66666666,-3.33333333,0,3.33333333,6.66666666,10,13.33333333,16.66666666] [-10,-8,-6,-4,-2,0,2,4,6,8,10] +[0,5,10,15,20,25,30,35,40,45,50] +['2024-01-01','2024-01-11','2024-01-21','2024-01-31','2024-02-10','2024-02-20','2024-03-01','2024-03-11','2024-03-21','2024-03-31','2024-04-10'] quantileExactWeightedInterpolatedState [10000.6,20000.2,29999.8,39999.4] Test with filter that returns no rows -0 0 0 +0 0 0 nan 1970-01-01 +0 0 0 nan 1970-01-01 Test with dynamic weights -21 7 4.2 +21 7 4.2 35.5 2024-03-12 Test with all weights set to 0 -0 0 0 +0 0 0 nan 1970-01-01 diff --git a/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.sql b/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.sql index dba16eae22a..a64b46e751b 100644 --- a/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.sql +++ b/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.sql @@ -5,16 +5,28 @@ CREATE TABLE decimal a Decimal32(4), b Decimal64(8), c Decimal128(8), + f Float64, + d Date, w UInt64 ) ENGINE = Memory; -INSERT INTO decimal (a, b, c, w) -SELECT toDecimal32(number - 50, 4), toDecimal64(number - 50, 8) / 3, toDecimal128(number - 50, 8) / 5, number +INSERT INTO decimal (a, b, c, f, d, w) +SELECT toDecimal32(number - 50, 4), toDecimal64(number - 50, 8) / 3, toDecimal128(number - 50, 8) / 5, number/2, addDays(toDate('2024-01-01'), number), number FROM system.numbers LIMIT 101; SELECT 'quantileExactWeightedInterpolated'; -SELECT medianExactWeightedInterpolated(a, 1), medianExactWeightedInterpolated(b, 2), medianExactWeightedInterpolated(c, 3) as x, toTypeName(x) FROM decimal; -SELECT quantileExactWeightedInterpolated(a, 1), quantileExactWeightedInterpolated(b, 2), quantileExactWeightedInterpolated(c, 3) as x, toTypeName(x) FROM decimal WHERE a < 0; +SELECT medianExactWeightedInterpolated(a, 1), + medianExactWeightedInterpolated(b, 2), + medianExactWeightedInterpolated(c, 3) as x, + medianExactWeightedInterpolated(f, 4), + medianExactWeightedInterpolated(d, 5), + toTypeName(x) FROM decimal; +SELECT quantileExactWeightedInterpolated(a, 1), + quantileExactWeightedInterpolated(b, 2), + quantileExactWeightedInterpolated(c, 3) as x, + quantileExactWeightedInterpolated(f, 4), + 
quantileExactWeightedInterpolated(d, 5), + toTypeName(x) FROM decimal WHERE a < 0; SELECT quantileExactWeightedInterpolated(0.0)(a, 1), quantileExactWeightedInterpolated(0.0)(b, 2), quantileExactWeightedInterpolated(0.0)(c, 3) FROM decimal WHERE a >= 0; SELECT quantileExactWeightedInterpolated(0.2)(a, 1), quantileExactWeightedInterpolated(0.2)(b, 2), quantileExactWeightedInterpolated(0.2)(c, 3) FROM decimal WHERE a >= 0; SELECT quantileExactWeightedInterpolated(0.4)(a, 1), quantileExactWeightedInterpolated(0.4)(b, 2), quantileExactWeightedInterpolated(0.4)(c, 3) FROM decimal WHERE a >= 0; @@ -24,6 +36,8 @@ SELECT quantileExactWeightedInterpolated(1.0)(a, 1), quantileExactWeightedInterp SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a, 1) FROM decimal; SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b, 2) FROM decimal; SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c, 3) FROM decimal; +SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(f, 4) FROM decimal; +SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(d, 5) FROM decimal; SELECT 'quantileExactWeightedInterpolatedState'; SELECT quantilesExactWeightedInterpolatedMerge(0.2, 0.4, 0.6, 0.8)(x) @@ -34,12 +48,29 @@ FROM ); SELECT 'Test with filter that returns no rows'; -SELECT medianExactWeightedInterpolated(a, 1), medianExactWeightedInterpolated(b, 2), medianExactWeightedInterpolated(c, 3) FROM decimal WHERE a > 1000; +SELECT medianExactWeightedInterpolated(a, 1), + medianExactWeightedInterpolated(b, 2), + medianExactWeightedInterpolated(c, 3), + medianExactWeightedInterpolated(f, 4), + medianExactWeightedInterpolated(d, 5) FROM decimal WHERE a > 1000; +SELECT quantileExactWeightedInterpolated(a, 1), + quantileExactWeightedInterpolated(b, 2), + quantileExactWeightedInterpolated(c, 3), + quantileExactWeightedInterpolated(f, 4), + quantileExactWeightedInterpolated(d, 5) FROM decimal WHERE d < toDate('2024-01-01'); SELECT 'Test with dynamic weights'; -SELECT medianExactWeightedInterpolated(a, w), medianExactWeightedInterpolated(b, w), medianExactWeightedInterpolated(c, w) FROM decimal; +SELECT medianExactWeightedInterpolated(a, w), + medianExactWeightedInterpolated(b, w), + medianExactWeightedInterpolated(c, w), + medianExactWeightedInterpolated(f, w), + medianExactWeightedInterpolated(d, w) FROM decimal; SELECT 'Test with all weights set to 0'; -SELECT medianExactWeightedInterpolated(a, 0), medianExactWeightedInterpolated(b, 0), medianExactWeightedInterpolated(c, 0) FROM decimal; +SELECT medianExactWeightedInterpolated(a, 0), + medianExactWeightedInterpolated(b, 0), + medianExactWeightedInterpolated(c, 0), + medianExactWeightedInterpolated(f, 0), + medianExactWeightedInterpolated(d, 0) FROM decimal; DROP TABLE IF EXISTS decimal; From 6ecc673f7d4a9890004a24c16d8c6b9b5a857d93 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 5 Nov 2024 16:02:40 +0000 Subject: [PATCH 218/566] Fix quorum inserts tests --- tests/integration/test_quorum_inserts/test.py | 114 +++++++++--------- 1 file changed, 54 insertions(+), 60 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index eefc4882e8e..de437fc3206 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -2,6 +2,7 @@ import concurrent import 
time import pytest +import uuid from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager @@ -46,10 +47,11 @@ def started_cluster(): def test_simple_add_replica(started_cluster): - zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster") + table_name = "test_simple_" + uuid.uuid4().hex + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( - "CREATE TABLE test_simple " + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{table}', '{replica}') " "PARTITION BY d ORDER BY a" @@ -58,91 +60,81 @@ def test_simple_add_replica(started_cluster): zero.query(create_query) first.query(create_query) - first.query("SYSTEM STOP FETCHES test_simple") + first.query(f"SYSTEM STOP FETCHES {table_name}") zero.query( - "INSERT INTO test_simple VALUES (1, '2011-01-01')", + f"INSERT INTO {table_name} VALUES (1, '2011-01-01')", settings={"insert_quorum": 1}, ) - assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") - assert "" == first.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query(f"SELECT * from {table_name}") + assert "" == first.query(f"SELECT * from {table_name}") - first.query("SYSTEM START FETCHES test_simple") + first.query(f"SYSTEM START FETCHES {table_name}") - first.query("SYSTEM SYNC REPLICA test_simple", timeout=20) + first.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) - assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") - assert "1\t2011-01-01\n" == first.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query(f"SELECT * from {table_name}") + assert "1\t2011-01-01\n" == first.query(f"SELECT * from {table_name}") second.query(create_query) - second.query("SYSTEM SYNC REPLICA test_simple", timeout=20) + second.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) - assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") - assert "1\t2011-01-01\n" == first.query("SELECT * from test_simple") - assert "1\t2011-01-01\n" == second.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query(f"SELECT * from {table_name}") + assert "1\t2011-01-01\n" == first.query(f"SELECT * from {table_name}") + assert "1\t2011-01-01\n" == second.query(f"SELECT * from {table_name}") - zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster") + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") def test_drop_replica_and_achieve_quorum(started_cluster): + table_name = "test_drop_replica_and_achieve_quorum_" + uuid.uuid4().hex zero.query( - "DROP TABLE IF EXISTS test_drop_replica_and_achieve_quorum ON CLUSTER cluster" + f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster" ) - create_query = ( - "CREATE TABLE test_drop_replica_and_achieve_quorum " + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{table}', '{replica}') " "PARTITION BY d ORDER BY a" ) - print("Create Replicated table with two replicas") zero.query(create_query) first.query(create_query) - print("Stop fetches on one replica. Since that, it will be isolated.") - first.query("SYSTEM STOP FETCHES test_drop_replica_and_achieve_quorum") - + first.query(f"SYSTEM STOP FETCHES {table_name}") print("Insert to other replica. 
This query will fail.") quorum_timeout = zero.query_and_get_error( - "INSERT INTO test_drop_replica_and_achieve_quorum(a,d) VALUES (1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES (1, '2011-01-01')", settings={"insert_quorum_timeout": 5000}, ) assert "Timeout while waiting for quorum" in quorum_timeout, "Query must fail." - assert TSV("1\t2011-01-01\n") == TSV( zero.query( - "SELECT * FROM test_drop_replica_and_achieve_quorum", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 0}, ) ) - assert TSV("") == TSV( zero.query( - "SELECT * FROM test_drop_replica_and_achieve_quorum", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 1}, ) ) - # TODO:(Mikhaylov) begin; maybe delete this lines. I want clickhouse to fetch parts and update quorum. print("START FETCHES first replica") - first.query("SYSTEM START FETCHES test_drop_replica_and_achieve_quorum") - + first.query(f"SYSTEM START FETCHES {table_name}") print("SYNC first replica") - first.query("SYSTEM SYNC REPLICA test_drop_replica_and_achieve_quorum", timeout=20) + first.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) # TODO:(Mikhaylov) end - print("Add second replica") second.query(create_query) - print("SYNC second replica") - second.query("SYSTEM SYNC REPLICA test_drop_replica_and_achieve_quorum", timeout=20) - + second.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) print("Quorum for previous insert achieved.") assert TSV("1\t2011-01-01\n") == TSV( second.query( - "SELECT * FROM test_drop_replica_and_achieve_quorum", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 1}, ) ) @@ -296,10 +288,11 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): def test_insert_quorum_with_ttl(started_cluster): - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") + table_name = "test_insert_quorum_with_ttl_" + uuid.uuid4().hex + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( - "CREATE TABLE test_insert_quorum_with_ttl " + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}') " "PARTITION BY d ORDER BY a " @@ -311,12 +304,12 @@ def test_insert_quorum_with_ttl(started_cluster): zero.query(create_query) first.query(create_query) - print("Stop fetches for test_insert_quorum_with_ttl at first replica.") - first.query("SYSTEM STOP FETCHES test_insert_quorum_with_ttl") + print(f"Stop fetches for {table_name} at first replica.") + first.query(f"SYSTEM STOP FETCHES {table_name}") print("Insert should fail since it can not reach the quorum.") quorum_timeout = zero.query_and_get_error( - "INSERT INTO test_insert_quorum_with_ttl(a,d) VALUES(1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES(1, '2011-01-01')", settings={"insert_quorum_timeout": 5000}, ) assert "Timeout while waiting for quorum" in quorum_timeout, "Query must fail." 
@@ -327,51 +320,52 @@ def test_insert_quorum_with_ttl(started_cluster): time.sleep(10) assert TSV("1\t2011-01-01\n") == TSV( zero.query( - "SELECT * FROM test_insert_quorum_with_ttl", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 0}, ) ) - print("Resume fetches for test_insert_quorum_with_ttl at first replica.") - first.query("SYSTEM START FETCHES test_insert_quorum_with_ttl") + print(f"Resume fetches for {table_name} at first replica.") + first.query(f"SYSTEM START FETCHES {table_name}") print("Sync first replica.") - first.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") + first.query(f"SYSTEM SYNC REPLICA {table_name}") zero.query( - "INSERT INTO test_insert_quorum_with_ttl(a,d) VALUES(1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES(1, '2011-01-01')", settings={"insert_quorum_timeout": 5000}, ) print("Inserts should resume.") - zero.query("INSERT INTO test_insert_quorum_with_ttl(a, d) VALUES(2, '2012-02-02')") + zero.query(f"INSERT INTO {table_name}(a, d) VALUES(2, '2012-02-02')") - first.query("OPTIMIZE TABLE test_insert_quorum_with_ttl") - first.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") - zero.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") + first.query(f"OPTIMIZE TABLE {table_name}") + first.query(f"SYSTEM SYNC REPLICA {table_name}") + zero.query(f"SYSTEM SYNC REPLICA {table_name}") assert TSV("2\t2012-02-02\n") == TSV( first.query( - "SELECT * FROM test_insert_quorum_with_ttl", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 0}, ) ) assert TSV("2\t2012-02-02\n") == TSV( first.query( - "SELECT * FROM test_insert_quorum_with_ttl", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 1}, ) ) - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") -def test_insert_quorum_with_keeper_loss_connection(): +def test_insert_quorum_with_keeper_loss_connection(started_cluster): + table_name = "test_insert_quorum_with_keeper_loss_" + uuid.uuid4().hex zero.query( - "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" + f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster" ) create_query = ( - "CREATE TABLE test_insert_quorum_with_keeper_loss" + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}') " "ORDER BY a " @@ -380,7 +374,7 @@ def test_insert_quorum_with_keeper_loss_connection(): zero.query(create_query) first.query(create_query) - first.query("SYSTEM STOP FETCHES test_insert_quorum_with_keeper_loss") + first.query(f"SYSTEM STOP FETCHES {table_name}") zero.query("SYSTEM ENABLE FAILPOINT replicated_merge_tree_commit_zk_fail_after_op") zero.query("SYSTEM ENABLE FAILPOINT replicated_merge_tree_insert_retry_pause") @@ -388,7 +382,7 @@ def test_insert_quorum_with_keeper_loss_connection(): with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: insert_future = executor.submit( lambda: zero.query( - "INSERT INTO test_insert_quorum_with_keeper_loss(a,d) VALUES(1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES(1, '2011-01-01')", settings={"insert_quorum_timeout": 150000}, ) ) @@ -401,7 +395,7 @@ def test_insert_quorum_with_keeper_loss_connection(): while True: if ( zk.exists( - "/clickhouse/tables/test_insert_quorum_with_keeper_loss/replicas/zero/is_active" + f"/clickhouse/tables/{table_name}/replicas/zero/is_active" ) is None ): @@ -418,7 +412,7 @@ def 
test_insert_quorum_with_keeper_loss_connection(): "SYSTEM WAIT FAILPOINT finish_set_quorum_failed_parts", timeout=300 ) ) - first.query("SYSTEM START FETCHES test_insert_quorum_with_keeper_loss") + first.query(f"SYSTEM START FETCHES {table_name}") concurrent.futures.wait([quorum_fail_future]) From 3eedc74c5943f23ed4e360533e6e3bb5a6238109 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 5 Nov 2024 16:25:58 +0000 Subject: [PATCH 219/566] Reformatted because of style check --- tests/integration/test_quorum_inserts/test.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index de437fc3206..824cb371595 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -88,10 +88,8 @@ def test_simple_add_replica(started_cluster): def test_drop_replica_and_achieve_quorum(started_cluster): - table_name = "test_drop_replica_and_achieve_quorum_" + uuid.uuid4().hex - zero.query( - f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster" - ) + table_name = "test_drop_replica_and_achieve_quorum_" + uuid.uuid4().hex + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " "(a Int8, d Date) " @@ -361,9 +359,7 @@ def test_insert_quorum_with_ttl(started_cluster): def test_insert_quorum_with_keeper_loss_connection(started_cluster): table_name = "test_insert_quorum_with_keeper_loss_" + uuid.uuid4().hex - zero.query( - f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster" - ) + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " "(a Int8, d Date) " @@ -394,9 +390,7 @@ def test_insert_quorum_with_keeper_loss_connection(started_cluster): zk = cluster.get_kazoo_client("zoo1") while True: if ( - zk.exists( - f"/clickhouse/tables/{table_name}/replicas/zero/is_active" - ) + zk.exists(f"/clickhouse/tables/{table_name}/replicas/zero/is_active") is None ): break From 27153bfc27d45a9fddddf070bb82c7f1e164b455 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 5 Nov 2024 16:58:21 +0000 Subject: [PATCH 220/566] Resolve issues --- tests/integration/test_quorum_inserts/test.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index 824cb371595..7adc51121b4 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -1,8 +1,8 @@ import concurrent import time +import uuid import pytest -import uuid from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager @@ -48,7 +48,6 @@ def started_cluster(): def test_simple_add_replica(started_cluster): table_name = "test_simple_" + uuid.uuid4().hex - zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " @@ -89,7 +88,6 @@ def test_simple_add_replica(started_cluster): def test_drop_replica_and_achieve_quorum(started_cluster): table_name = "test_drop_replica_and_achieve_quorum_" + uuid.uuid4().hex - zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " "(a Int8, d Date) " @@ -287,7 +285,6 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): def test_insert_quorum_with_ttl(started_cluster): table_name = "test_insert_quorum_with_ttl_" + uuid.uuid4().hex - 
zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " @@ -359,7 +356,6 @@ def test_insert_quorum_with_ttl(started_cluster): def test_insert_quorum_with_keeper_loss_connection(started_cluster): table_name = "test_insert_quorum_with_keeper_loss_" + uuid.uuid4().hex - zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " "(a Int8, d Date) " From 0687f7a83f1a64abd586c5046dbc5ddda427e00a Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 5 Nov 2024 17:09:03 +0000 Subject: [PATCH 221/566] Resolve issue --- tests/integration/test_quorum_inserts/test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index 7adc51121b4..a646319c5f9 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -143,7 +143,7 @@ def test_insert_quorum_with_drop_partition(started_cluster, add_new_data): "test_quorum_insert_with_drop_partition_new_data" if add_new_data else "test_quorum_insert_with_drop_partition" - ) + ) + uuid.uuid4().hex zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( @@ -206,12 +206,12 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): "test_insert_quorum_with_move_partition_source_new_data" if add_new_data else "test_insert_quorum_with_move_partition_source" - ) + ) + uuid.uuid4().hex destination_table_name = ( "test_insert_quorum_with_move_partition_destination_new_data" if add_new_data else "test_insert_quorum_with_move_partition_destination" - ) + ) + uuid.uuid4().hex zero.query(f"DROP TABLE IF EXISTS {source_table_name} ON CLUSTER cluster") zero.query(f"DROP TABLE IF EXISTS {destination_table_name} ON CLUSTER cluster") From 98ee0893318bcfd4e0d63b564f513b37579bd3c8 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 5 Nov 2024 17:31:47 +0000 Subject: [PATCH 222/566] Cleanup --- tests/queries/0_stateless/03261_pr_semi_anti_join.sql | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03261_pr_semi_anti_join.sql b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql index d2ea3725d6b..2d671756d6e 100644 --- a/tests/queries/0_stateless/03261_pr_semi_anti_join.sql +++ b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql @@ -1,6 +1,5 @@ DROP TABLE IF EXISTS t1 SYNC; DROP TABLE IF EXISTS t2 SYNC; -create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); CREATE TABLE t1 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t1', '1') order by tuple(); CREATE TABLE t2 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t2', '1') order by tuple(); From 27efa296849e1aaa649adb51ef280410169d8018 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Tue, 5 Nov 2024 18:04:59 +0000 Subject: [PATCH 223/566] update docs --- .../statements/select/order-by.md | 61 ++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/select/order-by.md b/docs/en/sql-reference/statements/select/order-by.md index 512a58d7cd9..25d2e7123fd 100644 --- a/docs/en/sql-reference/statements/select/order-by.md +++ b/docs/en/sql-reference/statements/select/order-by.md @@ -291,7 +291,7 @@ All missed values of `expr` column will be filled sequentially and other columns To fill multiple 
columns, add `WITH FILL` modifier with optional parameters after each field name in `ORDER BY` section. ``` sql -ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr] +ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr] [STALENESS const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr] [STALENESS numeric_expr] [INTERPOLATE [(col [AS expr], ... colN [AS exprN])]] ``` @@ -300,6 +300,7 @@ When `FROM const_expr` not defined sequence of filling use minimal `expr` field When `TO const_expr` not defined sequence of filling use maximum `expr` field value from `ORDER BY`. When `STEP const_numeric_expr` defined then `const_numeric_expr` interprets `as is` for numeric types, as `days` for Date type, as `seconds` for DateTime type. It also supports [INTERVAL](https://clickhouse.com/docs/en/sql-reference/data-types/special-data-types/interval/) data type representing time and date intervals. When `STEP const_numeric_expr` omitted then sequence of filling use `1.0` for numeric type, `1 day` for Date type and `1 second` for DateTime type. +When `STALENESS const_numeric_expr` is defined, the query will generate rows until the difference from the previous row in the original data exceeds `const_numeric_expr`. `INTERPOLATE` can be applied to columns not participating in `ORDER BY WITH FILL`. Such columns are filled based on previous fields values by applying `expr`. If `expr` is not present will repeat previous value. Omitted list will result in including all allowed columns. Example of a query without `WITH FILL`: @@ -497,6 +498,64 @@ Result: └────────────┴────────────┴──────────┘ ``` +Example of a query without `STALENESS`: + +``` sql +SELECT number as key, 5 * number value, 'original' AS source +FROM numbers(16) WHERE key % 5 == 0 +ORDER BY key WITH FILL; +``` + +Result: + +``` text + ┌─key─┬─value─┬─source───┐ + 1. │ 0 │ 0 │ original │ + 2. │ 1 │ 0 │ │ + 3. │ 2 │ 0 │ │ + 4. │ 3 │ 0 │ │ + 5. │ 4 │ 0 │ │ + 6. │ 5 │ 25 │ original │ + 7. │ 6 │ 0 │ │ + 8. │ 7 │ 0 │ │ + 9. │ 8 │ 0 │ │ +10. │ 9 │ 0 │ │ +11. │ 10 │ 50 │ original │ +12. │ 11 │ 0 │ │ +13. │ 12 │ 0 │ │ +14. │ 13 │ 0 │ │ +15. │ 14 │ 0 │ │ +16. │ 15 │ 75 │ original │ + └─────┴───────┴──────────┘ +``` + +Same query after applying `STALENESS 3`: + +``` sql +SELECT number as key, 5 * number value, 'original' AS source +FROM numbers(16) WHERE key % 5 == 0 +ORDER BY key WITH FILL STALENESS 3; +``` + +Result: + +``` text + ┌─key─┬─value─┬─source───┐ + 1. │ 0 │ 0 │ original │ + 2. │ 1 │ 0 │ │ + 3. │ 2 │ 0 │ │ + 4. │ 5 │ 25 │ original │ + 5. │ 6 │ 0 │ │ + 6. │ 7 │ 0 │ │ + 7. │ 10 │ 50 │ original │ + 8. │ 11 │ 0 │ │ + 9. │ 12 │ 0 │ │ +10. │ 15 │ 75 │ original │ +11. │ 16 │ 0 │ │ +12. 
│ 17 │ 0 │ │ + └─────┴───────┴──────────┘ +``` + Example of a query without `INTERPOLATE`: ``` sql From d7da086a2e474b1938568dbd47f6515344ef397f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 5 Nov 2024 20:50:05 +0100 Subject: [PATCH 224/566] Fix tidy --- src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index 943febf4b0e..2add11d0f6a 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -67,7 +67,7 @@ bool ExecuteScalarSubqueriesMatcher::needChildVisit(ASTPtr & node, const ASTPtr return false; } - if (auto tables = node->as()) + if (auto * tables = node->as()) { /// Contrary to what's said in the code block above, ARRAY JOIN needs to resolve the subquery if possible /// and assign an alias for 02367_optimize_trivial_count_with_array_join to pass. Otherwise it will fail in From 45bdc4d4deaf6a48ec08f52a9bc8a765730a9b88 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 6 Nov 2024 01:12:07 +0000 Subject: [PATCH 225/566] Update tests --- .../02932_refreshable_materialized_views_1.reference | 8 ++++---- .../02932_refreshable_materialized_views_2.reference | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_1.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.reference index 3ec0d3b9ee2..b50ea042e86 100644 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views_1.reference +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.reference @@ -1,14 +1,14 @@ <1: created view> a -CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 SECOND\n(\n `x` UInt64\n)\nENGINE = Memory\nAS SELECT number AS x\nFROM numbers(2)\nUNION ALL\nSELECT rand64() AS x +CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 SECOND\n(\n `x` UInt64\n)\nENGINE = Memory\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT number AS x\nFROM numbers(2)\nUNION ALL\nSELECT rand64() AS x <2: refreshed> 3 1 1 <3: time difference at least> 1000 <4.1: fake clock> Scheduled 2050-01-01 00:00:01 2050-01-01 00:00:02 1 3 3 3 0 <4.5: altered> Scheduled 2050-01-01 00:00:01 2052-01-01 00:00:00 -CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 YEAR\n(\n `x` UInt64\n)\nENGINE = Memory\nAS SELECT x * 2 AS x\nFROM default.src +CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 YEAR\n(\n `x` UInt64\n)\nENGINE = Memory\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT x * 2 AS x\nFROM default.src <5: no refresh> 3 <6: refreshed> 2 <7: refreshed> Scheduled 2052-02-03 04:05:06 2054-01-01 00:00:00 -CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a +CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT x * 10 AS y\nFROM default.a <7.5: created dependent> 2052-11-11 11:11:11 <8: refreshed> 20 <9: refreshed> a Scheduled 2054-01-01 00:00:00 @@ -26,4 +26,4 @@ CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n( <17: chain-refreshed> a Scheduled 2062-01-01 00:00:00 <17: chain-refreshed> b Scheduled 2062-01-01 
00:00:00 <18: removed dependency> b Scheduled 2062-03-03 03:03:03 2062-03-03 03:03:03 2064-01-01 00:00:00 -CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a +CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT x * 10 AS y\nFROM default.a diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference index 3eeab4f574e..8dcc3d55603 100644 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference @@ -7,9 +7,9 @@ <25: rename during refresh> f Running <27: cancelled> f Scheduled cancelled <28: drop during refresh> 0 0 -CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nAS SELECT 42 AS x +CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT 42 AS x <29: randomize> 1 1 -CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n)\nAS SELECT x * 10 AS x\nFROM default.src +CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n)\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT x * 10 AS x\nFROM default.src <30: to existing table> 10 <31: to existing table> 10 <31: to existing table> 20 From c23dfa343155a7162b1bcf1f98080f5a08b92f7f Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 6 Nov 2024 12:30:37 +0800 Subject: [PATCH 226/566] fix uninitialized orc data --- .../Impl/NativeORCBlockInputFormat.cpp | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp index 81df330ffb5..5c7637d3e51 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp @@ -1534,15 +1534,24 @@ static ColumnWithTypeAndName readColumnWithDateData( for (size_t i = 0; i < orc_int_column->numElements; ++i) { - Int32 days_num = static_cast(orc_int_column->data[i]); - if (check_date_range && (days_num > DATE_LUT_MAX_EXTEND_DAY_NUM || days_num < -DAYNUM_OFFSET_EPOCH)) - throw Exception( - ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, - "Input value {} of a column \"{}\" exceeds the range of type Date32", - days_num, - column_name); + if (!orc_int_column->hasNulls || orc_int_column->notNull[i]) + { + Int32 days_num = static_cast(orc_int_column->data[i]); + if (check_date_range && (days_num > DATE_LUT_MAX_EXTEND_DAY_NUM || days_num < -DAYNUM_OFFSET_EPOCH)) + throw Exception( + ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, + "Input value {} of a column \"{}\" exceeds the range of type Date32", + days_num, + column_name); + + column_data.push_back(days_num); + } + else + { + /// ORC library doesn't gurantee that orc_int_column->data[i] is initialized to zero when orc_int_column->notNull[i] is false since https://github.com/ClickHouse/ClickHouse/pull/69473 + column_data.push_back(0); + } - column_data.push_back(days_num); } return 
{std::move(internal_column), internal_type, column_name}; From 6a8df5ea89724d7686f6c520bc436b7cb80294bd Mon Sep 17 00:00:00 2001 From: nauu Date: Wed, 6 Nov 2024 14:57:14 +0800 Subject: [PATCH 227/566] support the endpoint of oss accelerator --- src/IO/S3/URI.cpp | 12 ++++++++++-- src/IO/tests/gtest_s3_uri.cpp | 16 ++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index 7c6a21941eb..ad746ff3326 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -37,7 +37,7 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) /// Case when bucket name represented in domain name of S3 URL. /// E.g. (https://bucket-name.s3.region.amazonaws.com/key) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#virtual-hosted-style-access - static const RE2 virtual_hosted_style_pattern(R"((.+)\.(s3express[\-a-z0-9]+|s3|cos|obs|oss|eos)([.\-][a-z0-9\-.:]+))"); + static const RE2 virtual_hosted_style_pattern(R"((.+)\.(s3express[\-a-z0-9]+|s3|cos|obs|oss-data-acc|oss|eos)([.\-][a-z0-9\-.:]+))"); /// Case when AWS Private Link Interface is being used /// E.g. (bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/bucket-name/key) @@ -115,7 +115,15 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) && re2::RE2::FullMatch(uri.getAuthority(), virtual_hosted_style_pattern, &bucket, &name, &endpoint_authority_from_uri)) { is_virtual_hosted_style = true; - endpoint = uri.getScheme() + "://" + name + endpoint_authority_from_uri; + if (name == "oss-data-acc") + { + bucket = bucket.substr(0, bucket.find(".")); + endpoint = uri.getScheme() + "://" + uri.getHost().substr(bucket.length() + 1); + } + else + { + endpoint = uri.getScheme() + "://" + name + endpoint_authority_from_uri; + } validateBucket(bucket, uri); if (!uri.getPath().empty()) diff --git a/src/IO/tests/gtest_s3_uri.cpp b/src/IO/tests/gtest_s3_uri.cpp index 8696fab0616..6167313b634 100644 --- a/src/IO/tests/gtest_s3_uri.cpp +++ b/src/IO/tests/gtest_s3_uri.cpp @@ -212,6 +212,22 @@ TEST(S3UriTest, validPatterns) ASSERT_EQ("", uri.version_id); ASSERT_EQ(true, uri.is_virtual_hosted_style); } + { + S3::URI uri("https://bucket-test1.oss-cn-beijing-internal.aliyuncs.com/ab-test"); + ASSERT_EQ("https://oss-cn-beijing-internal.aliyuncs.com", uri.endpoint); + ASSERT_EQ("bucket-test1", uri.bucket); + ASSERT_EQ("ab-test", uri.key); + ASSERT_EQ("", uri.version_id); + ASSERT_EQ(true, uri.is_virtual_hosted_style); + } + { + S3::URI uri("https://bucket-test.cn-beijing-internal.oss-data-acc.aliyuncs.com/ab-test"); + ASSERT_EQ("https://cn-beijing-internal.oss-data-acc.aliyuncs.com", uri.endpoint); + ASSERT_EQ("bucket-test", uri.bucket); + ASSERT_EQ("ab-test", uri.key); + ASSERT_EQ("", uri.version_id); + ASSERT_EQ(true, uri.is_virtual_hosted_style); + } } TEST(S3UriTest, versionIdChecks) From 127f324822e7b45259eb6ec9b9f5168933350aa1 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 6 Nov 2024 15:03:41 +0800 Subject: [PATCH 228/566] add uts --- .../Formats/Impl/NativeORCBlockInputFormat.cpp | 3 +-- .../03259_orc_date_out_of_range.reference | 12 ++++++++++++ .../0_stateless/03259_orc_date_out_of_range.sql | 15 +++++++++++++++ 3 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03259_orc_date_out_of_range.reference create mode 100644 tests/queries/0_stateless/03259_orc_date_out_of_range.sql diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp 
b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp index 5c7637d3e51..26aa3555c2b 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp @@ -1548,10 +1548,9 @@ static ColumnWithTypeAndName readColumnWithDateData( } else { - /// ORC library doesn't gurantee that orc_int_column->data[i] is initialized to zero when orc_int_column->notNull[i] is false since https://github.com/ClickHouse/ClickHouse/pull/69473 + /// ORC library doesn't guarantee that orc_int_column->data[i] is initialized to zero when orc_int_column->notNull[i] is false since https://github.com/ClickHouse/ClickHouse/pull/69473 column_data.push_back(0); } - } return {std::move(internal_column), internal_type, column_name}; diff --git a/tests/queries/0_stateless/03259_orc_date_out_of_range.reference b/tests/queries/0_stateless/03259_orc_date_out_of_range.reference new file mode 100644 index 00000000000..ddac785369f --- /dev/null +++ b/tests/queries/0_stateless/03259_orc_date_out_of_range.reference @@ -0,0 +1,12 @@ +number Nullable(Int64) +date_field Nullable(Date32) +\N +1970-01-02 +\N +1970-01-04 +\N +1970-01-06 +\N +1970-01-08 +\N +1970-01-10 diff --git a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql new file mode 100644 index 00000000000..470c4ff3817 --- /dev/null +++ b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql @@ -0,0 +1,15 @@ + +-- Tags: no-parallel + +SET session_timezone = 'UTC'; +SET engine_file_truncate_on_insert = 1; + +insert into function file('03259.orc') +select + number, + if (number % 2 = 0, null, toDate32(number)) as date_field + from numbers(10); + +desc file('03259.orc'); + +select date_field from file('03259.orc') order by number; From ef0be4a01cb4fd9c4723ecf31b96aab7ee6a30ac Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 6 Nov 2024 15:06:00 +0800 Subject: [PATCH 229/566] fix typo --- tests/queries/0_stateless/03259_orc_date_out_of_range.sql | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql index 470c4ff3817..409e8ce079d 100644 --- a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql +++ b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql @@ -1,4 +1,3 @@ - -- Tags: no-parallel SET session_timezone = 'UTC'; From 590029a33bfd844eede8b4ad570464d0cf86c938 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 6 Nov 2024 16:38:09 +0800 Subject: [PATCH 230/566] fix orc date32 overflow --- tests/queries/0_stateless/03259_orc_date_out_of_range.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql index 409e8ce079d..7103b93b147 100644 --- a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql +++ b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql @@ -3,12 +3,12 @@ SET session_timezone = 'UTC'; SET engine_file_truncate_on_insert = 1; -insert into function file('03259.orc') +insert into function file('03259.orc', 'ORC') select number, if (number % 2 = 0, null, toDate32(number)) as date_field - from numbers(10); +from numbers(10); -desc file('03259.orc'); +desc file('03259.orc', 'ORC'); -select date_field from file('03259.orc') order by number; +select date_field from file('03259.orc', 'ORC') order by number; From 
6761fccbf30cba1b18331bab993710e89c047aba Mon Sep 17 00:00:00 2001
From: taiyang-li <654010905@qq.com>
Date: Wed, 6 Nov 2024 17:10:00 +0800
Subject: [PATCH 231/566] fix orc date32 overflow

---
 tests/queries/0_stateless/03259_orc_date_out_of_range.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql
index 7103b93b147..e73d2faa5dd 100644
--- a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql
+++ b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql
@@ -1,4 +1,4 @@
--- Tags: no-parallel
+-- Tags: no-fasttest, no-parallel

 SET session_timezone = 'UTC';
 SET engine_file_truncate_on_insert = 1;

From 068b4fe8cfa184c4aaecda057b78d7b8acfdbb06 Mon Sep 17 00:00:00 2001
From: maxvostrikov
Date: Wed, 6 Nov 2024 12:16:59 +0100
Subject: [PATCH 232/566] squash! Missing tests in several tests in 24.10

Added corner cases for tests for:
to_utc_timestamp and from_utc_timestamp (more timezones, special timezones, epoch corners does not look right, raising a bug over that)
arrayUnion (empty and big arrays)
quantilesExactWeightedInterpolated (more data types)
---
 tests/queries/0_stateless/02812_from_to_utc_timestamp.reference | 2 ++
 tests/queries/0_stateless/02812_from_to_utc_timestamp.sh | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference b/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference
index bdce849e069..fb92bdda821 100644
--- a/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference
+++ b/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference
@@ -6,5 +6,7 @@
 2024-10-24 16:22:33 2024-10-24 06:22:33
 leap year: 2024-02-29 16:22:33 2024-02-29 06:22:33
 non-leap year: 2023-03-01 16:22:33 2023-03-01 06:22:33
+leap year: 2024-02-29 04:22:33 2024-02-29 19:22:33
+non-leap year: 2023-03-01 04:22:33 2023-02-28 19:22:33
 timezone with half-hour offset: 2024-02-29 00:52:33 2024-02-29 21:52:33
 jump over a year: 2024-01-01 04:01:01 2023-12-31 20:01:01
diff --git a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh
index 441fc254256..20ae224332c 100755
--- a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh
+++ b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh
@@ -18,6 +18,8 @@ $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33')
 $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST')"
 $CLICKHOUSE_CLIENT -q "select 'leap year:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST')"
 $CLICKHOUSE_CLIENT -q "select 'non-leap year:', to_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST')"
+$CLICKHOUSE_CLIENT -q "select 'leap year:', to_utc_timestamp(toDateTime('2024-02-28 23:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-03-01 00:22:33'), 'EST')"
+$CLICKHOUSE_CLIENT -q "select 'non-leap year:', to_utc_timestamp(toDateTime('2023-02-28 23:22:33'), 'EST'), from_utc_timestamp(toDateTime('2023-03-01 00:22:33'), 'EST')"
 $CLICKHOUSE_CLIENT -q "select 'timezone with half-hour offset:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'Australia/Adelaide'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'Australia/Adelaide')"
 $CLICKHOUSE_CLIENT -q 
"select 'jump over a year:', to_utc_timestamp(toDateTime('2023-12-31 23:01:01'), 'EST'), from_utc_timestamp(toDateTime('2024-01-01 01:01:01'), 'EST')" From 699b9d40263078285a8fce6a031bc74ce72c16d3 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Wed, 6 Nov 2024 20:20:44 +0800 Subject: [PATCH 233/566] fix comments --- src/Functions/parseDateTime.cpp | 277 ++++++++++++++++++++++---------- 1 file changed, 194 insertions(+), 83 deletions(-) diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 65bc65fb45c..976be53a21e 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -57,8 +57,15 @@ namespace Null }; + enum class ReturnType: uint8_t + { + DateTime, + DateTime64 + }; + constexpr Int32 minYear = 1970; constexpr Int32 maxYear = 2106; + constexpr Int32 maxPrecisionOfDateTime64 = 6; const std::unordered_map> dayOfWeekMap{ {"mon", {"day", 1}}, @@ -570,8 +577,8 @@ namespace } }; - /// _FUNC_(str[, format, timezone]) - template + /// _FUNC_(str[scale, format, timezone]) + template class FunctionParseDateTimeImpl : public IFunction { public: @@ -602,79 +609,112 @@ namespace {"time", static_cast(&isString), nullptr, "String"} }; - FunctionArgumentDescriptors optional_args{ - {"format", static_cast(&isString), nullptr, "String"}, - {"timezone", static_cast(&isString), &isColumnConst, "const String"} - }; - + FunctionArgumentDescriptors optional_args; + if constexpr (return_type == ReturnType::DateTime64) + { + optional_args = { + {"precision or format", static_cast([](const IDataType & data_type) -> bool { + return isUInt(data_type) || isString(data_type); + }), nullptr, "Number or String"}, + {"format", static_cast(&isString), nullptr, "String"}, + {"timezone", static_cast(&isString), &isColumnConst, "const String"} + }; + } + else + optional_args = { + {"format", static_cast(&isString), nullptr, "String"}, + {"timezone", static_cast(&isString), &isColumnConst, "const String"} + }; validateFunctionArguments(*this, arguments, mandatory_args, optional_args); String time_zone_name = getTimeZone(arguments).getTimeZone(); - DataTypePtr date_type = nullptr; - if constexpr (parseDateTime64) + DataTypePtr data_type; + if constexpr (return_type == ReturnType::DateTime64) { - String format = getFormat(arguments); - std::vector instructions = parseFormat(format); - UInt32 scale = 0; - if (!instructions.empty()) + if (arguments.size() == 1) + return std::make_shared(0, time_zone_name); + else { - for (const auto & ins : instructions) + UInt32 precision = 0; + if (isUInt(arguments[1].type)) { - if (scale > 0) - break; - const String fragment = ins.getFragment(); + const auto * col_precision = checkAndGetColumnConst(arguments[1].column.get()); + if (col_precision) + precision = col_precision->getValue(); + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The input precision value may exceed the max value of `DateTime64`: {}.", + maxPrecisionOfDateTime64); + } + /// Construct the return type `DataTypDateTime64` with precision and time zone name. The precision value can be specified or be extracted + /// from the format string by computing how many 'S' characters are contained in the format's micorsceond fragment. 
+ String format = getFormat(arguments, precision); + std::vector instructions = parseFormat(format); + for (const auto & instruction : instructions) + { + const String & fragment = instruction.getFragment(); + UInt32 val = 0; for (char ch : fragment) { if (ch != 'S') { - scale = 0; + val = 0; break; } else - scale++; + val++; } + /// If the precision is already specified by the second parameter, but it not equals the value that extract from the format string, + /// then we should throw an exception; If the precision is not specified, then we set its value as the extracted one. + if (val != 0 && precision != 0 && val != precision) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The precision of input format string {} not equals the given precision value {}.", + format, + precision); + else if (precision == 0 && val != 0) + precision = val; } + if (precision > maxPrecisionOfDateTime64) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The precision of the input format string {} exceed the max precision value {}.", + format, + maxPrecisionOfDateTime64); + data_type = std::make_shared(precision, time_zone_name); } - date_type = std::make_shared(scale, time_zone_name); } else - date_type = std::make_shared(time_zone_name); + data_type = std::make_shared(time_zone_name); if (error_handling == ErrorHandling::Null) - return std::make_shared(date_type); - return date_type; + return std::make_shared(data_type); + return data_type; } ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { - ColumnUInt8::MutablePtr col_null_map; + DataTypePtr non_null_result_type; if constexpr (error_handling == ErrorHandling::Null) - col_null_map = ColumnUInt8::create(input_rows_count, 0); - if constexpr (parseDateTime64) + non_null_result_type = removeNullable(result_type); + else + non_null_result_type = result_type; + + if constexpr (return_type == ReturnType::DateTime64) { - const DataTypeDateTime64 * datatime64_type = checkAndGetDataType(removeNullable(result_type).get()); - auto col_res = ColumnDateTime64::create(input_rows_count, datatime64_type->getScale()); - PaddedPODArray & res_data = col_res->getData(); - executeImpl2(arguments, result_type, input_rows_count, res_data, col_null_map); - if constexpr (error_handling == ErrorHandling::Null) - return ColumnNullable::create(std::move(col_res), std::move(col_null_map)); - else - return col_res; + const auto * datatime64_type = checkAndGetDataType(non_null_result_type.get()); + MutableColumnPtr col_res = ColumnDateTime64::create(input_rows_count, datatime64_type->getScale()); + ColumnDateTime64 * col_datetime64 = assert_cast(col_res.get()); + return executeImpl2(arguments, result_type, input_rows_count, col_res, col_datetime64->getData()); } else { - auto col_res = ColumnDateTime::create(input_rows_count); - PaddedPODArray & res_data = col_res->getData(); - executeImpl2(arguments, result_type, input_rows_count, res_data, col_null_map); - if constexpr (error_handling == ErrorHandling::Null) - return ColumnNullable::create(std::move(col_res), std::move(col_null_map)); - else - return col_res; + MutableColumnPtr col_res = ColumnDateTime::create(input_rows_count); + ColumnDateTime * col_datetime = assert_cast(col_res.get()); + return executeImpl2(arguments, result_type, input_rows_count, col_res, col_datetime->getData()); } } template - void executeImpl2(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, - PaddedPODArray & res_data, 
ColumnUInt8::MutablePtr & col_null_map) const + ColumnPtr executeImpl2(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, + MutableColumnPtr & col_res, PaddedPODArray & res_data) const { const auto * col_str = checkAndGetColumn(arguments[0].column.get()); if (!col_str) @@ -683,8 +723,21 @@ namespace "Illegal column {} of first ('str') argument of function {}. Must be string.", arguments[0].column->getName(), getName()); + + ColumnUInt8::MutablePtr col_null_map; + if constexpr (error_handling == ErrorHandling::Null) + col_null_map = ColumnUInt8::create(input_rows_count, 0); - String format = getFormat(arguments); + Int64 multiplier = 0; + UInt32 precision = 0; + if constexpr (return_type == ReturnType::DateTime64) + { + const DataTypeDateTime64 * datatime64_type = checkAndGetDataType(removeNullable(result_type).get()); + precision = datatime64_type->getScale(); + multiplier = DecimalUtils::scaleMultiplier(precision); + } + + String format = getFormat(arguments, precision); const auto & time_zone = getTimeZone(arguments); std::vector instructions = parseFormat(format); @@ -733,8 +786,8 @@ namespace Int64OrError result = 0; - /// Ensure all input was consumed - if (!parseDateTime64 && cur < end) + /// Ensure all input was consumed when the return type is `DateTime`. + if (return_type == ReturnType::DateTime && cur < end) { result = tl::unexpected(ErrorCodeAndMessage( ErrorCodes::CANNOT_PARSE_DATETIME, @@ -747,12 +800,8 @@ namespace { if (result = datetime.buildDateTime(time_zone); result.has_value()) { - if constexpr (parseDateTime64) - { - const DataTypeDateTime64 * datatime64_type = checkAndGetDataType(removeNullable(result_type).get()); - Int64 multiplier = DecimalUtils::scaleMultiplier(datatime64_type->getScale()); + if constexpr (return_type == ReturnType::DateTime64) res_data[i] = static_cast(*result) * multiplier + datetime.microsecond; - } else res_data[i] = static_cast(*result); } @@ -777,6 +826,10 @@ namespace } } } + if constexpr (error_handling == ErrorHandling::Null) + return ColumnNullable::create(std::move(col_res), std::move(col_null_map)); + else + return std::move(col_res); } @@ -808,7 +861,7 @@ namespace explicit Instruction(const String & literal_) : literal(literal_), fragment("LITERAL") { } explicit Instruction(String && literal_) : literal(std::move(literal_)), fragment("LITERAL") { } - String getFragment() const { return fragment; } + const String & getFragment() const { return fragment; } /// For debug [[maybe_unused]] String toString() const @@ -1695,7 +1748,7 @@ namespace } [[nodiscard]] - static PosOrError jodaMicroSecondOfSecond(size_t repetitions, Pos cur, Pos end, const String & fragment, DateTime & date) + static PosOrError jodaMicrosecondOfSecond(size_t repetitions, Pos cur, Pos end, const String & fragment, DateTime & date) { Int32 microsecond; ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumberWithVariableLength(cur, end, false, false, false, repetitions, std::max(repetitions, 2uz), fragment, microsecond))) @@ -1704,25 +1757,25 @@ namespace } [[nodiscard]] - static PosOrError jodaTimezoneId(size_t, Pos cur, Pos end, const String &, DateTime & date) + static PosOrError jodaTimezone(size_t, Pos cur, Pos end, const String &, DateTime & date) { - String dateTimeZone; + String read_time_zone; while (cur <= end) { - dateTimeZone += *cur; + read_time_zone += *cur; ++cur; } - const DateLUTImpl & date_time_zone = DateLUT::instance(dateTimeZone); + const DateLUTImpl & date_time_zone = DateLUT::instance(read_time_zone); const 
auto result = date.buildDateTime(date_time_zone); if (result.has_value()) { - const auto timezoneOffset = date_time_zone.timezoneOffset(*result); + const DateLUTImpl::Time timezone_offset = date_time_zone.timezoneOffset(*result); date.has_time_zone_offset = true; - date.time_zone_offset = timezoneOffset; + date.time_zone_offset = timezone_offset; return cur; } else - RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Unable to build date time from timezone {}", dateTimeZone) + RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Unable to build date time from timezone {}", read_time_zone) } [[nodiscard]] @@ -1745,8 +1798,22 @@ namespace Int32 hour; ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumberWithVariableLength(cur, end, false, false, false, repetitions, std::max(repetitions, 2uz), fragment, hour))) + if (hour < 0 || hour > 23) + RETURN_ERROR( + ErrorCodes::CANNOT_PARSE_DATETIME, + "Unable to parse fragment {} from {} because of the hour of datetime not in range [0, 23]: {}", + fragment, + std::string_view(cur, end - cur), + std::string_view(cur, 1)) Int32 minute; ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumberWithVariableLength(cur, end, false, false, false, repetitions, std::max(repetitions, 2uz), fragment, minute))) + if (minute < 0 || minute > 59) + RETURN_ERROR( + ErrorCodes::CANNOT_PARSE_DATETIME, + "Unable to parse fragment {} from {} because of the minute of datetime not in range [0, 59]: {}", + fragment, + std::string_view(cur, end - cur), + std::string_view(cur, 1)) date.has_time_zone_offset = true; date.time_zone_offset = sign * (hour * 3600 + minute * 60); return cur; @@ -2133,10 +2200,10 @@ namespace instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaSecondOfMinute, repetitions)); break; case 'S': - instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaMicroSecondOfSecond, repetitions)); + instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaMicrosecondOfSecond, repetitions)); break; case 'z': - instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaTimezoneId, repetitions)); + instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaTimezone, repetitions)); break; case 'Z': instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaTimezoneOffset, repetitions)); @@ -2156,26 +2223,45 @@ namespace } - String getFormat(const ColumnsWithTypeAndName & arguments) const + String getFormat(const ColumnsWithTypeAndName & arguments, UInt32 precision) const { - if (arguments.size() == 1) + size_t format_arg_index = 1; + if constexpr (return_type == ReturnType::DateTime64) { - if constexpr (parse_syntax == ParseSyntax::MySQL) - return "%Y-%m-%d %H:%i:%s"; + /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22.22.123', 3), then the format is treated + /// as default value `yyyy-MM-dd HH:mm:ss`. + /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 'yyyy-MM-dd HH:mm:ss.SSS')`, + /// then the second argument is the format. + /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS')`, + /// then the third argument is the format. 
+ if (arguments.size() > 1 && isString(removeNullable(arguments[1].type))) + format_arg_index = 1; else - return "yyyy-MM-dd HH:mm:ss"; + format_arg_index = 2; + } + + if (arguments.size() <= format_arg_index) + { + String format; + if constexpr (parse_syntax == ParseSyntax::MySQL) + format = "%Y-%m-%d %H:%i:%s"; + else + format = "yyyy-MM-dd HH:mm:ss"; + if (precision > 0) + format += "." + String(precision, 'S'); + return format; } else { - if (!arguments[1].column || !isColumnConst(*arguments[1].column)) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Argument at index {} for function {} must be constant", 1, getName()); + if (!arguments[format_arg_index].column || !isColumnConst(*arguments[format_arg_index].column)) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Argument at index {} for function {} must be constant", format_arg_index, getName()); - const auto * col_format = checkAndGetColumnConst(arguments[1].column.get()); + const auto * col_format = checkAndGetColumnConst(arguments[format_arg_index].column.get()); if (!col_format) throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of second ('format') argument of function {}. Must be constant string.", - arguments[1].column->getName(), + arguments[format_arg_index].column->getName(), getName()); return col_format->getValue(); } @@ -2183,15 +2269,19 @@ namespace const DateLUTImpl & getTimeZone(const ColumnsWithTypeAndName & arguments) const { - if (arguments.size() < 3) + size_t timezone_arg_index = 2; + if constexpr (return_type == ReturnType::DateTime64) + timezone_arg_index = 3; + + if (arguments.size() <= timezone_arg_index) return DateLUT::instance(); - const auto * col = checkAndGetColumnConst(arguments[2].column.get()); + const auto * col = checkAndGetColumnConst(arguments[timezone_arg_index].column.get()); if (!col) throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of third ('timezone') argument of function {}. 
Must be constant String.", - arguments[2].column->getName(), + arguments[timezone_arg_index].column->getName(), getName()); String time_zone = col->getValue(); @@ -2229,6 +2319,21 @@ namespace static constexpr auto name = "parseDateTimeInJodaSyntaxOrNull"; }; + struct NameParseDateTime64 + { + static constexpr auto name = "parseDateTime64"; + }; + + struct NameParseDateTime64OrZero + { + static constexpr auto name = "parseDateTime64OrZero"; + }; + + struct NameParseDateTime64OrNull + { + static constexpr auto name = "parseDateTime64OrNull"; + }; + struct NameParseDateTime64InJodaSyntax { static constexpr auto name = "parseDateTime64InJodaSyntax"; @@ -2244,15 +2349,18 @@ namespace static constexpr auto name = "parseDateTime64InJodaSyntaxOrNull"; }; - using FunctionParseDateTime = FunctionParseDateTimeImpl; - using FunctionParseDateTimeOrZero = FunctionParseDateTimeImpl; - using FunctionParseDateTimeOrNull = FunctionParseDateTimeImpl; - using FunctionParseDateTimeInJodaSyntax = FunctionParseDateTimeImpl; - using FunctionParseDateTimeInJodaSyntaxOrZero = FunctionParseDateTimeImpl; - using FunctionParseDateTimeInJodaSyntaxOrNull = FunctionParseDateTimeImpl; - using FunctionParseDateTime64InJodaSyntax = FunctionParseDateTimeImpl; - using FunctionParseDateTime64InJodaSyntaxOrZero = FunctionParseDateTimeImpl; - using FunctionParseDateTime64InJodaSyntaxOrNull = FunctionParseDateTimeImpl; + using FunctionParseDateTime = FunctionParseDateTimeImpl; + using FunctionParseDateTimeOrZero = FunctionParseDateTimeImpl; + using FunctionParseDateTimeOrNull = FunctionParseDateTimeImpl; + using FunctionParseDateTime64 = FunctionParseDateTimeImpl; + using FunctionParseDateTime64OrZero = FunctionParseDateTimeImpl; + using FunctionParseDateTime64OrNull = FunctionParseDateTimeImpl; + using FunctionParseDateTimeInJodaSyntax = FunctionParseDateTimeImpl; + using FunctionParseDateTimeInJodaSyntaxOrZero = FunctionParseDateTimeImpl; + using FunctionParseDateTimeInJodaSyntaxOrNull = FunctionParseDateTimeImpl; + using FunctionParseDateTime64InJodaSyntax = FunctionParseDateTimeImpl; + using FunctionParseDateTime64InJodaSyntaxOrZero = FunctionParseDateTimeImpl; + using FunctionParseDateTime64InJodaSyntaxOrNull = FunctionParseDateTimeImpl; } REGISTER_FUNCTION(ParseDateTime) @@ -2262,6 +2370,9 @@ REGISTER_FUNCTION(ParseDateTime) factory.registerFunction(); factory.registerFunction(); factory.registerAlias("str_to_date", FunctionParseDateTimeOrNull::name, FunctionFactory::Case::Insensitive); + factory.registerFunction(); + factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); From 533009b914761e317025b256b31474f44a9b4734 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Wed, 6 Nov 2024 08:57:32 -0400 Subject: [PATCH 234/566] Update AlterCommands.cpp --- src/Storages/AlterCommands.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index ab4403b3a94..c14775057a5 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1496,7 +1496,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const if (command.to_remove == AlterCommand::RemoveProperty::CODEC && column_from_table.codec == nullptr) throw Exception( ErrorCodes::BAD_ARGUMENTS, - "Column {} doesn't have TTL, cannot remove it", + "Column {} doesn't have CODEC, cannot remove it", backQuote(column_name)); if (command.to_remove == AlterCommand::RemoveProperty::COMMENT && 
column_from_table.comment.empty()) throw Exception( From 338af374d88c134b39d75dd1f56f5630cd41fcc2 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 6 Nov 2024 09:52:25 +0100 Subject: [PATCH 235/566] remove the method remove in favor of the method removeIfExists --- .../AzureBlobStorage/AzureObjectStorage.cpp | 20 ++++++++--------- .../AzureBlobStorage/AzureObjectStorage.h | 4 ++-- .../Cached/CachedObjectStorage.cpp | 22 +++++++++---------- .../Cached/CachedObjectStorage.h | 4 ++-- .../DiskObjectStorageTransaction.cpp | 6 ++--- .../ObjectStorages/HDFS/HDFSObjectStorage.h | 4 ++-- src/Disks/ObjectStorages/IObjectStorage.h | 4 ++-- .../ObjectStorages/Local/LocalObjectStorage.h | 4 ++-- .../MetadataStorageFromPlainObjectStorage.cpp | 4 ++-- ...torageFromPlainObjectStorageOperations.cpp | 4 ++-- .../ObjectStorages/S3/S3ObjectStorage.cpp | 16 +++++++------- src/Disks/ObjectStorages/S3/S3ObjectStorage.h | 4 ++-- .../ObjectStorages/Web/WebObjectStorage.cpp | 16 +++++++------- .../ObjectStorages/Web/WebObjectStorage.h | 4 ++-- .../ObjectStorageQueueSource.cpp | 2 +- 15 files changed, 58 insertions(+), 60 deletions(-) diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index 673c82806bd..959afa65672 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -278,17 +278,17 @@ void AzureObjectStorage::removeObjectImpl(const StoredObject & object, const Sha } /// Remove file. Throws exception if file doesn't exists or it's a directory. -void AzureObjectStorage::removeObject(const StoredObject & object) -{ - removeObjectImpl(object, client.get(), false); -} +// void AzureObjectStorage::removeObject(const StoredObject & object) +// { +// removeObjectImpl(object, client.get(), false); +// } -void AzureObjectStorage::removeObjects(const StoredObjects & objects) -{ - auto client_ptr = client.get(); - for (const auto & object : objects) - removeObjectImpl(object, client_ptr, false); -} +// void AzureObjectStorage::removeObjects(const StoredObjects & objects) +// { +// auto client_ptr = client.get(); +// for (const auto & object : objects) +// removeObjectImpl(object, client_ptr, false); +// } void AzureObjectStorage::removeObjectIfExists(const StoredObject & object) { diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h index 58225eccd90..433fe7a852e 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h @@ -60,9 +60,9 @@ public: const WriteSettings & write_settings = {}) override; /// Remove file. Throws exception if file doesn't exists or it's a directory. 
- void removeObject(const StoredObject & object) override; + //void removeObject(const StoredObject & object) override; - void removeObjects(const StoredObjects & objects) override; + //void removeObjects(const StoredObjects & objects) override; void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index 163ff3a9c68..f2750e6814f 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -148,19 +148,19 @@ void CachedObjectStorage::removeCacheIfExists(const std::string & path_key_for_c cache->removeKeyIfExists(getCacheKey(path_key_for_cache), FileCache::getCommonUser().user_id); } -void CachedObjectStorage::removeObject(const StoredObject & object) -{ - removeCacheIfExists(object.remote_path); - object_storage->removeObject(object); -} +// void CachedObjectStorage::removeObject(const StoredObject & object) +// { +// removeCacheIfExists(object.remote_path); +// object_storage->removeObject(object); +// } -void CachedObjectStorage::removeObjects(const StoredObjects & objects) -{ - for (const auto & object : objects) - removeCacheIfExists(object.remote_path); +// void CachedObjectStorage::removeObjects(const StoredObjects & objects) +// { +// for (const auto & object : objects) +// removeCacheIfExists(object.remote_path); - object_storage->removeObjects(objects); -} +// object_storage->removeObjects(objects); +// } void CachedObjectStorage::removeObjectIfExists(const StoredObject & object) { diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h index b77baf21e40..7e10057e04c 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h @@ -45,9 +45,9 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void removeObject(const StoredObject & object) override; + // void removeObject(const StoredObject & object) override; - void removeObjects(const StoredObjects & objects) override; + // void removeObjects(const StoredObjects & objects) override; void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp index 64323fb6f3c..19de2bb78af 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp @@ -480,8 +480,7 @@ struct WriteFileObjectStorageOperation final : public IDiskObjectStorageOperatio void undo() override { - if (object_storage.exists(object)) - object_storage.removeObject(object); + object_storage.removeObjectIfExists(object); } void finalize() override @@ -543,8 +542,7 @@ struct CopyFileObjectStorageOperation final : public IDiskObjectStorageOperation void undo() override { - for (const auto & object : created_objects) - destination_object_storage.removeObject(object); + destination_object_storage.removeObjectsIfExist(created_objects); } void finalize() override diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h index b53161beb76..317399b4753 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h @@ -78,9 +78,9 @@ public: const WriteSettings & 
write_settings = {}) override; /// Remove file. Throws exception if file doesn't exists or it's a directory. - void removeObject(const StoredObject & object) override; + void removeObject(const StoredObject & object); - void removeObjects(const StoredObjects & objects) override; + void removeObjects(const StoredObjects & objects); void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/IObjectStorage.h b/src/Disks/ObjectStorages/IObjectStorage.h index 8dde96b8b16..adb36762539 100644 --- a/src/Disks/ObjectStorages/IObjectStorage.h +++ b/src/Disks/ObjectStorages/IObjectStorage.h @@ -161,11 +161,11 @@ public: virtual bool isRemote() const = 0; /// Remove object. Throws exception if object doesn't exists. - virtual void removeObject(const StoredObject & object) = 0; + // virtual void removeObject(const StoredObject & object) = 0; /// Remove multiple objects. Some object storages can do batch remove in a more /// optimal way. - virtual void removeObjects(const StoredObjects & objects) = 0; + // virtual void removeObjects(const StoredObjects & objects) = 0; /// Remove object on path if exists virtual void removeObjectIfExists(const StoredObject & object) = 0; diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h index f1a0391a984..ffc151bda04 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h @@ -42,9 +42,9 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void removeObject(const StoredObject & object) override; + void removeObject(const StoredObject & object); - void removeObjects(const StoredObjects & objects) override; + void removeObjects(const StoredObjects & objects); void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp index d56c5d9143c..27aa9304de7 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp @@ -203,7 +203,7 @@ void MetadataStorageFromPlainObjectStorageTransaction::unlinkFile(const std::str { auto object_key = metadata_storage.object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */); auto object = StoredObject(object_key.serialize()); - metadata_storage.object_storage->removeObject(object); + metadata_storage.object_storage->removeObjectIfExists(object); } void MetadataStorageFromPlainObjectStorageTransaction::removeDirectory(const std::string & path) @@ -211,7 +211,7 @@ void MetadataStorageFromPlainObjectStorageTransaction::removeDirectory(const std if (metadata_storage.object_storage->isWriteOnce()) { for (auto it = metadata_storage.iterateDirectory(path); it->isValid(); it->next()) - metadata_storage.object_storage->removeObject(StoredObject(it->path())); + metadata_storage.object_storage->removeObjectIfExists(StoredObject(it->path())); } else { diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index ea57d691908..62015631aa5 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -107,7 +107,7 @@ void 
MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::un auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::sub(metric, 1); - object_storage->removeObject(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME)); + object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME)); } else if (write_created) object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME)); @@ -247,7 +247,7 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std: auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); auto metadata_object = StoredObject(/*remote_path*/ metadata_object_key.serialize(), /*local_path*/ path / PREFIX_PATH_FILE_NAME); - object_storage->removeObject(metadata_object); + object_storage->removeObjectIfExists(metadata_object); { std::lock_guard lock(path_map.mutex); diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 47ef97401f2..7ed118c6b07 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -326,20 +326,20 @@ void S3ObjectStorage::removeObjectsImpl(const StoredObjects & objects, bool if_e ProfileEvents::DiskS3DeleteObjects); } -void S3ObjectStorage::removeObject(const StoredObject & object) -{ - removeObjectImpl(object, false); -} +// void S3ObjectStorage::removeObject(const StoredObject & object) +// { +// removeObjectImpl(object, false); +// } void S3ObjectStorage::removeObjectIfExists(const StoredObject & object) { removeObjectImpl(object, true); } -void S3ObjectStorage::removeObjects(const StoredObjects & objects) -{ - removeObjectsImpl(objects, false); -} +// void S3ObjectStorage::removeObjects(const StoredObjects & objects) +// { +// removeObjectsImpl(objects, false); +// } void S3ObjectStorage::removeObjectsIfExist(const StoredObjects & objects) { diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index d6e84cf57ef..a2aeaf8a43c 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -102,11 +102,11 @@ public: ObjectStorageIteratorPtr iterate(const std::string & path_prefix, size_t max_keys) const override; /// Uses `DeleteObjectRequest`. - void removeObject(const StoredObject & object) override; + //void removeObject(const StoredObject & object) override; /// Uses `DeleteObjectsRequest` if it is allowed by `s3_capabilities`, otherwise `DeleteObjectRequest`. /// `DeleteObjectsRequest` is not supported on GCS, see https://issuetracker.google.com/issues/162653700 . - void removeObjects(const StoredObjects & objects) override; + //void removeObjects(const StoredObjects & objects) override; /// Uses `DeleteObjectRequest`. 
void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp index 871d3b506f6..1503d5819eb 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp @@ -254,15 +254,15 @@ std::unique_ptr WebObjectStorage::writeObject( /// NOLI throwNotAllowed(); } -void WebObjectStorage::removeObject(const StoredObject &) -{ - throwNotAllowed(); -} +// void WebObjectStorage::removeObject(const StoredObject &) +// { +// throwNotAllowed(); +// } -void WebObjectStorage::removeObjects(const StoredObjects &) -{ - throwNotAllowed(); -} +// void WebObjectStorage::removeObjects(const StoredObjects &) +// { +// throwNotAllowed(); +// } void WebObjectStorage::removeObjectIfExists(const StoredObject &) { diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.h b/src/Disks/ObjectStorages/Web/WebObjectStorage.h index 573221b7e21..ae52cc20f9b 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.h +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.h @@ -47,9 +47,9 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void removeObject(const StoredObject & object) override; + // void removeObject(const StoredObject & object) override; - void removeObjects(const StoredObjects & objects) override; + // void removeObjects(const StoredObjects & objects) override; void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp b/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp index ba1a97bc2fb..e702f07208a 100644 --- a/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp +++ b/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp @@ -659,7 +659,7 @@ void ObjectStorageQueueSource::applyActionAfterProcessing(const String & path) { if (files_metadata->getTableMetadata().after_processing == ObjectStorageQueueAction::DELETE) { - object_storage->removeObject(StoredObject(path)); + object_storage->removeObjectIfExists(StoredObject(path)); } } From 7795d43055a3bcf4c5f0710152d4c71cc183d000 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Mon, 4 Nov 2024 17:03:16 +0100 Subject: [PATCH 236/566] Analyzer: Check what happens after if-condition removal --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index cb3087af707..55bbf4907bb 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -5448,16 +5448,13 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier */ scope.use_identifier_lookup_to_result_cache = false; - if (query_node_typed.getJoinTree()) - { - TableExpressionsAliasVisitor table_expressions_visitor(scope); - table_expressions_visitor.visit(query_node_typed.getJoinTree()); + TableExpressionsAliasVisitor table_expressions_visitor(scope); + table_expressions_visitor.visit(query_node_typed.getJoinTree()); - initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope); - scope.aliases.alias_name_to_table_expression_node.clear(); + initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope); + scope.aliases.alias_name_to_table_expression_node.clear(); - resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor); - } + 
resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor);
 
     if (!scope.group_by_use_nulls)
         scope.use_identifier_lookup_to_result_cache = true;

From 020b69647a65dd740cddfbf62730f37de14a4eb8 Mon Sep 17 00:00:00 2001
From: avogar
Date: Wed, 6 Nov 2024 15:15:29 +0000
Subject: [PATCH 237/566] Fix counting column size in wide part for Dynamic and JSON types

---
 .../MergeTree/MergeTreeDataPartWide.cpp | 2 +-
 .../MergeTree/MergeTreeReaderWide.cpp | 2 +-
 ...umn_sizes_with_dynamic_structure.reference | 1 +
 ...62_column_sizes_with_dynamic_structure.sql | 22 +++++++++++++++++++
 4 files changed, 25 insertions(+), 2 deletions(-)
 create mode 100644 tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.reference
 create mode 100644 tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql

diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp
index d6f213463f2..d8470ba8405 100644
--- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp
@@ -108,7 +108,7 @@ ColumnSize MergeTreeDataPartWide::getColumnSizeImpl(
         auto mrk_checksum = checksums.files.find(*stream_name + getMarksFileExtension());
         if (mrk_checksum != checksums.files.end())
             size.marks += mrk_checksum->second.file_size;
-    });
+    }, column.type, getColumnSample(column));

     return size;
 }
diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp
index 77231d8d392..885bd1ded8c 100644
--- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp
+++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp
@@ -172,7 +172,7 @@ size_t MergeTreeReaderWide::readRows(
                 throw;
             }

-            if (column->empty())
+            if (column->empty() && max_rows_to_read > 0)
                 res_columns[pos] = nullptr;
         }

diff --git a/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.reference b/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.reference
new file mode 100644
index 00000000000..5cab16ed96d
--- /dev/null
+++ b/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.reference
@@ -0,0 +1 @@
+test 10.00 million 352.87 MiB 39.43 MiB 39.45 MiB
diff --git a/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql b/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql
new file mode 100644
index 00000000000..21e6515fc99
--- /dev/null
+++ b/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql
@@ -0,0 +1,22 @@
+-- Tags: no-random-settings
+
+set allow_experimental_dynamic_type = 1;
+set allow_experimental_json_type = 1;
+
+drop table if exists test;
+create table test (d Dynamic, json JSON) engine=MergeTree order by tuple() settings min_rows_for_wide_part=0, min_bytes_for_wide_part=1;
+insert into test select number, '{"a" : 42, "b" : "Hello, World"}' from numbers(10000000);
+
+SELECT
+    `table`,
+    formatReadableQuantity(sum(rows)) AS rows,
+    formatReadableSize(sum(data_uncompressed_bytes)) AS data_size_uncompressed,
+    formatReadableSize(sum(data_compressed_bytes)) AS data_size_compressed,
+    formatReadableSize(sum(bytes_on_disk)) AS total_size_on_disk
+FROM system.parts
+WHERE active AND (database = currentDatabase()) AND (`table` = 'test')
+GROUP BY `table`
+ORDER BY `table` ASC;
+
+drop table test;
+

From d67b62c2223cd8008fbfb138df6b0f9c59d9acd5 Mon Sep 17 00:00:00 2001
From: "Mikhail f. 
Shiryaev" Date: Wed, 6 Nov 2024 10:50:45 +0100 Subject: [PATCH 238/566] Upgrade clickhouse-server and keeper base images --- docker/keeper/Dockerfile | 10 +++++++--- docker/server/Dockerfile.ubuntu | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index bc76bdbb619..4ecc087afb4 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -1,7 +1,7 @@ # The Dockerfile.ubuntu exists for the tests/ci/docker_server.py script # If the image is built from Dockerfile.alpine, then the `-alpine` suffix is added automatically, # so the only purpose of Dockerfile.ubuntu is to push `latest`, `head` and so on w/o suffixes -FROM ubuntu:20.04 AS glibc-donor +FROM ubuntu:22.04 AS glibc-donor ARG TARGETARCH RUN arch=${TARGETARCH:-amd64} \ @@ -9,7 +9,11 @@ RUN arch=${TARGETARCH:-amd64} \ amd64) rarch=x86_64 ;; \ arm64) rarch=aarch64 ;; \ esac \ - && ln -s "${rarch}-linux-gnu" /lib/linux-gnu + && ln -s "${rarch}-linux-gnu" /lib/linux-gnu \ + && case $arch in \ + amd64) ln /lib/linux-gnu/ld-linux-x86-64.so.2 /lib/linux-gnu/ld-2.35.so ;; \ + arm64) ln /lib/linux-gnu/ld-linux-aarch64.so.1 /lib/linux-gnu/ld-2.35.so ;; \ + esac FROM alpine @@ -20,7 +24,7 @@ ENV LANG=en_US.UTF-8 \ TZ=UTC \ CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml -COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/ +COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.35.so /lib/ COPY --from=glibc-donor /etc/nsswitch.conf /etc/ COPY entrypoint.sh /entrypoint.sh diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 506a627b11c..0d5c983f5e6 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 +FROM ubuntu:22.04 # see https://github.com/moby/moby/issues/4032#issuecomment-192327844 # It could be removed after we move on a version 23:04+ From 2903227143360795fc4912322de9963ec7f8c3ef Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Wed, 6 Nov 2024 10:58:21 +0100 Subject: [PATCH 239/566] Remove strange wrong named dockerfile --- .../clickhouse-statelest-test-runner.Dockerfile | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile diff --git a/docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile b/docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile deleted file mode 100644 index a9802f6f1da..00000000000 --- a/docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# Since right now we can't set volumes to the docker during build, we split building container in stages: -# 1. build base container -# 2. run base conatiner with mounted volumes -# 3. commit container as image -FROM ubuntu:20.04 as clickhouse-test-runner-base - -# A volume where directory with clickhouse packages to be mounted, -# for later installing. 
-VOLUME /packages - -CMD apt-get update ;\ - DEBIAN_FRONTEND=noninteractive \ - apt install -y /packages/clickhouse-common-static_*.deb \ - /packages/clickhouse-client_*.deb \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* From c1345d38c8e987838704a4ae7da6cb05af8257c2 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 6 Nov 2024 15:44:22 +0000 Subject: [PATCH 240/566] Fix flakiness in 03254_pr_join_on_dups --- src/Interpreters/IJoin.h | 1 - src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp | 1 - tests/queries/0_stateless/03254_pr_join_on_dups.sql | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Interpreters/IJoin.h b/src/Interpreters/IJoin.h index 8f648de2538..5a83137ca2a 100644 --- a/src/Interpreters/IJoin.h +++ b/src/Interpreters/IJoin.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include diff --git a/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp b/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp index c0b31864eac..35d340b4bbf 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp @@ -16,7 +16,6 @@ #include #include -#include namespace DB::QueryPlanOptimizations { diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 222f7693090..166910d496f 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -10,6 +10,7 @@ insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; +set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken and leads to unexpected results, https://github.com/ClickHouse/ClickHouse/issues/65690 select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; From df632b6f1e4d825644138c77c2ae4a25943a7fe8 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 6 Nov 2024 16:44:52 +0100 Subject: [PATCH 241/566] clean up --- .../AzureBlobStorage/AzureObjectStorage.cpp | 13 ------------- .../AzureBlobStorage/AzureObjectStorage.h | 5 ----- .../ObjectStorages/Cached/CachedObjectStorage.cpp | 14 -------------- .../ObjectStorages/Cached/CachedObjectStorage.h | 4 ---- src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp | 10 ---------- src/Disks/ObjectStorages/S3/S3ObjectStorage.h | 7 ------- src/Disks/ObjectStorages/Web/WebObjectStorage.cpp | 10 ---------- src/Disks/ObjectStorages/Web/WebObjectStorage.h | 4 ---- 8 files changed, 67 deletions(-) diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index 959afa65672..b8386bcf967 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -277,19 +277,6 @@ void AzureObjectStorage::removeObjectImpl(const StoredObject & object, const Sha } } -/// Remove file. Throws exception if file doesn't exists or it's a directory. 
-// void AzureObjectStorage::removeObject(const StoredObject & object) -// { -// removeObjectImpl(object, client.get(), false); -// } - -// void AzureObjectStorage::removeObjects(const StoredObjects & objects) -// { -// auto client_ptr = client.get(); -// for (const auto & object : objects) -// removeObjectImpl(object, client_ptr, false); -// } - void AzureObjectStorage::removeObjectIfExists(const StoredObject & object) { removeObjectImpl(object, client.get(), true); diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h index 433fe7a852e..401493be367 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h @@ -59,11 +59,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - /// Remove file. Throws exception if file doesn't exists or it's a directory. - //void removeObject(const StoredObject & object) override; - - //void removeObjects(const StoredObjects & objects) override; - void removeObjectIfExists(const StoredObject & object) override; void removeObjectsIfExist(const StoredObjects & objects) override; diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index f2750e6814f..779b8830fab 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -148,20 +148,6 @@ void CachedObjectStorage::removeCacheIfExists(const std::string & path_key_for_c cache->removeKeyIfExists(getCacheKey(path_key_for_cache), FileCache::getCommonUser().user_id); } -// void CachedObjectStorage::removeObject(const StoredObject & object) -// { -// removeCacheIfExists(object.remote_path); -// object_storage->removeObject(object); -// } - -// void CachedObjectStorage::removeObjects(const StoredObjects & objects) -// { -// for (const auto & object : objects) -// removeCacheIfExists(object.remote_path); - -// object_storage->removeObjects(objects); -// } - void CachedObjectStorage::removeObjectIfExists(const StoredObject & object) { removeCacheIfExists(object.remote_path); diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h index 7e10057e04c..77aa635b89b 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h @@ -45,10 +45,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - // void removeObject(const StoredObject & object) override; - - // void removeObjects(const StoredObjects & objects) override; - void removeObjectIfExists(const StoredObject & object) override; void removeObjectsIfExist(const StoredObjects & objects) override; diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 7ed118c6b07..9fca3cad688 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -326,21 +326,11 @@ void S3ObjectStorage::removeObjectsImpl(const StoredObjects & objects, bool if_e ProfileEvents::DiskS3DeleteObjects); } -// void S3ObjectStorage::removeObject(const StoredObject & object) -// { -// removeObjectImpl(object, false); -// } - void S3ObjectStorage::removeObjectIfExists(const StoredObject & object) { removeObjectImpl(object, true); } -// 
void S3ObjectStorage::removeObjects(const StoredObjects & objects) -// { -// removeObjectsImpl(objects, false); -// } - void S3ObjectStorage::removeObjectsIfExist(const StoredObjects & objects) { removeObjectsImpl(objects, true); diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index a2aeaf8a43c..4b9c968ede9 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -101,13 +101,6 @@ public: ObjectStorageIteratorPtr iterate(const std::string & path_prefix, size_t max_keys) const override; - /// Uses `DeleteObjectRequest`. - //void removeObject(const StoredObject & object) override; - - /// Uses `DeleteObjectsRequest` if it is allowed by `s3_capabilities`, otherwise `DeleteObjectRequest`. - /// `DeleteObjectsRequest` is not supported on GCS, see https://issuetracker.google.com/issues/162653700 . - //void removeObjects(const StoredObjects & objects) override; - /// Uses `DeleteObjectRequest`. void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp index 1503d5819eb..35abc0ed0df 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp @@ -254,16 +254,6 @@ std::unique_ptr WebObjectStorage::writeObject( /// NOLI throwNotAllowed(); } -// void WebObjectStorage::removeObject(const StoredObject &) -// { -// throwNotAllowed(); -// } - -// void WebObjectStorage::removeObjects(const StoredObjects &) -// { -// throwNotAllowed(); -// } - void WebObjectStorage::removeObjectIfExists(const StoredObject &) { throwNotAllowed(); diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.h b/src/Disks/ObjectStorages/Web/WebObjectStorage.h index ae52cc20f9b..1e612bd359c 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.h +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.h @@ -47,10 +47,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - // void removeObject(const StoredObject & object) override; - - // void removeObjects(const StoredObjects & objects) override; - void removeObjectIfExists(const StoredObject & object) override; void removeObjectsIfExist(const StoredObjects & objects) override; From 8bb656ddec205c9836db55c8a459a6b9c2cbf3d1 Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 6 Nov 2024 15:55:41 +0000 Subject: [PATCH 242/566] Add context manager for partition manager --- tests/integration/test_quorum_inserts/test.py | 81 ++++++++++--------- 1 file changed, 43 insertions(+), 38 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index a646319c5f9..5e4a960acdf 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -379,50 +379,55 @@ def test_insert_quorum_with_keeper_loss_connection(started_cluster): ) ) - pm = PartitionManager() - pm.drop_instance_zk_connections(zero) + with PartitionManager() as pm: + pm.drop_instance_zk_connections(zero) - retries = 0 - zk = cluster.get_kazoo_client("zoo1") - while True: - if ( - zk.exists(f"/clickhouse/tables/{table_name}/replicas/zero/is_active") - is None - ): - break - print("replica is still active") - time.sleep(1) - retries += 1 - if retries == 120: - raise Exception("Can not wait cluster replica inactive") + retries = 0 + zk = cluster.get_kazoo_client("zoo1") + while 
True: + if ( + zk.exists( + f"/clickhouse/tables/{table_name}/replicas/zero/is_active" + ) + is None + ): + break + print("replica is still active") + time.sleep(1) + retries += 1 + if retries == 120: + raise Exception("Can not wait cluster replica inactive") - first.query("SYSTEM ENABLE FAILPOINT finish_set_quorum_failed_parts") - quorum_fail_future = executor.submit( - lambda: first.query( - "SYSTEM WAIT FAILPOINT finish_set_quorum_failed_parts", timeout=300 + first.query("SYSTEM ENABLE FAILPOINT finish_set_quorum_failed_parts") + quorum_fail_future = executor.submit( + lambda: first.query( + "SYSTEM WAIT FAILPOINT finish_set_quorum_failed_parts", timeout=300 + ) ) - ) - first.query(f"SYSTEM START FETCHES {table_name}") + first.query(f"SYSTEM START FETCHES {table_name}") - concurrent.futures.wait([quorum_fail_future]) + concurrent.futures.wait([quorum_fail_future]) - assert quorum_fail_future.exception() is None + assert quorum_fail_future.exception() is None - zero.query("SYSTEM ENABLE FAILPOINT finish_clean_quorum_failed_parts") - clean_quorum_fail_parts_future = executor.submit( - lambda: first.query( - "SYSTEM WAIT FAILPOINT finish_clean_quorum_failed_parts", timeout=300 + zero.query("SYSTEM ENABLE FAILPOINT finish_clean_quorum_failed_parts") + clean_quorum_fail_parts_future = executor.submit( + lambda: first.query( + "SYSTEM WAIT FAILPOINT finish_clean_quorum_failed_parts", + timeout=300, + ) ) - ) - pm.restore_instance_zk_connections(zero) - concurrent.futures.wait([clean_quorum_fail_parts_future]) + pm.restore_instance_zk_connections(zero) + concurrent.futures.wait([clean_quorum_fail_parts_future]) - assert clean_quorum_fail_parts_future.exception() is None + assert clean_quorum_fail_parts_future.exception() is None - zero.query("SYSTEM DISABLE FAILPOINT replicated_merge_tree_insert_retry_pause") - concurrent.futures.wait([insert_future]) - assert insert_future.exception() is not None - assert not zero.contains_in_log("LOGICAL_ERROR") - assert zero.contains_in_log( - "fails to commit and will not retry or clean garbage" - ) + zero.query( + "SYSTEM DISABLE FAILPOINT replicated_merge_tree_insert_retry_pause" + ) + concurrent.futures.wait([insert_future]) + assert insert_future.exception() is not None + assert not zero.contains_in_log("LOGICAL_ERROR") + assert zero.contains_in_log( + "fails to commit and will not retry or clean garbage" + ) From 530c04413eaf2839fb3fbdef3619628916e63405 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Wed, 6 Nov 2024 19:59:41 +0300 Subject: [PATCH 243/566] Analyzer materialized view IN with CTE fix --- src/Analyzer/QueryNode.h | 12 ++++ src/Analyzer/Resolve/QueryAnalyzer.cpp | 48 +++++++++----- src/Analyzer/UnionNode.cpp | 21 +++++++ src/Analyzer/UnionNode.h | 3 + ...er_materialized_view_in_with_cte.reference | 1 + ...analyzer_materialized_view_in_with_cte.sql | 63 +++++++++++++++++++ ...zer_materialized_view_cte_nested.reference | 0 ..._analyzer_materialized_view_cte_nested.sql | 19 ++++++ 8 files changed, 150 insertions(+), 17 deletions(-) create mode 100644 tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference create mode 100644 tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql create mode 100644 tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference create mode 100644 tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql diff --git a/src/Analyzer/QueryNode.h b/src/Analyzer/QueryNode.h index aef0c8805bb..2333fc56218 100644 --- a/src/Analyzer/QueryNode.h 
+++ b/src/Analyzer/QueryNode.h @@ -602,9 +602,21 @@ public: return projection_columns; } + /// Returns true if query node is resolved, false otherwise + bool isResolved() const + { + return !projection_columns.empty(); + } + /// Resolve query node projection columns void resolveProjectionColumns(NamesAndTypes projection_columns_value); + /// Clear query node projection columns + void clearProjectionColumns() + { + projection_columns.clear(); + } + /// Remove unused projection columns void removeUnusedProjectionColumns(const std::unordered_set & used_projection_columns); diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index cb3087af707..c0a2de0f125 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -2958,27 +2958,28 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi /// Replace storage with values storage of insertion block if (StoragePtr storage = scope.context->getViewSource()) { - QueryTreeNodePtr table_expression; - /// Process possibly nested sub-selects - for (auto * query_node = in_second_argument->as(); query_node; query_node = table_expression->as()) - table_expression = extractLeftTableExpression(query_node->getJoinTree()); + QueryTreeNodePtr table_expression = in_second_argument; - if (table_expression) + /// Process possibly nested sub-selects + while (table_expression) { - if (auto * query_table_node = table_expression->as()) - { - if (query_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted()) - { - auto replacement_table_expression = std::make_shared(storage, scope.context); - if (std::optional table_expression_modifiers = query_table_node->getTableExpressionModifiers()) - replacement_table_expression->setTableExpressionModifiers(*table_expression_modifiers); - in_second_argument = in_second_argument->cloneAndReplace(table_expression, std::move(replacement_table_expression)); - } - } + if (auto * query_node = table_expression->as()) + table_expression = extractLeftTableExpression(query_node->getJoinTree()); + else if (auto * union_node = table_expression->as()) + table_expression = union_node->getQueries().getNodes().at(0); + else + break; + } + + auto * table_expression_table_node = table_expression->as(); + if (table_expression_table_node && + table_expression_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted()) + { + auto replacement_table_expression_table_node = table_expression_table_node->clone(); + replacement_table_expression_table_node->as().updateStorage(storage, scope.context); + in_second_argument = in_second_argument->cloneAndReplace(table_expression, std::move(replacement_table_expression_table_node)); } } - - resolveExpressionNode(in_second_argument, scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/); } /// Edge case when the first argument of IN is scalar subquery. @@ -5310,6 +5311,16 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier auto & query_node_typed = query_node->as(); + /** It is unsafe to call resolveQuery on already resolved query node, because during identifier resolution process + * we replace identifiers with expressions without aliases, also at the end of resolveQuery all aliases from all nodes will be removed. + * For subsequent resolveQuery executions it is possible to have wrong projection header, because for nodes + * with aliases projection name is alias. 
+ * + * If for client it is necessary to resolve query node after clone, client must clear projection columns from query node before resolve. + */ + if (query_node_typed.isResolved()) + return; + if (query_node_typed.isCTE()) ctes_in_resolve_process.insert(query_node_typed.getCTEName()); @@ -5675,6 +5686,9 @@ void QueryAnalyzer::resolveUnion(const QueryTreeNodePtr & union_node, Identifier { auto & union_node_typed = union_node->as(); + if (union_node_typed.isResolved()) + return; + if (union_node_typed.isCTE()) ctes_in_resolve_process.insert(union_node_typed.getCTEName()); diff --git a/src/Analyzer/UnionNode.cpp b/src/Analyzer/UnionNode.cpp index 6f70f01e519..545a6b2195b 100644 --- a/src/Analyzer/UnionNode.cpp +++ b/src/Analyzer/UnionNode.cpp @@ -35,6 +35,7 @@ namespace ErrorCodes { extern const int TYPE_MISMATCH; extern const int BAD_ARGUMENTS; + extern const int LOGICAL_ERROR; } UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_) @@ -50,6 +51,26 @@ UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_) children[queries_child_index] = std::make_shared(); } +bool UnionNode::isResolved() const +{ + for (const auto & query_node : getQueries().getNodes()) + { + bool is_resolved = false; + + if (auto * query_node_typed = query_node->as()) + is_resolved = query_node_typed->isResolved(); + else if (auto * union_node_typed = query_node->as()) + is_resolved = union_node_typed->isResolved(); + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected query tree node type in UNION node"); + + if (!is_resolved) + return false; + } + + return true; +} + NamesAndTypes UnionNode::computeProjectionColumns() const { if (recursive_cte_table) diff --git a/src/Analyzer/UnionNode.h b/src/Analyzer/UnionNode.h index 40baad1ad57..85d6afb1e47 100644 --- a/src/Analyzer/UnionNode.h +++ b/src/Analyzer/UnionNode.h @@ -163,6 +163,9 @@ public: return children[queries_child_index]; } + /// Returns true if union node is resolved, false otherwise + bool isResolved() const; + /// Compute union node projection columns NamesAndTypes computeProjectionColumns() const; diff --git a/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference new file mode 100644 index 00000000000..5ddf8439af5 --- /dev/null +++ b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference @@ -0,0 +1 @@ +1 2 \N test diff --git a/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql new file mode 100644 index 00000000000..4543d336d14 --- /dev/null +++ b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql @@ -0,0 +1,63 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS mv_test; +DROP TABLE IF EXISTS mv_test_target; +DROP VIEW IF EXISTS mv_test_mv; + +CREATE TABLE mv_test +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +ENGINE = Log; + +CREATE TABLE mv_test_target +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +ENGINE = Log; + +CREATE MATERIALIZED VIEW mv_test_mv TO mv_test_target +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +AS WITH + tester AS + ( + SELECT + id, + ref_id, + final_id, + display + FROM mv_test + ), + id_set AS + ( + SELECT + display, + max(id) AS max_id + FROM mv_test + GROUP BY display + ) +SELECT 
* +FROM tester +WHERE id IN ( + SELECT max_id + FROM id_set +); + +INSERT INTO mv_test ( id, ref_id, display) values ( 1, 2, 'test'); + +SELECT * FROM mv_test_target; + +DROP VIEW mv_test_mv; +DROP TABLE mv_test_target; +DROP TABLE mv_test; diff --git a/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql new file mode 100644 index 00000000000..4ea853a7c22 --- /dev/null +++ b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql @@ -0,0 +1,19 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +DROP VIEW IF EXISTS test_mv; + +CREATE TABLE test_table ENGINE = MergeTree ORDER BY tuple() AS SELECT 1 as col1; + +CREATE MATERIALIZED VIEW test_mv ENGINE = MergeTree ORDER BY tuple() AS +WITH + subquery_on_source AS (SELECT col1 AS aliased FROM test_table), + output AS (SELECT * FROM test_table WHERE col1 IN (SELECT aliased FROM subquery_on_source)) +SELECT * FROM output; + +INSERT INTO test_table VALUES (2); + +SELECT * FROM test_mv; + +DROP VIEW test_mv; +DROP TABLE test_table; From 4ad8273e5f3d16f5a95220824223800b4a356e26 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 6 Nov 2024 17:31:24 +0000 Subject: [PATCH 244/566] Enable merge filters optimization. --- src/Core/Settings.cpp | 2 +- src/Core/SettingsChangesHistory.cpp | 1 + .../QueryPlanOptimizationSettings.h | 2 +- .../03262_filter_push_down_view.reference | 2 ++ .../03262_filter_push_down_view.sql | 36 +++++++++++++++++++ 5 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03262_filter_push_down_view.reference create mode 100644 tests/queries/0_stateless/03262_filter_push_down_view.sql diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 081e07ca2ce..6f8047bbdf8 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4554,7 +4554,7 @@ Possible values: - 0 - Disable - 1 - Enable )", 0) \ - DECLARE(Bool, query_plan_merge_filters, false, R"( + DECLARE(Bool, query_plan_merge_filters, true, R"( Allow to merge filters in the query plan )", 0) \ DECLARE(Bool, query_plan_filter_push_down, true, R"( diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index ed87fde8b7e..12350b6cdaf 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -74,6 +74,7 @@ static std::initializer_list Date: Tue, 5 Nov 2024 14:02:43 +0000 Subject: [PATCH 245/566] Reduce the general critical section for query_metric_log - Use a separate mutex for each query to reduce the contention period for queries_mutex. - Refactor to use std::mutex instead of std::recursive_mutex for queries_mutex. - In case we're running late to schedule the next task, schedule it immediately. - Fix LockGuard because unlocking twice is undefined behavior. --- base/base/defines.h | 1 + src/Common/LockGuard.h | 32 +++++- src/Interpreters/QueryMetricLog.cpp | 165 +++++++++++++++++++--------- src/Interpreters/QueryMetricLog.h | 43 ++++++-- 4 files changed, 179 insertions(+), 62 deletions(-) diff --git a/base/base/defines.h b/base/base/defines.h index 5685a6d9833..a0c3c0d1de5 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -145,6 +145,7 @@ #define TSA_TRY_ACQUIRE_SHARED(...) 
__attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure #define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability #define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability +#define TSA_RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__))) /// to return capabilities in functions /// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function) /// They use a lambda function to apply function attribute to a single statement. This enable us to suppress warnings locally instead of diff --git a/src/Common/LockGuard.h b/src/Common/LockGuard.h index 8a98c5f553a..03c8a3e7617 100644 --- a/src/Common/LockGuard.h +++ b/src/Common/LockGuard.h @@ -1,23 +1,47 @@ #pragma once -#include #include +#include +#include namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +}; + /** LockGuard provides RAII-style locking mechanism for a mutex. - ** It's intended to be used like std::unique_ptr but with TSA annotations + ** It's intended to be used like std::unique_lock but with TSA annotations */ template class TSA_SCOPED_LOCKABLE LockGuard { public: - explicit LockGuard(Mutex & mutex_) TSA_ACQUIRE(mutex_) : mutex(mutex_) { mutex.lock(); } - ~LockGuard() TSA_RELEASE() { mutex.unlock(); } + explicit LockGuard(Mutex & mutex_) TSA_ACQUIRE(mutex_) : mutex(mutex_) { lock(); } + ~LockGuard() TSA_RELEASE() { if (locked) unlock(); } + + void lock() TSA_ACQUIRE() + { + /// Don't allow recursive_mutex for now. + if (locked) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't lock twice the same mutex"); + mutex.lock(); + locked = true; + } + + void unlock() TSA_RELEASE() + { + if (!locked) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't unlock the mutex without locking it first"); + mutex.unlock(); + locked = false; + } private: Mutex & mutex; + bool locked = false; }; template typename TLockGuard, typename Mutex> diff --git a/src/Interpreters/QueryMetricLog.cpp b/src/Interpreters/QueryMetricLog.cpp index 5ab3fe590e0..e784c357b29 100644 --- a/src/Interpreters/QueryMetricLog.cpp +++ b/src/Interpreters/QueryMetricLog.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -16,7 +17,6 @@ #include #include -#include namespace DB @@ -24,6 +24,20 @@ namespace DB static auto logger = getLogger("QueryMetricLog"); +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +}; + +String timePointToString(QueryMetricLog::TimePoint time) +{ + /// fmtlib supports subsecond formatting in 10.0.0. We're in 9.1.0, so we need to add the milliseconds ourselves. 
+ auto seconds = std::chrono::time_point_cast(time); + auto microseconds = std::chrono::duration_cast(time - seconds).count(); + + return fmt::format("{:%Y.%m.%d %H:%M:%S}.{:06}", seconds, microseconds); +} + ColumnsDescription QueryMetricLogElement::getColumnsDescription() { ColumnsDescription result; @@ -87,36 +101,69 @@ void QueryMetricLog::shutdown() Base::shutdown(); } -void QueryMetricLog::startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds) +void QueryMetricLog::collectMetric(const ProcessList & process_list, String query_id) { - QueryMetricLogStatus status; - status.interval_milliseconds = interval_milliseconds; - status.next_collect_time = start_time + std::chrono::milliseconds(interval_milliseconds); + auto current_time = std::chrono::system_clock::now(); + const auto query_info = process_list.getQueryInfo(query_id, false, true, false); + if (!query_info) + { + LOG_TRACE(logger, "Query {} is not running anymore, so we couldn't get its QueryStatusInfo", query_id); + return; + } + + LockGuard global_lock(queries_mutex); + auto it = queries.find(query_id); + + /// The query might have finished while the scheduled task is running. + if (it == queries.end()) + { + global_lock.unlock(); + LOG_TRACE(logger, "Query {} not found in the list. Finished while this collecting task was running", query_id); + return; + } + + auto & query_status = it->second; + if (!query_status.mutex) + { + global_lock.unlock(); + LOG_TRACE(logger, "Query {} finished while this collecting task was running", query_id); + return; + } + + LockGuard query_lock(query_status.getMutex()); + global_lock.unlock(); + + auto elem = query_status.createLogMetricElement(query_id, *query_info, current_time); + if (elem) + add(std::move(elem.value())); +} + +/// We use TSA_NO_THREAD_SAFETY_ANALYSIS to prevent TSA complaining that we're modifying the query_status fields +/// without locking the mutex. Since we're building it from scratch, there's no harm in not holding it. +/// If we locked it to make TSA happy, TSAN build would falsely complain about +/// lock-order-inversion (potential deadlock) +/// which is not a real issue since QueryMetricLogStatus's mutex cannot be locked by anything else +/// until we add it to the queries map. 
+void QueryMetricLog::startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds) TSA_NO_THREAD_SAFETY_ANALYSIS +{ + QueryMetricLogStatus query_status; + query_status.interval_milliseconds = interval_milliseconds; + query_status.next_collect_time = start_time + std::chrono::milliseconds(interval_milliseconds); auto context = getContext(); const auto & process_list = context->getProcessList(); - status.task = context->getSchedulePool().createTask("QueryMetricLog", [this, &process_list, query_id] { - auto current_time = std::chrono::system_clock::now(); - const auto query_info = process_list.getQueryInfo(query_id, false, true, false); - if (!query_info) - { - LOG_TRACE(logger, "Query {} is not running anymore, so we couldn't get its QueryStatusInfo", query_id); - return; - } - - auto elem = createLogMetricElement(query_id, *query_info, current_time); - if (elem) - add(std::move(elem.value())); + query_status.task = context->getSchedulePool().createTask("QueryMetricLog", [this, &process_list, query_id] { + collectMetric(process_list, query_id); }); - std::lock_guard lock(queries_mutex); - status.task->scheduleAfter(interval_milliseconds); - queries.emplace(query_id, std::move(status)); + LockGuard global_lock(queries_mutex); + query_status.scheduleNext(query_id); + queries.emplace(query_id, std::move(query_status)); } void QueryMetricLog::finishQuery(const String & query_id, TimePoint finish_time, QueryStatusInfoPtr query_info) { - std::unique_lock lock(queries_mutex); + LockGuard global_lock(queries_mutex); auto it = queries.find(query_id); /// finishQuery may be called from logExceptionBeforeStart when the query has not even started @@ -124,9 +171,19 @@ void QueryMetricLog::finishQuery(const String & query_id, TimePoint finish_time, if (it == queries.end()) return; + auto & query_status = it->second; + decltype(query_status.mutex) query_mutex; + LockGuard query_lock(query_status.getMutex()); + + /// Move the query mutex here so that we hold it until the end, after removing the query from queries. + query_mutex = std::move(query_status.mutex); + query_status.mutex = {}; + + global_lock.unlock(); + if (query_info) { - auto elem = createLogMetricElement(query_id, *query_info, finish_time, false); + auto elem = query_status.createLogMetricElement(query_id, *query_info, finish_time, false); if (elem) add(std::move(elem.value())); } @@ -139,51 +196,62 @@ void QueryMetricLog::finishQuery(const String & query_id, TimePoint finish_time, /// that order. { /// Take ownership of the task so that we can destroy it in this scope after unlocking `queries_mutex`. - auto task = std::move(it->second.task); + auto task = std::move(query_status.task); /// Build an empty task for the old task to make sure it does not lock any mutex on its destruction. - it->second.task = {}; + query_status.task = {}; + query_lock.unlock(); + global_lock.lock(); queries.erase(query_id); /// Ensure `queries_mutex` is unlocked before calling task's destructor at the end of this /// scope which will lock `exec_mutex`. - lock.unlock(); + global_lock.unlock(); } } -std::optional QueryMetricLog::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) +void QueryMetricLogStatus::scheduleNext(String query_id) { - /// fmtlib supports subsecond formatting in 10.0.0. We're in 9.1.0, so we need to add the milliseconds ourselves. 
- auto seconds = std::chrono::time_point_cast(query_info_time); - auto microseconds = std::chrono::duration_cast(query_info_time - seconds).count(); - LOG_DEBUG(logger, "Collecting query_metric_log for query {} with QueryStatusInfo from {:%Y.%m.%d %H:%M:%S}.{:06}. Schedule next: {}", query_id, seconds, microseconds, schedule_next); - - std::unique_lock lock(queries_mutex); - auto query_status_it = queries.find(query_id); - - /// The query might have finished while the scheduled task is running. - if (query_status_it == queries.end()) + const auto now = std::chrono::system_clock::now(); + if (next_collect_time > now) { - lock.unlock(); - LOG_TRACE(logger, "Query {} finished already while this collecting task was running", query_id); - return {}; + const auto wait_time = std::chrono::duration_cast(next_collect_time - now).count(); + task->scheduleAfter(wait_time); } - - auto & query_status = query_status_it->second; - if (query_info_time <= query_status.last_collect_time) + else + { + LOG_TRACE(logger, "The next collecting task for query {} should have already run at {}. Scheduling it right now", + query_id, timePointToString(next_collect_time)); + task->schedule(); + } +} + +std::optional QueryMetricLogStatus::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) +{ + LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Schedule next: {}", + query_id, interval_milliseconds, timePointToString(query_info_time), schedule_next); + + if (query_info_time <= last_collect_time) { - lock.unlock(); LOG_TRACE(logger, "Query {} has a more recent metrics collected. Skipping this one", query_id); return {}; } - query_status.last_collect_time = query_info_time; + /// Leave some margin because task->scheduleAfter takes a value in milliseconds. + /// So, we can expect up to 1ms of drift since BackgroundSchedulePool will compare + /// time points in milliseconds. + static auto error_margin = std::chrono::milliseconds(1); + if (schedule_next && query_info_time + error_margin < next_collect_time) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Task to collect metric for query {} scheduled at {} but run at {}", + query_id, timePointToString(next_collect_time), timePointToString(query_info_time)); + + last_collect_time = query_info_time; QueryMetricLogElement elem; elem.event_time = timeInSeconds(query_info_time); elem.event_time_microseconds = timeInMicroseconds(query_info_time); - elem.query_id = query_status_it->first; + elem.query_id = query_id; elem.memory_usage = query_info.memory_usage > 0 ? query_info.memory_usage : 0; elem.peak_memory_usage = query_info.peak_memory_usage > 0 ? query_info.peak_memory_usage : 0; @@ -192,7 +260,7 @@ std::optional QueryMetricLog::createLogMetricElement(cons for (ProfileEvents::Event i = ProfileEvents::Event(0), end = ProfileEvents::end(); i < end; ++i) { const auto & new_value = (*(query_info.profile_counters))[i]; - auto & old_value = query_status.last_profile_events[i]; + auto & old_value = last_profile_events[i]; /// Profile event counters are supposed to be monotonic. However, at least the `NetworkReceiveBytes` can be inaccurate. /// So, since in the future the counter should always have a bigger value than in the past, we skip this event. 
@@ -214,9 +282,8 @@ std::optional QueryMetricLog::createLogMetricElement(cons if (schedule_next) { - query_status.next_collect_time += std::chrono::milliseconds(query_status.interval_milliseconds); - const auto wait_time = std::chrono::duration_cast(query_status.next_collect_time - std::chrono::system_clock::now()).count(); - query_status.task->scheduleAfter(wait_time); + next_collect_time += std::chrono::milliseconds(interval_milliseconds); + scheduleNext(query_id); } return elem; diff --git a/src/Interpreters/QueryMetricLog.h b/src/Interpreters/QueryMetricLog.h index 802cee7bf26..65764229b0a 100644 --- a/src/Interpreters/QueryMetricLog.h +++ b/src/Interpreters/QueryMetricLog.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -11,11 +12,17 @@ #include #include +#include namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +}; + /** QueryMetricLogElement is a log of query metric values measured at regular time interval. */ @@ -36,31 +43,49 @@ struct QueryMetricLogElement struct QueryMetricLogStatus { + using TimePoint = std::chrono::system_clock::time_point; + using Mutex = std::mutex; + UInt64 interval_milliseconds; - std::chrono::system_clock::time_point last_collect_time; - std::chrono::system_clock::time_point next_collect_time; - std::vector last_profile_events = std::vector(ProfileEvents::end()); - BackgroundSchedulePool::TaskHolder task; + std::chrono::system_clock::time_point last_collect_time TSA_GUARDED_BY(getMutex()); + std::chrono::system_clock::time_point next_collect_time TSA_GUARDED_BY(getMutex()); + std::vector last_profile_events TSA_GUARDED_BY(getMutex()) = std::vector(ProfileEvents::end()); + BackgroundSchedulePool::TaskHolder task TSA_GUARDED_BY(getMutex()); + + /// We need to be able to move it for the hash map, so we need to add an indirection here. + std::unique_ptr mutex = std::make_unique(); + + /// Return a reference to the mutex, used for Thread Sanitizer annotations. + Mutex & getMutex() const TSA_RETURN_CAPABILITY(mutex) + { + if (!mutex) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mutex cannot be NULL"); + return *mutex; + } + + void scheduleNext(String query_id) TSA_REQUIRES(getMutex()); + std::optional createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next = true) TSA_REQUIRES(getMutex()); }; class QueryMetricLog : public SystemLog { using SystemLog::SystemLog; - using TimePoint = std::chrono::system_clock::time_point; using Base = SystemLog; public: + using TimePoint = std::chrono::system_clock::time_point; + void shutdown() final; - // Both startQuery and finishQuery are called from the thread that executes the query + /// Both startQuery and finishQuery are called from the thread that executes the query. void startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds); void finishQuery(const String & query_id, TimePoint finish_time, QueryStatusInfoPtr query_info = nullptr); private: - std::optional createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next = true); + void collectMetric(const ProcessList & process_list, String query_id); - std::recursive_mutex queries_mutex; - std::unordered_map queries; + std::mutex queries_mutex; + std::unordered_map queries TSA_GUARDED_BY(queries_mutex); }; } From 26f0ba2c4ceb4b6d52f159943de63d4f2ca10520 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Wed, 6 Nov 2024 21:23:06 +0100 Subject: [PATCH 246/566] Update compatibility section for clickhouse-server docker image --- docker/server/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/server/README.md b/docker/server/README.md index 65239126790..1dc636414ac 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -20,6 +20,7 @@ For more information and documentation see https://clickhouse.com/. - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. - The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A). +- Since the Clickhouse 24.11 Ubuntu images started using `ubuntu:22.04` as its base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run [--privileged | --security-opt seccomp=unconfined]` instead, however that has security implications. ## How to use this image From 157f745136094eb2eaeae72f17d103928194fd52 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Wed, 6 Nov 2024 22:09:12 +0100 Subject: [PATCH 247/566] Write a simple troubleshooting for an old docker and clickhouse-server --- docs/en/operations/_troubleshooting.md | 28 ++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/docs/en/operations/_troubleshooting.md b/docs/en/operations/_troubleshooting.md index 77389782675..f0ee1ca1d29 100644 --- a/docs/en/operations/_troubleshooting.md +++ b/docs/en/operations/_troubleshooting.md @@ -65,6 +65,34 @@ sudo rm -f /etc/yum.repos.d/clickhouse.repo After that follow the [install guide](../getting-started/install.md#from-rpm-packages) +### You Can't Run Docker Container + +You are running a simple `docker run clickhouse/clickhouse-server` and it crashes with a stack trace similar to following: + +``` +$ docker run -it clickhouse/clickhouse-server +........ +2024.11.06 21:04:48.912036 [ 1 ] {} SentryWriter: Sending crash reports is disabled +Poco::Exception. Code: 1000, e.code() = 0, System exception: cannot start thread, Stack trace (when copying this message, always include the lines below): + +0. Poco::ThreadImpl::startImpl(Poco::SharedPtr>) @ 0x00000000157c7b34 +1. Poco::Thread::start(Poco::Runnable&) @ 0x00000000157c8a0e +2. BaseDaemon::initializeTerminationAndSignalProcessing() @ 0x000000000d267a14 +3. BaseDaemon::initialize(Poco::Util::Application&) @ 0x000000000d2652cb +4. DB::Server::initialize(Poco::Util::Application&) @ 0x000000000d128b38 +5. Poco::Util::Application::run() @ 0x000000001581cfda +6. DB::Server::run() @ 0x000000000d1288f0 +7. Poco::Util::ServerApplication::run(int, char**) @ 0x0000000015825e27 +8. mainEntryClickHouseServer(int, char**) @ 0x000000000d125b38 +9. main @ 0x0000000007ea4eee +10. ? @ 0x00007f67ff946d90 +11. ? @ 0x00007f67ff946e40 +12. _start @ 0x00000000062e802e + (version 24.10.1.2812 (official build)) +``` + +The reason is an old docker daemon with version lower than `20.10.10`. 
A way to fix it is to either upgrade the Docker daemon or run `docker run [--privileged | --security-opt seccomp=unconfined]`. The latter has security implications.
+
 ## Connecting to the Server {#troubleshooting-accepts-no-connections}
 
 Possible issues:

From 29aed6a58629dadca25840e976a4e680ac55a963 Mon Sep 17 00:00:00 2001
From: Michael Kolupaev
Date: Wed, 6 Nov 2024 23:38:56 +0000
Subject: [PATCH 248/566] Fix compatibility with refreshable materialized views created by old clickhouse servers

---
 src/Storages/StorageMaterializedView.cpp | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp
index d047b28e076..d56b09eec67 100644
--- a/src/Storages/StorageMaterializedView.cpp
+++ b/src/Storages/StorageMaterializedView.cpp
@@ -228,10 +228,20 @@ StorageMaterializedView::StorageMaterializedView(
 
     if (!fixed_uuid)
     {
-        if (to_inner_uuid != UUIDHelpers::Nil)
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "TO INNER UUID is not allowed for materialized views with REFRESH without APPEND");
-        if (to_table_id.hasUUID())
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "explicit UUID is not allowed for target table of materialized view with REFRESH without APPEND");
+        if (mode >= LoadingStrictnessLevel::ATTACH)
+        {
+            /// Old versions of ClickHouse (when refreshable MV was experimental) could add useless
+            /// UUIDs to attach queries.
+            to_table_id.uuid = UUIDHelpers::Nil;
+            to_inner_uuid = UUIDHelpers::Nil;
+        }
+        else
+        {
+            if (to_inner_uuid != UUIDHelpers::Nil)
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "TO INNER UUID is not allowed for materialized views with REFRESH without APPEND");
+            if (to_table_id.hasUUID())
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "explicit UUID is not allowed for target table of materialized view with REFRESH without APPEND");
+        }
     }
 
     if (!has_inner_table

From 8fb52b72b5bc1a4324cedaf2171e1af4e777f1af Mon Sep 17 00:00:00 2001
From: cangyin
Date: Fri, 14 Jun 2024 12:58:46 +0000
Subject: [PATCH 249/566] Fix use-after-dtor logic in hashtable destroyElements

---
 src/Common/HashTable/HashTable.h | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/src/Common/HashTable/HashTable.h b/src/Common/HashTable/HashTable.h
index f4374a0f2ca..d379c3f6a87 100644
--- a/src/Common/HashTable/HashTable.h
+++ b/src/Common/HashTable/HashTable.h
@@ -658,16 +658,11 @@ protected:
     {
         if (!std::is_trivially_destructible_v)
         {
-            for (iterator it = begin(), it_end = end(); it != it_end; ++it)
+            for (iterator it = begin(), it_end = end(); it != it_end;)
             {
-                it.ptr->~Cell();
-                /// In case of poison_in_dtor=1 it will be poisoned,
-                /// but it maybe used later, during iteration.
-                ///
-                /// NOTE, that technically this is UB [1], but OK for now.
- /// - /// [1]: https://github.com/google/sanitizers/issues/854#issuecomment-329661378 - __msan_unpoison(it.ptr, sizeof(*it.ptr)); + auto ptr = it.ptr; + ++it; + ptr->~Cell(); } /// Everything had been destroyed in the loop above, reset the flag From 1c74206bf2fddd2aad8f96699769e20dd122979a Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Thu, 7 Nov 2024 18:00:14 +0800 Subject: [PATCH 250/566] add parseDateTime64 functions --- src/Functions/parseDateTime.cpp | 145 ++++++++++++------ .../03252_parse_datetime64.reference | 17 ++ .../0_stateless/03252_parse_datetime64.sql | 32 ++++ ..._parse_datetime64_in_joda_syntax.reference | 32 ++-- .../03252_parse_datetime64_in_joda_syntax.sql | 60 ++++++-- 5 files changed, 214 insertions(+), 72 deletions(-) create mode 100644 tests/queries/0_stateless/03252_parse_datetime64.reference create mode 100644 tests/queries/0_stateless/03252_parse_datetime64.sql diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 976be53a21e..9f7f78dcbe2 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -193,6 +193,7 @@ namespace Int32 minute = 0; /// range [0, 59] Int32 second = 0; /// range [0, 59] Int32 microsecond = 0; /// range [0, 999999] + UInt32 scale = 0; /// The microsecond scale of DateTime64. bool is_am = true; /// If is_hour_of_half_day = true and is_am = false (i.e. pm) then add 12 hours to the result DateTime bool hour_starts_at_1 = false; /// Whether the hour is clockhour @@ -221,6 +222,7 @@ namespace minute = 0; second = 0; microsecond = 0; + scale = 0; is_am = true; hour_starts_at_1 = false; @@ -599,7 +601,7 @@ namespace bool useDefaultImplementationForConstants() const override { return true; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } - ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; } + ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2, 3}; } bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } @@ -613,9 +615,9 @@ namespace if constexpr (return_type == ReturnType::DateTime64) { optional_args = { - {"precision or format", static_cast([](const IDataType & data_type) -> bool { + {"scale/format", static_cast([](const IDataType & data_type) -> bool { return isUInt(data_type) || isString(data_type); - }), nullptr, "Number or String"}, + }), nullptr, "UInt or String"}, {"format", static_cast(&isString), nullptr, "String"}, {"timezone", static_cast(&isString), &isColumnConst, "const String"} }; @@ -631,24 +633,34 @@ namespace DataTypePtr data_type; if constexpr (return_type == ReturnType::DateTime64) { + UInt32 scale = 0; if (arguments.size() == 1) - return std::make_shared(0, time_zone_name); + { + /// In MySQL parse syntax, the scale of microseond is 6. 
+ if constexpr (parse_syntax == ParseSyntax::MySQL) + scale = 6; + } else { - UInt32 precision = 0; if (isUInt(arguments[1].type)) { - const auto * col_precision = checkAndGetColumnConst(arguments[1].column.get()); - if (col_precision) - precision = col_precision->getValue(); + const auto * col_scale = checkAndGetColumnConst(arguments[1].column.get()); + if (col_scale) + scale = col_scale->getValue(); else throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The input precision value may exceed the max value of `DateTime64`: {}.", + "The input scale value may exceed the max value of `DateTime64`: {}.", maxPrecisionOfDateTime64); } - /// Construct the return type `DataTypDateTime64` with precision and time zone name. The precision value can be specified or be extracted - /// from the format string by computing how many 'S' characters are contained in the format's micorsceond fragment. - String format = getFormat(arguments, precision); + else + { + if constexpr (parse_syntax == ParseSyntax::MySQL) + scale = 6; + } + + /// Construct the return type `DataTypDateTime64` with scale and time zone name. The scale value can be specified or be extracted + /// from the format string by c how many 'S' characters are contained in the format's micorsceond fragment. + String format = getFormat(arguments, scale); std::vector instructions = parseFormat(format); for (const auto & instruction : instructions) { @@ -664,26 +676,27 @@ namespace else val++; } - /// If the precision is already specified by the second parameter, but it not equals the value that extract from the format string, - /// then we should throw an exception; If the precision is not specified, then we set its value as the extracted one. - if (val != 0 && precision != 0 && val != precision) + /// If the scale is already specified by the second argument, but it not equals the value that extract from the format string, + /// then we should throw an exception; If the scale is not specified, then we should set its value as the extracted one. 
+ if (val != 0 && scale != 0 && val != scale) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The precision of input format string {} not equals the given precision value {}.", + "The scale of input format string {} not equals the given scale value {}.", format, - precision); - else if (precision == 0 && val != 0) - precision = val; + scale); + else if (scale == 0 && val != 0) + scale = val; } - if (precision > maxPrecisionOfDateTime64) + if (scale > maxPrecisionOfDateTime64) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The precision of the input format string {} exceed the max precision value {}.", + "The scale of the input format string {} exceed the max scale value {}.", format, maxPrecisionOfDateTime64); - data_type = std::make_shared(precision, time_zone_name); } + data_type = std::make_shared(scale, time_zone_name); } else data_type = std::make_shared(time_zone_name); + if (error_handling == ErrorHandling::Null) return std::make_shared(data_type); return data_type; @@ -729,15 +742,15 @@ namespace col_null_map = ColumnUInt8::create(input_rows_count, 0); Int64 multiplier = 0; - UInt32 precision = 0; + UInt32 scale = 0; if constexpr (return_type == ReturnType::DateTime64) { const DataTypeDateTime64 * datatime64_type = checkAndGetDataType(removeNullable(result_type).get()); - precision = datatime64_type->getScale(); - multiplier = DecimalUtils::scaleMultiplier(precision); + scale = datatime64_type->getScale(); + multiplier = DecimalUtils::scaleMultiplier(scale); } - String format = getFormat(arguments, precision); + const String format = getFormat(arguments, scale); const auto & time_zone = getTimeZone(arguments); std::vector instructions = parseFormat(format); @@ -746,6 +759,9 @@ namespace for (size_t i = 0; i < input_rows_count; ++i) { datetime.reset(); + if constexpr (return_type == ReturnType::DateTime64) + datetime.scale = scale; + StringRef str_ref = col_str->getDataAt(i); Pos cur = str_ref.data; Pos end = str_ref.data + str_ref.size; @@ -787,7 +803,7 @@ namespace Int64OrError result = 0; /// Ensure all input was consumed when the return type is `DateTime`. 
- if (return_type == ReturnType::DateTime && cur < end) + if (cur < end) { result = tl::unexpected(ErrorCodeAndMessage( ErrorCodes::CANNOT_PARSE_DATETIME, @@ -938,6 +954,28 @@ namespace return cur; } + template + [[nodiscard]] + static PosOrError readNumber6(Pos cur, Pos end, [[maybe_unused]] const String & fragment, T & res) + { + if constexpr (need_check_space == NeedCheckSpace::Yes) + RETURN_ERROR_IF_FAILED(checkSpace(cur, end, 6, "readNumber6 requires size >= 6", fragment)) + + res = (*cur - '0'); + ++cur; + res = res * 10 + (*cur - '0'); + ++cur; + res = res * 10 + (*cur - '0'); + ++cur; + res = res * 10 + (*cur - '0'); + ++cur; + res = res * 10 + (*cur - '0'); + ++cur; + res = res * 10 + (*cur - '0'); + ++cur; + return cur; + } + [[nodiscard]] static VoidOrError checkSpace(Pos cur, Pos end, size_t len, const String & msg, const String & fragment) { @@ -1358,13 +1396,18 @@ namespace } [[nodiscard]] - static PosOrError mysqlMicrosecond(Pos cur, Pos end, const String & fragment, DateTime & /*date*/) + static PosOrError mysqlMicrosecond(Pos cur, Pos end, const String & fragment, DateTime & date) { - RETURN_ERROR_IF_FAILED(checkSpace(cur, end, 6, "mysqlMicrosecond requires size >= 6", fragment)) - - for (size_t i = 0; i < 6; ++i) - ASSIGN_RESULT_OR_RETURN_ERROR(cur, (assertNumber(cur, end, fragment))) - + if (date.scale != 6) + RETURN_ERROR( + ErrorCodes::CANNOT_PARSE_DATETIME, + "Unable to parse fragment {} from {} because of the microsecond's scale {} is not 6", + fragment, + std::string_view(cur, end - cur), + std::to_string(date.scale)) + Int32 microsecond = 0; + ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumber6(cur, end, fragment, microsecond))) + RETURN_ERROR_IF_FAILED(date.setMicrosecond(microsecond)) return cur; } @@ -1775,7 +1818,7 @@ namespace return cur; } else - RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Unable to build date time from timezone {}", read_time_zone) + RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Unable to parse date time from timezone {}", read_time_zone) } [[nodiscard]] @@ -2223,7 +2266,7 @@ namespace } - String getFormat(const ColumnsWithTypeAndName & arguments, UInt32 precision) const + String getFormat(const ColumnsWithTypeAndName & arguments, UInt32 scale) const { size_t format_arg_index = 1; if constexpr (return_type == ReturnType::DateTime64) @@ -2247,15 +2290,17 @@ namespace format = "%Y-%m-%d %H:%i:%s"; else format = "yyyy-MM-dd HH:mm:ss"; - if (precision > 0) - format += "." + String(precision, 'S'); + if (scale > 0) + { + if constexpr (parse_syntax == ParseSyntax::MySQL) + format += ".%f"; + else + format += "." 
+ String(scale, 'S'); + } return format; } else { - if (!arguments[format_arg_index].column || !isColumnConst(*arguments[format_arg_index].column)) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Argument at index {} for function {} must be constant", format_arg_index, getName()); - const auto * col_format = checkAndGetColumnConst(arguments[format_arg_index].column.get()); if (!col_format) throw Exception( @@ -2269,18 +2314,24 @@ namespace const DateLUTImpl & getTimeZone(const ColumnsWithTypeAndName & arguments) const { - size_t timezone_arg_index = 2; - if constexpr (return_type == ReturnType::DateTime64) - timezone_arg_index = 3; - - if (arguments.size() <= timezone_arg_index) + if (arguments.size() < 3) return DateLUT::instance(); - + else if constexpr (return_type == ReturnType::DateTime64) + { + /// If the return type is DateTime64, and the second argument is UInt type for scale, then it has 2 reasonable situations: + /// the first like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT+8') + /// the second like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f'). And for the + /// first one, we should return the last argument as its timezone, and for the second one, we should return the default time zone as + /// `DateLUT::instance()`. + if (isUInt(arguments[1].type) && arguments.size() < 4) + return DateLUT::instance(); + } + size_t timezone_arg_index = arguments.size() - 1; const auto * col = checkAndGetColumnConst(arguments[timezone_arg_index].column.get()); if (!col) throw Exception( ErrorCodes::ILLEGAL_COLUMN, - "Illegal column {} of third ('timezone') argument of function {}. Must be constant String.", + "Illegal column {} of ('timezone') argument of function {}. 
Must be constant String.", arguments[timezone_arg_index].column->getName(), getName()); diff --git a/tests/queries/0_stateless/03252_parse_datetime64.reference b/tests/queries/0_stateless/03252_parse_datetime64.reference new file mode 100644 index 00000000000..27dcef6bf68 --- /dev/null +++ b/tests/queries/0_stateless/03252_parse_datetime64.reference @@ -0,0 +1,17 @@ +2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 +1970-01-01 08:00:00.000000 +1970-01-01 08:00:00.000 +1970-01-01 08:00:00.000 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +1970-01-01 08:00:00.000 +2024-10-09 10:30:10.123456 +\N +\N +\N +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +\N diff --git a/tests/queries/0_stateless/03252_parse_datetime64.sql b/tests/queries/0_stateless/03252_parse_datetime64.sql new file mode 100644 index 00000000000..d28b6e586f7 --- /dev/null +++ b/tests/queries/0_stateless/03252_parse_datetime64.sql @@ -0,0 +1,32 @@ +set session_timezone = 'Asia/Shanghai'; + +select parseDateTime64('2024-10-09 10:30:10.123456'); +select parseDateTime64('2024-10-09 10:30:10.123'); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime64('2024-10-09 10:30:10', 3); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime64('2024-10-09 10:30:10.', 3); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64('2024-10-09 10:30:10.123456', 6), parseDateTime64('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); +select parseDateTime64('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('2024-10-09 10:30:10.123', 6, '%Y-%m-%d %H:%i:%s.%f'); -- { serverError NOT_ENOUGH_SPACE } + +select parseDateTime64OrZero('2024-10-09 10:30:10.123456'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123'); +select parseDateTime64OrZero('2024-10-09 10:30:10', 3); +select parseDateTime64OrZero('2024-10-09 10:30:10.', 3); +select parseDateTime64OrZero('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64OrZero('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); + +select parseDateTime64OrNull('2024-10-09 10:30:10.123456'); +select parseDateTime64OrNull('2024-10-09 10:30:10.123'); +select parseDateTime64OrNull('2024-10-09 10:30:10', 3); +select parseDateTime64OrNull('2024-10-09 10:30:10.', 3); +select parseDateTime64OrNull('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64OrNull('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64OrNull('2024-10-09 
10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7');; +select parseDateTime64OrNull('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); \ No newline at end of file diff --git a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference index 063b76b152c..0b4a28c4b38 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference +++ b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference @@ -1,14 +1,26 @@ -2024-10-09 10:30:10.123 -2024-10-09 10:30:10.123456 -2024-10-10 02:30:10.123456 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-10 02:42:10.123456 2024-10-10 01:30:10.123456 -2024-10-09 10:30:10.123 -2024-10-09 10:30:10.123456 -1970-01-01 08:00:00.000000000 -2024-10-10 02:30:10.123456 2024-10-10 01:30:10.123456 -2024-10-09 10:30:10.123 -2024-10-09 10:30:10.123456 +1970-01-01 08:00:00.000 +1970-01-01 08:00:00.000 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-10 02:42:10.123456 +1970-01-01 08:00:00.000000 +2024-10-10 01:30:10.123456 +2024-10-10 01:30:10.123456 +1970-01-01 08:00:00.000000 +\N +\N +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-10 02:42:10.123456 \N -2024-10-10 02:30:10.123456 2024-10-10 01:30:10.123456 +2024-10-10 01:30:10.123456 +\N diff --git a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql index 9ea854bc324..8482677e9c9 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql +++ b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql @@ -1,19 +1,49 @@ set session_timezone = 'Asia/Shanghai'; -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS'); -- { serverError CANNOT_PARSE_DATETIME } -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', 3); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.', 3); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 6); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select 
parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-08123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZZZ'); -- {serverError CANNOT_PARSE_DATETIME} +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSzzz'); +-- incorrect timezone offset and timezone +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-8000', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456ABCD', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS } -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS'); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', 3); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.', 3); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 6); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-08123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZZZ'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSzzz'); +-- incorrect timezone offset and timezone +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-8000', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456ABCD', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS } -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); 
-select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS'); -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', 3); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.', 3); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 6); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-08123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZZZ'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSzzz'); +-- incorrect timezone offset and timezone +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-8000', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456ABCD', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS } \ No newline at end of file From 042e82c6a9cbfa97d68cebb10e88c412c435cd3b Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 7 Nov 2024 13:10:51 +0300 Subject: [PATCH 251/566] Fix tests --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 3 ++- .../03263_analyzer_materialized_view_cte_nested.reference | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index c0a2de0f125..c2eac8d008b 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -2971,7 +2971,8 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi break; } - auto * table_expression_table_node = table_expression->as(); + TableNode * table_expression_table_node = table_expression ? 
table_expression->as() : nullptr; + if (table_expression_table_node && table_expression_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted()) { diff --git a/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference index e69de29bb2d..0cfbf08886f 100644 --- a/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference +++ b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference @@ -0,0 +1 @@ +2 From e7ad525e0033e1a42cfe6ba35e2a9f0ecd2088b0 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 6 Nov 2024 20:03:14 +0000 Subject: [PATCH 252/566] Re-introduce support for legacy index creation syntax --- .../table-engines/mergetree-family/annindexes.md | 6 +++--- .../MergeTree/MergeTreeIndexVectorSimilarity.cpp | 6 ++++-- ...or_search_legacy_index_creation_syntax.reference | 0 ...4_vector_search_legacy_index_creation_syntax.sql | 13 +++++++++++++ 4 files changed, 20 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.reference create mode 100644 tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.sql diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index dc12a60e8ef..fcdc16637e6 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -54,7 +54,7 @@ Parameters: - `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a line between two points in Euclidean space), or `cosineDistance` (the [cosine distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance)- the angle between two non-zero vectors). -- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing the vector with reduced precision (optional, default: `bf16`) +- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing vectors with reduced precision (optional, default: `bf16`) - `hnsw_max_connections_per_layer`: the number of neighbors per HNSW graph node, also known as `M` in the [HNSW paper](https://doi.org/10.1109/TPAMI.2018.2889473) (optional, default: 32) - `hnsw_candidate_list_size_for_construction`: the size of the dynamic candidate list when constructing the HNSW graph, also known as @@ -92,8 +92,8 @@ Vector similarity indexes currently support two distance functions: - `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors ([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)). -Vector similarity indexes allows storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16` or `i8`. -If no scalar kind was specified during index creation, `f16` is used as default. +Vector similarity indexes allows storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16`, `bf16`, +and `i8`. If no scalar kind was specified during index creation, `bf16` is used as default. For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no distance function was specified during index creation, `L2Distance` is used as default. 
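For illustration, a minimal sketch of the five-argument index definition documented above (not part of the patch: the table and column names and the two numeric HNSW values are assumptions; the argument order follows the documentation: method, distance function, quantization, max connections per layer, candidate list size for construction):

```sql
-- Minimal sketch, assuming a hypothetical table `tab_demo`.
SET allow_experimental_vector_similarity_index = 1;

CREATE TABLE tab_demo
(
    id Int32,
    vec Array(Float32),
    -- 'bf16' is the documented default quantization; 32 and 128 are illustrative HNSW values.
    INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'bf16', 32, 128)
)
ENGINE = MergeTree
ORDER BY id;
```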
diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index f95b840e223..cca3ca6ce3b 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -531,15 +531,17 @@ void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* atta { const bool has_two_args = (index.arguments.size() == 2); const bool has_five_args = (index.arguments.size() == 5); + const bool has_six_args = (index.arguments.size() == 6); /// Legacy index creation syntax before #70616. Supported only to be able to load old tables, can be removed mid-2025. + /// The 6th argument (ef_search) is ignored. /// Check number and type of arguments - if (!has_two_args && !has_five_args) + if (!has_two_args && !has_five_args && !has_six_args) throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index must have two or five arguments"); if (index.arguments[0].getType() != Field::Types::String) throw Exception(ErrorCodes::INCORRECT_QUERY, "First argument of vector similarity index (method) must be of type String"); if (index.arguments[1].getType() != Field::Types::String) throw Exception(ErrorCodes::INCORRECT_QUERY, "Second argument of vector similarity index (metric) must be of type String"); - if (has_five_args) + if (has_five_args || has_six_args) { if (index.arguments[2].getType() != Field::Types::String) throw Exception(ErrorCodes::INCORRECT_QUERY, "Third argument of vector similarity index (quantization) must be of type String"); diff --git a/tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.reference b/tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.sql b/tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.sql new file mode 100644 index 00000000000..e5dbc6aa6a9 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests the legacy syntax to create vector similarity indexes before #70616. +-- Support for this syntax can be removed after mid-2025. 
+ +SET allow_experimental_vector_similarity_index = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f32', 42, 99, 113)) ENGINE = MergeTree ORDER BY id; -- Note the 6th parameter: 113 + +DROP TABLE tab; + From cf594010c862a568b07a440c4d70f9d59319b1a7 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 7 Nov 2024 09:43:42 +0000 Subject: [PATCH 253/566] Rename some tests for more consistency --- ...=> 02354_vector_search_adaptive_index_granularity.reference} | 0 ...y.sql => 02354_vector_search_adaptive_index_granularity.sql} | 0 ...=> 02354_vector_search_and_other_skipping_indexes.reference} | 0 ...1.sql => 02354_vector_search_and_other_skipping_indexes.sql} | 2 +- ...ence => 02354_vector_search_different_array_sizes.reference} | 0 ..._sizes.sql => 02354_vector_search_different_array_sizes.sql} | 0 ...2354_vector_search_empty_arrays_or_default_values.reference} | 0 ...l => 02354_vector_search_empty_arrays_or_default_values.sql} | 2 +- ...reference => 02354_vector_search_multiple_indexes.reference} | 0 ...ple_indexes.sql => 02354_vector_search_multiple_indexes.sql} | 0 ...s.reference => 02354_vector_search_multiple_marks.reference} | 0 ...ultiple_marks.sql => 02354_vector_search_multiple_marks.sql} | 0 ...g_69085.reference => 02354_vector_search_subquery.reference} | 0 ...or_search_bug_69085.sql => 02354_vector_search_subquery.sql} | 2 +- 14 files changed, 3 insertions(+), 3 deletions(-) rename tests/queries/0_stateless/{02354_vector_search_bug_52282.reference => 02354_vector_search_adaptive_index_granularity.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_adaptive_index_granularity.sql => 02354_vector_search_adaptive_index_granularity.sql} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_71381.reference => 02354_vector_search_and_other_skipping_indexes.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_71381.sql => 02354_vector_search_and_other_skipping_indexes.sql} (79%) rename tests/queries/0_stateless/{02354_vector_search_bug_adaptive_index_granularity.reference => 02354_vector_search_different_array_sizes.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_different_array_sizes.sql => 02354_vector_search_different_array_sizes.sql} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_different_array_sizes.reference => 02354_vector_search_empty_arrays_or_default_values.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_52282.sql => 02354_vector_search_empty_arrays_or_default_values.sql} (80%) rename tests/queries/0_stateless/{02354_vector_search_bug_multiple_indexes.reference => 02354_vector_search_multiple_indexes.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_multiple_indexes.sql => 02354_vector_search_multiple_indexes.sql} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_multiple_marks.reference => 02354_vector_search_multiple_marks.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_multiple_marks.sql => 02354_vector_search_multiple_marks.sql} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_69085.reference => 02354_vector_search_subquery.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_69085.sql => 02354_vector_search_subquery.sql} (93%) diff --git a/tests/queries/0_stateless/02354_vector_search_bug_52282.reference 
b/tests/queries/0_stateless/02354_vector_search_adaptive_index_granularity.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_52282.reference rename to tests/queries/0_stateless/02354_vector_search_adaptive_index_granularity.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql b/tests/queries/0_stateless/02354_vector_search_adaptive_index_granularity.sql similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql rename to tests/queries/0_stateless/02354_vector_search_adaptive_index_granularity.sql diff --git a/tests/queries/0_stateless/02354_vector_search_bug_71381.reference b/tests/queries/0_stateless/02354_vector_search_and_other_skipping_indexes.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_71381.reference rename to tests/queries/0_stateless/02354_vector_search_and_other_skipping_indexes.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_71381.sql b/tests/queries/0_stateless/02354_vector_search_and_other_skipping_indexes.sql similarity index 79% rename from tests/queries/0_stateless/02354_vector_search_bug_71381.sql rename to tests/queries/0_stateless/02354_vector_search_and_other_skipping_indexes.sql index 9e3246700b8..386d3b6e26e 100644 --- a/tests/queries/0_stateless/02354_vector_search_bug_71381.sql +++ b/tests/queries/0_stateless/02354_vector_search_and_other_skipping_indexes.sql @@ -2,7 +2,7 @@ SET allow_experimental_vector_similarity_index = 1; --- Issue #71381: Usage of vector similarity index and further skipping indexes on the same table +-- Usage of vector similarity index and further skipping indexes on the same table (issue #71381) DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.reference b/tests/queries/0_stateless/02354_vector_search_different_array_sizes.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.reference rename to tests/queries/0_stateless/02354_vector_search_different_array_sizes.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql b/tests/queries/0_stateless/02354_vector_search_different_array_sizes.sql similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql rename to tests/queries/0_stateless/02354_vector_search_different_array_sizes.sql diff --git a/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.reference b/tests/queries/0_stateless/02354_vector_search_empty_arrays_or_default_values.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.reference rename to tests/queries/0_stateless/02354_vector_search_empty_arrays_or_default_values.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_52282.sql b/tests/queries/0_stateless/02354_vector_search_empty_arrays_or_default_values.sql similarity index 80% rename from tests/queries/0_stateless/02354_vector_search_bug_52282.sql rename to tests/queries/0_stateless/02354_vector_search_empty_arrays_or_default_values.sql index b8066ce278a..e24b1a527be 100644 --- a/tests/queries/0_stateless/02354_vector_search_bug_52282.sql +++ b/tests/queries/0_stateless/02354_vector_search_empty_arrays_or_default_values.sql @@ -2,7 +2,7 @@ SET allow_experimental_vector_similarity_index 
= 1; --- Issue #52258: Vector similarity indexes must reject empty Arrays or Arrays with default values +-- Vector similarity indexes must reject empty Arrays or Arrays with default values (issue #52258) DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.reference b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.reference rename to tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.sql b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.sql rename to tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference b/tests/queries/0_stateless/02354_vector_search_multiple_marks.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference rename to tests/queries/0_stateless/02354_vector_search_multiple_marks.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql b/tests/queries/0_stateless/02354_vector_search_multiple_marks.sql similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql rename to tests/queries/0_stateless/02354_vector_search_multiple_marks.sql diff --git a/tests/queries/0_stateless/02354_vector_search_bug_69085.reference b/tests/queries/0_stateless/02354_vector_search_subquery.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_69085.reference rename to tests/queries/0_stateless/02354_vector_search_subquery.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_69085.sql b/tests/queries/0_stateless/02354_vector_search_subquery.sql similarity index 93% rename from tests/queries/0_stateless/02354_vector_search_bug_69085.sql rename to tests/queries/0_stateless/02354_vector_search_subquery.sql index 4dbcdf66e36..65ad0dbcd97 100644 --- a/tests/queries/0_stateless/02354_vector_search_bug_69085.sql +++ b/tests/queries/0_stateless/02354_vector_search_subquery.sql @@ -3,7 +3,7 @@ SET allow_experimental_vector_similarity_index = 1; SET enable_analyzer = 0; --- Issue #69085: Reference vector for vector search is computed by a subquery +-- Reference vector for vector search is computed by a subquery (issue #69085) DROP TABLE IF EXISTS tab; From be10aba49aca0d3253e4c714eabed196fe6411e2 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 7 Nov 2024 10:42:51 +0000 Subject: [PATCH 254/566] Minor cleanup --- .../MergeTree/MergeTreeIndexVectorSimilarity.cpp | 9 +++------ src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h | 3 --- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index cca3ca6ce3b..0b17fa05072 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -178,23 +178,20 @@ String USearchIndexWithSerialization::Statistics::toString() const } MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, - const Block & index_sample_block_, 
unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, UsearchHnswParams usearch_hnsw_params_) - : MergeTreeIndexGranuleVectorSimilarity(index_name_, index_sample_block_, metric_kind_, scalar_kind_, usearch_hnsw_params_, nullptr) + : MergeTreeIndexGranuleVectorSimilarity(index_name_, metric_kind_, scalar_kind_, usearch_hnsw_params_, nullptr) { } MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, - const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, UsearchHnswParams usearch_hnsw_params_, USearchIndexWithSerializationPtr index_) : index_name(index_name_) - , index_sample_block(index_sample_block_) , metric_kind(metric_kind_) , scalar_kind(scalar_kind_) , usearch_hnsw_params(usearch_hnsw_params_) @@ -261,7 +258,7 @@ MergeTreeIndexAggregatorVectorSimilarity::MergeTreeIndexAggregatorVectorSimilari MergeTreeIndexGranulePtr MergeTreeIndexAggregatorVectorSimilarity::getGranuleAndReset() { - auto granule = std::make_shared(index_name, index_sample_block, metric_kind, scalar_kind, usearch_hnsw_params, index); + auto granule = std::make_shared(index_name, metric_kind, scalar_kind, usearch_hnsw_params, index); index = nullptr; return granule; } @@ -490,7 +487,7 @@ MergeTreeIndexVectorSimilarity::MergeTreeIndexVectorSimilarity( MergeTreeIndexGranulePtr MergeTreeIndexVectorSimilarity::createIndexGranule() const { - return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind, usearch_hnsw_params); + return std::make_shared(index.name, metric_kind, scalar_kind, usearch_hnsw_params); } MergeTreeIndexAggregatorPtr MergeTreeIndexVectorSimilarity::createIndexAggregator(const MergeTreeWriterSettings & /*settings*/) const diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h index 9a81e168393..fe5049daf77 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h @@ -69,14 +69,12 @@ struct MergeTreeIndexGranuleVectorSimilarity final : public IMergeTreeIndexGranu { MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, - const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, UsearchHnswParams usearch_hnsw_params_); MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, - const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, UsearchHnswParams usearch_hnsw_params_, @@ -90,7 +88,6 @@ struct MergeTreeIndexGranuleVectorSimilarity final : public IMergeTreeIndexGranu bool empty() const override { return !index || index->size() == 0; } const String index_name; - const Block index_sample_block; const unum::usearch::metric_kind_t metric_kind; const unum::usearch::scalar_kind_t scalar_kind; const UsearchHnswParams usearch_hnsw_params; From f229fc5b40bd0faa3f312bcdc3123cfdfb6a70fc Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Thu, 7 Nov 2024 12:14:09 +0100 Subject: [PATCH 255/566] Deprecate CLICKHOUSE_UID/CLICKHOUSE_GID docker ENV --- docker/keeper/entrypoint.sh | 14 ++++++++------ docker/server/entrypoint.sh | 14 ++++++++------ 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/docker/keeper/entrypoint.sh b/docker/keeper/entrypoint.sh index 68bd0ef9d87..c5d5d26ec11 100644 --- a/docker/keeper/entrypoint.sh +++ b/docker/keeper/entrypoint.sh @@ -9,13 +9,15 @@ if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then DO_CHOWN=0 fi -CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" -CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" - -# support --user +# support `docker run --user=xxx:xxxx` if [ "$(id -u)" = "0" ]; then - USER=$CLICKHOUSE_UID - GROUP=$CLICKHOUSE_GID + # CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility + if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then + echo 'WARNING: consider using a proper "--user=xxx:xxxx" running argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2 + echo 'Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases' >&2 + fi + USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" + GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" if command -v gosu &> /dev/null; then gosu="gosu $USER:$GROUP" elif command -v su-exec &> /dev/null; then diff --git a/docker/server/entrypoint.sh b/docker/server/entrypoint.sh index 3102ab8297c..a60643c63f1 100755 --- a/docker/server/entrypoint.sh +++ b/docker/server/entrypoint.sh @@ -8,13 +8,15 @@ if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then DO_CHOWN=0 fi -CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" -CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" - -# support --user +# support `docker run --user=xxx:xxxx` if [ "$(id -u)" = "0" ]; then - USER=$CLICKHOUSE_UID - GROUP=$CLICKHOUSE_GID + # CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility + if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then + echo 'WARNING: consider using a proper "--user=xxx:xxxx" running argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2 + echo 'Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases' >&2 + fi + USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" + GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" else USER="$(id -u)" GROUP="$(id -g)" From b82658a28524f47356ab63a3c367489e10c83791 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Thu, 7 Nov 2024 12:16:19 +0100 Subject: [PATCH 256/566] Remove processing of CLICKHOUSE_DOCKER_RESTART_ON_EXIT --- docker/server/entrypoint.sh | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/docker/server/entrypoint.sh b/docker/server/entrypoint.sh index a60643c63f1..6aa031b1352 100755 --- a/docker/server/entrypoint.sh +++ b/docker/server/entrypoint.sh @@ -205,18 +205,8 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0} export CLICKHOUSE_WATCHDOG_ENABLE - # An option for easy restarting and replacing clickhouse-server in a container, especially in Kubernetes. - # For example, you can replace the clickhouse-server binary to another and restart it while keeping the container running. 
- if [[ "${CLICKHOUSE_DOCKER_RESTART_ON_EXIT:-0}" -eq "1" ]]; then - while true; do - # This runs the server as a child process of the shell script: - /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" ||: - echo >&2 'ClickHouse Server exited, and the environment variable CLICKHOUSE_DOCKER_RESTART_ON_EXIT is set to 1. Restarting the server.' - done - else - # This replaces the shell script with the server: - exec /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" - fi + # This replaces the shell script with the server: + exec /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" fi # Otherwise, we assume the user want to run his own process, for example a `bash` shell to explore this image From ae97149041d2c489617f242ce2c96648c98ae620 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Thu, 7 Nov 2024 12:18:11 +0100 Subject: [PATCH 257/566] Remove `/usr/bin` for clickhouse/clickhouse-server/clickhouse-keeper --- docker/keeper/entrypoint.sh | 4 ++-- docker/server/entrypoint.sh | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/keeper/entrypoint.sh b/docker/keeper/entrypoint.sh index c5d5d26ec11..92b91a0f8c3 100644 --- a/docker/keeper/entrypoint.sh +++ b/docker/keeper/entrypoint.sh @@ -84,11 +84,11 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then # There is a config file. It is already tested with gosu (if it is readably by keeper user) if [ -f "$KEEPER_CONFIG" ]; then - exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@" + exec $gosu clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@" fi # There is no config file. Will use embedded one - exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@" + exec $gosu clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@" fi # Otherwise, we assume the user want to run his own process, for example a `bash` shell to explore this image diff --git a/docker/server/entrypoint.sh b/docker/server/entrypoint.sh index 6aa031b1352..7a990e7d889 100755 --- a/docker/server/entrypoint.sh +++ b/docker/server/entrypoint.sh @@ -62,7 +62,7 @@ function create_directory_and_do_chown() { # if DO_CHOWN=0 it means that the system does not map root user to "admin" permissions # it mainly happens on NFS mounts where root==nobody for security reasons # thus mkdir MUST run with user id/gid and not from nobody that has zero permissions - mkdir="/usr/bin/clickhouse su "${USER}:${GROUP}" mkdir" + mkdir="clickhouse su ""${USER}:${GROUP}"" mkdir" fi if ! $mkdir -p "$dir"; then echo "Couldn't create necessary directory: $dir" @@ -145,7 +145,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then fi # Listen only on localhost until the initialization is done - /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 & + clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 & pid="$!" 
# check if clickhouse is ready to accept connections @@ -206,7 +206,7 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then export CLICKHOUSE_WATCHDOG_ENABLE # This replaces the shell script with the server: - exec /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" + exec clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" fi # Otherwise, we assume the user want to run his own process, for example a `bash` shell to explore this image From 1babb919c3450969b2ecc810705854e702458110 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Thu, 7 Nov 2024 12:18:57 +0100 Subject: [PATCH 258/566] Follow the DOI review recommendations/requirements --- docker/server/Dockerfile.ubuntu | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 0d5c983f5e6..2b023a9cf03 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -91,7 +91,6 @@ RUN if [ -n "${single_binary_location_url}" ]; then \ RUN if ! clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \ apt-get update \ && apt-get install --yes --no-install-recommends \ - apt-transport-https \ dirmngr \ gnupg2 \ && mkdir -p /etc/apt/sources.list.d \ @@ -108,13 +107,12 @@ RUN if ! clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \ && for package in ${PACKAGES}; do \ packages="${packages} ${package}=${VERSION}" \ ; done \ - && apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \ + && apt-get install --yes --no-install-recommends ${packages} || exit 1 \ && rm -rf \ /var/lib/apt/lists/* \ /var/cache/debconf \ /tmp/* \ - && apt-get autoremove --purge -yq libksba8 \ - && apt-get autoremove -yq \ + && apt-get autoremove --purge -yq dirmngr gnupg2 \ ; fi # post install @@ -126,8 +124,6 @@ RUN clickhouse-local -q 'SELECT * FROM system.build_options' \ RUN locale-gen en_US.UTF-8 ENV LANG en_US.UTF-8 -ENV LANGUAGE en_US:en -ENV LC_ALL en_US.UTF-8 ENV TZ UTC RUN mkdir /docker-entrypoint-initdb.d From a6b08187b31b3d3d6a5432bf8f39fe81ab5d81a7 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Thu, 7 Nov 2024 20:03:44 +0800 Subject: [PATCH 259/566] checkstyle and doc --- .../functions/type-conversion-functions.md | 46 ++++++++++++++++++- src/Functions/parseDateTime.cpp | 27 +++++------ 2 files changed, 57 insertions(+), 16 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 91bae2fe9da..c44d9ddb12b 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -6867,9 +6867,53 @@ Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that it returns `NULL` when it encounters a date format that cannot be processed. +## parseDateTime64 + +Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [MySQL format string](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format). + +**Syntax** + +``` sql +parseDateTime64(str[, [scale, [format[, timezone]]]]) +``` + +**Arguments** + +- `str` — The String to be parsed +- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default is 6 if not specified. +- `format` — The format string. 
Optional. `%Y-%m-%d %H:%i:%s` if not specified. +- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. + +**Returned value(s)** + +Returns [DateTime64](../data-types/datetime64.md) type values parsed from input string according to a MySQL style format string. + +## parseDateTime64OrZero +Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed. + +## parseDateTime64OrNull +Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed. + ## parseDateTime64InJodaSyntax -Similar to [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax). Differently, it returns a value of type [DateTime64](../data-types/datetime64.md). +Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [Joda format string](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html). + +**Syntax** + +``` sql +parseDateTime64InJodaSyntax(str[, [scale, [format[, timezone]]]]) +``` + +**Arguments** + +- `str` — The String to be parsed +- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default is 6 if not specified. +- `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified. +- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. + +**Returned value(s)** + +Returns [DateTime64](../data-types/datetime64.md) type values parsed from input string according to a joda style format string. ## parseDateTime64InJodaSyntaxOrZero diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 9f7f78dcbe2..5743278e104 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -613,15 +613,12 @@ namespace FunctionArgumentDescriptors optional_args; if constexpr (return_type == ReturnType::DateTime64) - { - optional_args = { - {"scale/format", static_cast([](const IDataType & data_type) -> bool { - return isUInt(data_type) || isString(data_type); - }), nullptr, "UInt or String"}, + optional_args = {{"scale/format", static_cast( + [](const IDataType & data_type) -> bool { return isUInt(data_type) || isString(data_type); } + ), nullptr, "UInt or String"}, {"format", static_cast(&isString), nullptr, "String"}, {"timezone", static_cast(&isString), &isColumnConst, "const String"} }; - } else optional_args = { {"format", static_cast(&isString), nullptr, "String"}, @@ -659,7 +656,7 @@ namespace } /// Construct the return type `DataTypDateTime64` with scale and time zone name. The scale value can be specified or be extracted - /// from the format string by c how many 'S' characters are contained in the format's micorsceond fragment. + /// from the format string by counting how many 'S' characters are contained in the format's micorsceond fragment. String format = getFormat(arguments, scale); std::vector instructions = parseFormat(format); for (const auto & instruction : instructions) @@ -676,7 +673,7 @@ namespace else val++; } - /// If the scale is already specified by the second argument, but it not equals the value that extract from the format string, + /// If the scale is already specified by the second argument, but it not equals the value that extract from the format string, /// then we should throw an exception; If the scale is not specified, then we should set its value as the extracted one. 
if (val != 0 && scale != 0 && val != scale) throw Exception(ErrorCodes::BAD_ARGUMENTS, @@ -687,7 +684,7 @@ namespace scale = val; } if (scale > maxPrecisionOfDateTime64) - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale of the input format string {} exceed the max scale value {}.", format, maxPrecisionOfDateTime64); @@ -709,7 +706,7 @@ namespace non_null_result_type = removeNullable(result_type); else non_null_result_type = result_type; - + if constexpr (return_type == ReturnType::DateTime64) { const auto * datatime64_type = checkAndGetDataType(non_null_result_type.get()); @@ -726,7 +723,7 @@ namespace } template - ColumnPtr executeImpl2(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, + ColumnPtr executeImpl2(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, MutableColumnPtr & col_res, PaddedPODArray & res_data) const { const auto * col_str = checkAndGetColumn(arguments[0].column.get()); @@ -736,7 +733,7 @@ namespace "Illegal column {} of first ('str') argument of function {}. Must be string.", arguments[0].column->getName(), getName()); - + ColumnUInt8::MutablePtr col_null_map; if constexpr (error_handling == ErrorHandling::Null) col_null_map = ColumnUInt8::create(input_rows_count, 0); @@ -802,7 +799,7 @@ namespace Int64OrError result = 0; - /// Ensure all input was consumed when the return type is `DateTime`. + /// Ensure all input was consumed. if (cur < end) { result = tl::unexpected(ErrorCodeAndMessage( @@ -2273,7 +2270,7 @@ namespace { /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22.22.123', 3), then the format is treated /// as default value `yyyy-MM-dd HH:mm:ss`. - /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 'yyyy-MM-dd HH:mm:ss.SSS')`, + /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 'yyyy-MM-dd HH:mm:ss.SSS')`, /// then the second argument is the format. /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS')`, /// then the third argument is the format. @@ -2321,7 +2318,7 @@ namespace /// If the return type is DateTime64, and the second argument is UInt type for scale, then it has 2 reasonable situations: /// the first like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT+8') /// the second like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f'). And for the - /// first one, we should return the last argument as its timezone, and for the second one, we should return the default time zone as + /// first one, we should return the last argument as its timezone, and for the second one, we should return the default time zone as /// `DateLUT::instance()`. 
if (isUInt(arguments[1].type) && arguments.size() < 4) return DateLUT::instance(); From 552b0fc8d0f106db1a85805ab883debe7e491e9c Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 7 Nov 2024 13:11:33 +0100 Subject: [PATCH 260/566] Rename a setting --- src/Core/Settings.cpp | 3 ++- src/IO/ReadSettings.h | 2 +- src/Interpreters/Cache/QueryLimit.cpp | 2 +- src/Interpreters/Context.cpp | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index c2ffc2ddf0e..d9668849fd2 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4852,7 +4852,7 @@ Allows to record the filesystem caching log for each query DECLARE(Bool, read_from_filesystem_cache_if_exists_otherwise_bypass_cache, false, R"( Allow to use the filesystem cache in passive mode - benefit from the existing cache entries, but don't put more entries into the cache. If you set this setting for heavy ad-hoc queries and leave it disabled for short real-time queries, this will allows to avoid cache threshing by too heavy queries and to improve the overall system efficiency. )", 0) \ - DECLARE(Bool, skip_download_if_exceeds_query_cache, true, R"( + DECLARE(Bool, filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit, true, R"( Skip download from remote filesystem if exceeds query cache size )", 0) \ DECLARE(UInt64, filesystem_cache_max_download_size, (128UL * 1024 * 1024 * 1024), R"( @@ -5887,6 +5887,7 @@ Experimental data deduplication for SELECT queries based on part UUIDs MAKE_OBSOLETE(M, Bool, use_mysql_types_in_show_columns, false) \ MAKE_OBSOLETE(M, Bool, s3queue_allow_experimental_sharded_mode, false) \ MAKE_OBSOLETE(M, LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW) \ + MAKE_OBSOLETE(M, Bool, skip_download_if_exceeds_query_cache, true) \ /* moved to config.xml: see also src/Core/ServerSettings.h */ \ MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_buffer_flush_schedule_pool_size, 16) \ MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_pool_size, 16) \ diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 6ed02212095..103ce7df54b 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -68,7 +68,7 @@ struct ReadSettings std::shared_ptr page_cache; size_t filesystem_cache_max_download_size = (128UL * 1024 * 1024 * 1024); - bool skip_download_if_exceeds_query_cache = true; + bool filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit = true; size_t remote_read_min_bytes_for_seek = DBMS_DEFAULT_BUFFER_SIZE; diff --git a/src/Interpreters/Cache/QueryLimit.cpp b/src/Interpreters/Cache/QueryLimit.cpp index b18d23a5b7f..a7c964022a5 100644 --- a/src/Interpreters/Cache/QueryLimit.cpp +++ b/src/Interpreters/Cache/QueryLimit.cpp @@ -53,7 +53,7 @@ FileCacheQueryLimit::QueryContextPtr FileCacheQueryLimit::getOrSetQueryContext( { it->second = std::make_shared( settings.filesystem_cache_max_download_size, - !settings.skip_download_if_exceeds_query_cache); + !settings.filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit); } return it->second; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index c1fa2c8549a..7b7cdfa2104 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -236,7 +236,7 @@ namespace Setting extern const SettingsUInt64 remote_fs_read_backoff_max_tries; extern const SettingsUInt64 remote_read_min_bytes_for_seek; extern const SettingsBool 
throw_on_error_from_cache_on_write_operations; - extern const SettingsBool skip_download_if_exceeds_query_cache; + extern const SettingsBool filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit; extern const SettingsBool s3_allow_parallel_part_upload; extern const SettingsBool use_page_cache_for_disks_without_file_cache; extern const SettingsUInt64 use_structure_from_insertion_table_in_table_functions; @@ -5753,7 +5753,7 @@ ReadSettings Context::getReadSettings() const res.filesystem_cache_allow_background_download_during_fetch = settings_ref[Setting::filesystem_cache_enable_background_download_during_fetch]; res.filesystem_cache_max_download_size = settings_ref[Setting::filesystem_cache_max_download_size]; - res.skip_download_if_exceeds_query_cache = settings_ref[Setting::skip_download_if_exceeds_query_cache]; + res.filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit = settings_ref[Setting::filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit]; res.page_cache = getPageCache(); res.use_page_cache_for_disks_without_file_cache = settings_ref[Setting::use_page_cache_for_disks_without_file_cache]; From 3332bce1dc94e7fddccc1865df01eb029e5f7e52 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Thu, 7 Nov 2024 20:38:44 +0800 Subject: [PATCH 261/566] fix doc and comments --- .../functions/type-conversion-functions.md | 6 +++--- src/Functions/parseDateTime.cpp | 16 ++++++++-------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index c44d9ddb12b..8043b21744a 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -6880,7 +6880,7 @@ parseDateTime64(str[, [scale, [format[, timezone]]]]) **Arguments** - `str` — The String to be parsed -- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default is 6 if not specified. +- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default 6 if not specified. - `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. @@ -6892,7 +6892,7 @@ Returns [DateTime64](../data-types/datetime64.md) type values parsed from input Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed. ## parseDateTime64OrNull -Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed. +Same as for [parseDateTime64](#parsedatetime64) except that it returns `NULL` when it encounters a date format that cannot be processed. ## parseDateTime64InJodaSyntax @@ -6907,7 +6907,7 @@ parseDateTime64InJodaSyntax(str[, [scale, [format[, timezone]]]]) **Arguments** - `str` — The String to be parsed -- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default is 6 if not specified. +- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default 0 if not specified. - `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. 
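For illustration, a brief usage sketch of the signatures documented above (not part of the patch; the literal date strings and formats mirror values already used in the tests and code comments in this series):

```sql
-- parseDateTime64 with a MySQL-style format: scale 6, %f consumes the microseconds.
SELECT parseDateTime64('2024-11-05 12:22:22.123456', 6, '%Y-%m-%d %H:%i:%s.%f');

-- parseDateTime64InJodaSyntax with scale 3 and an explicit Joda format.
SELECT parseDateTime64InJodaSyntax('2024-11-05 12:22:22.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS');

-- The OrZero / OrNull variants return a zero date or NULL instead of throwing on bad input.
SELECT parseDateTime64OrZero('invalid', 3), parseDateTime64OrNull('invalid', 3);
```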
diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 5743278e104..7190c1ad6f8 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -65,7 +65,7 @@ namespace constexpr Int32 minYear = 1970; constexpr Int32 maxYear = 2106; - constexpr Int32 maxPrecisionOfDateTime64 = 6; + constexpr Int32 maxScaleOfDateTime64 = 6; const std::unordered_map> dayOfWeekMap{ {"mon", {"day", 1}}, @@ -193,7 +193,7 @@ namespace Int32 minute = 0; /// range [0, 59] Int32 second = 0; /// range [0, 59] Int32 microsecond = 0; /// range [0, 999999] - UInt32 scale = 0; /// The microsecond scale of DateTime64. + UInt32 scale = 0; /// The scale of DateTime64, range [0, 6]. bool is_am = true; /// If is_hour_of_half_day = true and is_am = false (i.e. pm) then add 12 hours to the result DateTime bool hour_starts_at_1 = false; /// Whether the hour is clockhour @@ -646,8 +646,8 @@ namespace scale = col_scale->getValue(); else throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The input scale value may exceed the max value of `DateTime64`: {}.", - maxPrecisionOfDateTime64); + "The input scale value may exceed the max scale value of `DateTime64`: {}.", + maxScaleOfDateTime64); } else { @@ -656,7 +656,7 @@ namespace } /// Construct the return type `DataTypDateTime64` with scale and time zone name. The scale value can be specified or be extracted - /// from the format string by counting how many 'S' characters are contained in the format's micorsceond fragment. + /// from the format string by counting how many 'S' characters are contained in the format's microsceond fragment. String format = getFormat(arguments, scale); std::vector instructions = parseFormat(format); for (const auto & instruction : instructions) @@ -683,11 +683,11 @@ namespace else if (scale == 0 && val != 0) scale = val; } - if (scale > maxPrecisionOfDateTime64) + if (scale > maxScaleOfDateTime64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale of the input format string {} exceed the max scale value {}.", format, - maxPrecisionOfDateTime64); + maxScaleOfDateTime64); } data_type = std::make_shared(scale, time_zone_name); } @@ -1398,7 +1398,7 @@ namespace if (date.scale != 6) RETURN_ERROR( ErrorCodes::CANNOT_PARSE_DATETIME, - "Unable to parse fragment {} from {} because of the microsecond's scale {} is not 6", + "Unable to parse fragment {} from {} because of the datetime scale {} is not 6", fragment, std::string_view(cur, end - cur), std::to_string(date.scale)) From d8ff6f868fe6cb346ac751b468b462b857399480 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Thu, 7 Nov 2024 12:36:21 +0000 Subject: [PATCH 262/566] bitShift: return 0 instead of throwing an exception if overflow --- src/Functions/bitShiftLeft.cpp | 20 +++++++++++-------- src/Functions/bitShiftRight.cpp | 20 +++++++++++-------- .../02766_bitshift_with_const_arguments.sql | 2 +- ...t_throws_error_for_out_of_bounds.reference | 6 ++++++ ...t_shift_throws_error_for_out_of_bounds.sql | 12 +++++------ 5 files changed, 37 insertions(+), 23 deletions(-) diff --git a/src/Functions/bitShiftLeft.cpp b/src/Functions/bitShiftLeft.cpp index 0eb0d82ef0f..7fd0f7cf631 100644 --- a/src/Functions/bitShiftLeft.cpp +++ b/src/Functions/bitShiftLeft.cpp @@ -25,8 +25,10 @@ struct BitShiftLeftImpl { if constexpr (is_big_int_v) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "BitShiftLeft is not implemented for big integers as second argument"); - else if (b < 0 || static_cast(b) > 8 * sizeof(A)) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift 
positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); + else if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + else if (static_cast(b) > 8 * sizeof(A)) + return static_cast(0); else if constexpr (is_big_int_v) return static_cast(a) << static_cast(b); else @@ -43,9 +45,10 @@ struct BitShiftLeftImpl const UInt8 word_size = 8 * sizeof(*pos); size_t n = end - pos; const UInt128 bit_limit = static_cast(word_size) * n; - if (b < 0 || static_cast(b) > bit_limit) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); - if (b == bit_limit) + if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + + if (b == bit_limit || static_cast(b) > bit_limit) { // insert default value out_vec.push_back(0); @@ -111,9 +114,10 @@ struct BitShiftLeftImpl const UInt8 word_size = 8; size_t n = end - pos; const UInt128 bit_limit = static_cast(word_size) * n; - if (b < 0 || static_cast(b) > bit_limit) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); - if (b == bit_limit) + if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + + if (b == bit_limit || static_cast(b) > bit_limit) { // insert default value out_vec.resize_fill(out_vec.size() + n); diff --git a/src/Functions/bitShiftRight.cpp b/src/Functions/bitShiftRight.cpp index 16032b32f68..19ea7b8c751 100644 --- a/src/Functions/bitShiftRight.cpp +++ b/src/Functions/bitShiftRight.cpp @@ -26,8 +26,10 @@ struct BitShiftRightImpl { if constexpr (is_big_int_v) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "BitShiftRight is not implemented for big integers as second argument"); - else if (b < 0 || static_cast(b) > 8 * sizeof(A)) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); + else if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + else if (static_cast(b) > 8 * sizeof(A)) + return static_cast(0); else if constexpr (is_big_int_v) return static_cast(a) >> static_cast(b); else @@ -59,9 +61,10 @@ struct BitShiftRightImpl const UInt8 word_size = 8; size_t n = end - pos; const UInt128 bit_limit = static_cast(word_size) * n; - if (b < 0 || static_cast(b) > bit_limit) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); - if (b == bit_limit) + if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + + if (b == bit_limit || static_cast(b) > bit_limit) { /// insert default value out_vec.push_back(0); @@ -99,9 +102,10 @@ struct BitShiftRightImpl const UInt8 word_size = 8; size_t n = end - pos; const UInt128 bit_limit = static_cast(word_size) * n; - if (b < 0 || static_cast(b) > bit_limit) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value and less or equal to the bit width of the value to 
shift"); - if (b == bit_limit) + if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + + if (b == bit_limit || static_cast(b) > bit_limit) { // insert default value out_vec.resize_fill(out_vec.size() + n); diff --git a/tests/queries/0_stateless/02766_bitshift_with_const_arguments.sql b/tests/queries/0_stateless/02766_bitshift_with_const_arguments.sql index 91e8624057c..6b2961f0555 100644 --- a/tests/queries/0_stateless/02766_bitshift_with_const_arguments.sql +++ b/tests/queries/0_stateless/02766_bitshift_with_const_arguments.sql @@ -10,7 +10,7 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t0 (vkey UInt32, pkey UInt32, c0 UInt32) engine = TinyLog; CREATE TABLE t1 (vkey UInt32) ENGINE = AggregatingMergeTree ORDER BY vkey; INSERT INTO t0 VALUES (15, 25000, 58); -SELECT ref_5.pkey AS c_2_c2392_6 FROM t0 AS ref_5 WHERE 'J[' < multiIf(ref_5.pkey IN ( SELECT 1 ), bitShiftLeft(multiIf(ref_5.c0 > NULL, '1', ')'), 40), NULL); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT ref_5.pkey AS c_2_c2392_6 FROM t0 AS ref_5 WHERE 'J[' < multiIf(ref_5.pkey IN ( SELECT 1 ), bitShiftLeft(multiIf(ref_5.c0 > NULL, '1', ')'), 40), NULL); DROP TABLE t0; DROP TABLE t1; diff --git a/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.reference b/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.reference index 33b8cd6ee26..1fda82a9747 100644 --- a/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.reference +++ b/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.reference @@ -1,3 +1,9 @@ -- bitShiftRight +0 + +\0\0\0\0\0\0\0\0 -- bitShiftLeft +0 + +\0\0\0\0\0\0\0\0 OK diff --git a/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.sql b/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.sql index aec01753673..340cc1292e4 100644 --- a/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.sql +++ b/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.sql @@ -1,17 +1,17 @@ SELECT '-- bitShiftRight'; SELECT bitShiftRight(1, -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftRight(toUInt8(1), 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftRight(toUInt8(1), 8 + 1); SELECT bitShiftRight('hola', -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftRight('hola', 4 * 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftRight('hola', 4 * 8 + 1); SELECT bitShiftRight(toFixedString('hola', 8), -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftRight(toFixedString('hola', 8), 8 * 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftRight(toFixedString('hola', 8), 8 * 8 + 1); SELECT '-- bitShiftLeft'; SELECT bitShiftLeft(1, -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftLeft(toUInt8(1), 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftLeft(toUInt8(1), 8 + 1); SELECT bitShiftLeft('hola', -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftLeft('hola', 4 * 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftLeft('hola', 4 * 8 + 1); SELECT bitShiftLeft(toFixedString('hola', 8), -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftLeft(toFixedString('hola', 8), 8 * 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftLeft(toFixedString('hola', 8), 8 * 8 + 1); SELECT 'OK'; \ No newline at end of file From f727a3931bfa0d7b3945bfb8703665aef3fc0695 Mon 
Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 7 Nov 2024 12:41:48 +0000 Subject: [PATCH 263/566] Clarify query cache docs and remove obsolete setting --- docs/en/operations/query-cache.md | 23 +++++++++++------------ src/Core/Settings.cpp | 1 - 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/docs/en/operations/query-cache.md b/docs/en/operations/query-cache.md index 955cec0234e..f0941aa28aa 100644 --- a/docs/en/operations/query-cache.md +++ b/docs/en/operations/query-cache.md @@ -25,9 +25,10 @@ Query caches can generally be viewed as transactionally consistent or inconsiste slowly enough that the database only needs to compute the report once (represented by the first `SELECT` query). Further queries can be served directly from the query cache. In this example, a reasonable validity period could be 30 min. -Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result, -the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side. -This reduces maintenance effort and avoids redundancy. +Transactionally inconsistent caching is traditionally provided by client tools or proxy packages (e.g. +[chproxy](https://www.chproxy.org/configuration/caching/)) interacting with the database. As a result, the same caching logic and +configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side. This reduces maintenance +effort and avoids redundancy. ## Configuration Settings and Usage @@ -138,7 +139,10 @@ is only cached if the query runs longer than 5 seconds. It is also possible to s cached - for that use setting [query_cache_min_query_runs](settings/settings.md#query-cache-min-query-runs). Entries in the query cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a different -value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl). +value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl). The query +cache evicts entries "lazily", i.e. when an entry becomes stale, it is not immediately removed from the cache. Instead, when a new entry +is to be inserted into the query cache, the database checks whether the cache has enough free space for the new entry. If this is not the +case, the database tries to remove all stale entries. If the cache still has not enough free space, the new entry is not inserted. Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries). @@ -188,14 +192,9 @@ Also, results of queries with non-deterministic functions are not cached by defa To force caching of results of queries with non-deterministic functions regardless, use setting [query_cache_nondeterministic_function_handling](settings/settings.md#query-cache-nondeterministic-function-handling). -Results of queries that involve system tables, e.g. `system.processes` or `information_schema.tables`, are not cached by default. To force -caching of results of queries with system tables regardless, use setting -[query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling). 
- -:::note -Prior to ClickHouse v23.11, setting 'query_cache_store_results_of_queries_with_nondeterministic_functions = 0 / 1' controlled whether -results of queries with non-deterministic results were cached. In newer ClickHouse versions, this setting is obsolete and has no effect. -::: +Results of queries that involve system tables (e.g. [system.processes](system-tables/processes.md)` or +[information_schema.tables](system-tables/information_schema.md)) are not cached by default. To force caching of results of queries with +system tables regardless, use setting [query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling). Finally, entries in the query cache are not shared between users due to security reasons. For example, user A must not be able to bypass a row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index c2ffc2ddf0e..3bfa58e4f98 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -5916,7 +5916,6 @@ Experimental data deduplication for SELECT queries based on part UUIDs MAKE_OBSOLETE(M, UInt64, parallel_replicas_min_number_of_granules_to_enable, 0) \ MAKE_OBSOLETE(M, ParallelReplicasCustomKeyFilterType, parallel_replicas_custom_key_filter_type, ParallelReplicasCustomKeyFilterType::DEFAULT) \ MAKE_OBSOLETE(M, Bool, query_plan_optimize_projection, true) \ - MAKE_OBSOLETE(M, Bool, query_cache_store_results_of_queries_with_nondeterministic_functions, false) \ MAKE_OBSOLETE(M, Bool, allow_experimental_annoy_index, false) \ MAKE_OBSOLETE(M, UInt64, max_threads_for_annoy_index_creation, 4) \ MAKE_OBSOLETE(M, Int64, annoy_index_search_k_nodes, -1) \ From ca23e5254c2cca5e6b3f4a9c7ccd65f70be42fc4 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 7 Nov 2024 12:44:57 +0000 Subject: [PATCH 264/566] Fix for tmp parts --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 20d7528d38a..fb934a77512 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -2501,7 +2501,7 @@ ColumnPtr IMergeTreeDataPart::getColumnSample(const NameAndTypePair & column) co { const size_t total_mark = getMarksCount(); /// If column doesn't have dynamic subcolumns or part has no data, just create column using it's type. - if (!column.type->hasDynamicSubcolumns() || !total_mark) + if (is_temp || !column.type->hasDynamicSubcolumns() || !total_mark) return column.type->createColumn(); /// Otherwise, read sample column with 0 rows from the part, so it will load dynamic structure. 
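As noted in the documentation above, results of queries over system tables are not cached unless explicitly requested. A minimal sketch, assuming `query_cache_system_table_handling` accepts the same `'throw'` / `'save'` / `'ignore'` values as the non-deterministic-function handling setting used in the test further below:

```bash
# Assumption: 'save' forces caching even though the query reads a system table.
clickhouse-client --query "
    SELECT count()
    FROM system.tables
    SETTINGS use_query_cache = true,
             query_cache_system_table_handling = 'save'
"
```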
@@ -2510,22 +2510,24 @@ ColumnPtr IMergeTreeDataPart::getColumnSample(const NameAndTypePair & column) co StorageMetadataPtr metadata_ptr = storage.getInMemoryMetadataPtr(); StorageSnapshotPtr storage_snapshot_ptr = std::make_shared(storage, metadata_ptr); + MergeTreeReaderSettings settings; + settings.can_read_part_without_marks = true; MergeTreeReaderPtr reader = getReader( cols, storage_snapshot_ptr, - MarkRanges{MarkRange(0, 1)}, + MarkRanges{MarkRange(0, total_mark)}, /*virtual_fields=*/ {}, /*uncompressed_cache=*/{}, storage.getContext()->getMarkCache().get(), std::make_shared(), - MergeTreeReaderSettings{}, + settings, ValueSizeMap{}, ReadBufferFromFileBase::ProfileCallback{}); Columns result; result.resize(1); - reader->readRows(0, 1, false, 0, result); + reader->readRows(0, total_mark, false, 0, result); return result[0]; } From d43329f254eaaddaece94d4f96631b3307be23bb Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Tue, 5 Nov 2024 13:31:10 +0100 Subject: [PATCH 265/566] UX: slightly improve cache await interface --- tests/ci/ci_cache.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index 6f2e3e70736..5ebed827926 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -795,11 +795,12 @@ class CiCache: # start waiting for the next TIMEOUT seconds if there are more than X(=4) jobs to wait # wait TIMEOUT seconds in rounds. Y(=5) is the max number of rounds expired_sec = 0 - start_at = int(time.time()) + start_at = time.time() while expired_sec < TIMEOUT and self.jobs_to_wait: await_finished: Set[str] = set() if not dry_run: - time.sleep(poll_interval_sec) + # Do not sleep longer than required + time.sleep(min(poll_interval_sec, TIMEOUT - expired_sec)) self.update() for job_name, job_config in self.jobs_to_wait.items(): num_batches = job_config.num_batches @@ -844,7 +845,8 @@ class CiCache: del self.jobs_to_wait[job] if not dry_run: - expired_sec = int(time.time()) - start_at + # Avoid `seconds left [-3]` + expired_sec = min(int(time.time() - start_at), TIMEOUT) print( f"...awaiting continues... seconds left [{TIMEOUT - expired_sec}]" ) From ccaa66963dfa937f6a2562ff22d9b90254fefea3 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Tue, 5 Nov 2024 13:37:35 +0100 Subject: [PATCH 266/566] Print a proper message for finished awaiting --- tests/ci/ci_cache.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index 5ebed827926..c271339db8b 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -845,11 +845,12 @@ class CiCache: del self.jobs_to_wait[job] if not dry_run: - # Avoid `seconds left [-3]` - expired_sec = min(int(time.time() - start_at), TIMEOUT) - print( - f"...awaiting continues... seconds left [{TIMEOUT - expired_sec}]" - ) + expired_sec = int(time.time() - start_at) + msg = f"...awaiting continues... 
seconds left [{TIMEOUT - expired_sec}]" + if expired_sec >= TIMEOUT: + # Avoid `seconds left [-3]` + msg = f"awaiting for round {round_cnt} is finished" + print(msg) else: # make up for 2 iterations in dry_run expired_sec += int(TIMEOUT / 2) + 1 From 07b480c1e4e1f1fd647c4c9cf7d00e29b5619868 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 7 Nov 2024 14:44:31 +0100 Subject: [PATCH 267/566] Implicitly treat a file argument as --queries-file --- programs/main.cpp | 40 +++++++++---------- src/Client/ClientBaseOptimizedParts.cpp | 8 +++- ...al_arguments_implicit_query_file.reference | 11 +++++ ...ositional_arguments_implicit_query_file.sh | 34 ++++++++++++++++ 4 files changed, 71 insertions(+), 22 deletions(-) create mode 100644 tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.reference create mode 100755 tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh diff --git a/programs/main.cpp b/programs/main.cpp index 02ea1471108..ea8fbc1aece 100644 --- a/programs/main.cpp +++ b/programs/main.cpp @@ -1,27 +1,22 @@ -#include -#include +#include +#include +#include +#include -#include -#include -#include -#include -#include -#include /// pair - -#include +#if defined(SANITIZE_COVERAGE) +# include +#endif #include "config.h" #include "config_tools.h" -#include -#include -#include -#include -#include - -#include -#include - +#include +#include +#include +#include +#include +#include /// pair +#include /// Universal executable for various clickhouse applications int mainEntryClickHouseServer(int argc, char ** argv); @@ -238,9 +233,12 @@ int main(int argc_, char ** argv_) /// clickhouse # spawn local /// clickhouse local # spawn local /// clickhouse "select ..." # spawn local + /// clickhouse /tmp/repro --enable-analyzer /// - if (main_func == printHelp && !argv.empty() && (argv.size() == 1 || argv[1][0] == '-' - || std::string_view(argv[1]).contains(' '))) + std::error_code ec; + if (main_func == printHelp && !argv.empty() + && (argv.size() == 1 || argv[1][0] == '-' || std::string_view(argv[1]).contains(' ') + || std::filesystem::exists(std::filesystem::path{argv[1]}, ec))) { main_func = mainEntryClickHouseLocal; } diff --git a/src/Client/ClientBaseOptimizedParts.cpp b/src/Client/ClientBaseOptimizedParts.cpp index ac4d3417779..bc362288079 100644 --- a/src/Client/ClientBaseOptimizedParts.cpp +++ b/src/Client/ClientBaseOptimizedParts.cpp @@ -1,5 +1,7 @@ #include +#include + namespace DB { @@ -107,6 +109,7 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de && !op.original_tokens[0].empty() && !op.value.empty()) { /// Two special cases for better usability: + /// - if the option is a filesystem file, then it's likely a queries file (clickhouse repro.sql) /// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1" /// These are relevant for interactive usage - user-friendly, but questionable in general. /// In case of ambiguity or for scripts, prefer using proper options. 
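The special cases listed in the comment above translate to the following command-line behaviour. This is a sketch with made-up paths; it mirrors the test script added later in this patch:

```bash
# Illustrative paths: a positional argument naming an existing file is treated as --queries-file,
# while a token containing whitespace is still treated as --query.
echo "SELECT 'Hello from a file'" > /tmp/repro.sql

clickhouse /tmp/repro.sql           # dispatches to clickhouse-local and runs the file
clickhouse-local /tmp/repro.sql     # same as: clickhouse-local --queries-file /tmp/repro.sql
clickhouse-client /tmp/repro.sql    # same as: clickhouse-client --queries-file /tmp/repro.sql
clickhouse-client "SELECT 1 + 1"    # contains a space => interpreted as --query
```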
@@ -115,7 +118,10 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de po::variable_value value(boost::any(op.value), false); const char * option; - if (token.contains(' ')) + std::error_code ec; + if (std::filesystem::exists(std::filesystem::path{token}, ec)) + option = "queries-file"; + else if (token.contains(' ')) option = "query"; else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); diff --git a/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.reference b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.reference new file mode 100644 index 00000000000..fe2432a063f --- /dev/null +++ b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.reference @@ -0,0 +1,11 @@ +Hello from a file +Hello from a file +Hello from a file +Hello from a file +Hello from a file +Hello from a file +Hello from a file +Hello from a file +Hello from a file +max_local_read_bandwidth 1 100 +max_local_read_bandwidth 1 200 diff --git a/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh new file mode 100755 index 00000000000..14b6e735a9a --- /dev/null +++ b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh @@ -0,0 +1,34 @@ +# Tags: no-random-settings + +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +FILE=${CLICKHOUSE_TMP}/${CLICKHOUSE_DATABASE}_without_extension +echo "SELECT 'Hello from a file'" > ${FILE} + +# Queries can be read from a file. +${CLICKHOUSE_BINARY} --queries-file ${FILE} + +# Or from stdin. +${CLICKHOUSE_BINARY} < ${FILE} + +# Also the positional argument can be interpreted as a file. 
+${CLICKHOUSE_BINARY} ${FILE} + +${CLICKHOUSE_LOCAL} --queries-file ${FILE} +${CLICKHOUSE_LOCAL} < ${FILE} +${CLICKHOUSE_LOCAL} ${FILE} + +${CLICKHOUSE_CLIENT} --queries-file ${FILE} +${CLICKHOUSE_CLIENT} < ${FILE} +${CLICKHOUSE_CLIENT} ${FILE} + +# Check that positional arguments work in any place +echo "Select name, changed, value FROM system.settings where name = 'max_local_read_bandwidth'" > ${FILE} +${CLICKHOUSE_BINARY} ${FILE} --max-local-read-bandwidth 100 +${CLICKHOUSE_BINARY} --max-local-read-bandwidth 200 ${FILE} + +rm ${FILE} From 06b580777e6ee8ef95cfa261b0a745ddda2662f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 7 Nov 2024 15:08:05 +0100 Subject: [PATCH 268/566] Style --- .../03267_positional_arguments_implicit_query_file.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh index 14b6e735a9a..791aa3af0db 100755 --- a/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh +++ b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh @@ -1,6 +1,5 @@ -# Tags: no-random-settings - #!/usr/bin/env bash +# Tags: no-random-settings CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 5cc42571f326ac409abdf612278042c84c4e3a74 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 7 Nov 2024 14:57:24 +0000 Subject: [PATCH 269/566] Revert obsolete settings removal --- src/Core/Settings.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 3bfa58e4f98..0d322f107de 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -5859,7 +5859,7 @@ Experimental data deduplication for SELECT queries based on part UUIDs // Please add settings related to formats in Core/FormatFactorySettings.h, move obsolete settings to OBSOLETE_SETTINGS and obsolete format settings to OBSOLETE_FORMAT_SETTINGS. #define OBSOLETE_SETTINGS(M, ALIAS) \ - /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \ + /** Obsolete settings which are kept around for compatibility reasons. They have no effect anymore. 
*/ \ MAKE_OBSOLETE(M, Bool, update_insert_deduplication_token_in_dependent_materialized_views, 0) \ MAKE_OBSOLETE(M, UInt64, max_memory_usage_for_all_queries, 0) \ MAKE_OBSOLETE(M, UInt64, multiple_joins_rewriter_version, 0) \ @@ -5916,6 +5916,7 @@ Experimental data deduplication for SELECT queries based on part UUIDs MAKE_OBSOLETE(M, UInt64, parallel_replicas_min_number_of_granules_to_enable, 0) \ MAKE_OBSOLETE(M, ParallelReplicasCustomKeyFilterType, parallel_replicas_custom_key_filter_type, ParallelReplicasCustomKeyFilterType::DEFAULT) \ MAKE_OBSOLETE(M, Bool, query_plan_optimize_projection, true) \ + MAKE_OBSOLETE(M, Bool, query_cache_store_results_of_queries_with_nondeterministic_functions, false) \ MAKE_OBSOLETE(M, Bool, allow_experimental_annoy_index, false) \ MAKE_OBSOLETE(M, UInt64, max_threads_for_annoy_index_creation, 4) \ MAKE_OBSOLETE(M, Int64, annoy_index_search_k_nodes, -1) \ From de03a5dae75b06520ab19a5fd34a561f83ae74e2 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 7 Nov 2024 15:04:53 +0000 Subject: [PATCH 270/566] Fix test which used an obsolete setting --- tests/queries/0_stateless/02494_query_cache_normalize_ast.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql index 1dbb3ef8158..cb53c4db7de 100644 --- a/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql +++ b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql @@ -7,7 +7,7 @@ SYSTEM DROP QUERY CACHE; -- Run query whose result gets cached in the query cache. -- Besides "use_query_cache", pass two more knobs (one QC-specific knob and one non-QC-specific knob). We just care -- *that* they are passed and not about their effect. -SELECT 1 SETTINGS use_query_cache = true, query_cache_store_results_of_queries_with_nondeterministic_functions = true, max_threads = 16; +SELECT 1 SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'save', max_threads = 16; -- Check that entry in QC exists SELECT COUNT(*) FROM system.query_cache; From a01c2e3f8c265aceb3042cdee1abafeed4f68485 Mon Sep 17 00:00:00 2001 From: Pervakov Grigorii Date: Thu, 7 Nov 2024 16:51:53 +0300 Subject: [PATCH 271/566] Keep materialized view security overriden context until end of query --- src/Processors/Sinks/SinkToStorage.h | 4 ++++ src/Storages/StorageMaterializedView.cpp | 2 ++ ...67_materialized_view_keeps_security_context.reference | 1 + .../03267_materialized_view_keeps_security_context.sql | 9 +++++++++ 4 files changed, 16 insertions(+) create mode 100644 tests/queries/0_stateless/03267_materialized_view_keeps_security_context.reference create mode 100644 tests/queries/0_stateless/03267_materialized_view_keeps_security_context.sql diff --git a/src/Processors/Sinks/SinkToStorage.h b/src/Processors/Sinks/SinkToStorage.h index c728fa87b1e..4bdcb2fe855 100644 --- a/src/Processors/Sinks/SinkToStorage.h +++ b/src/Processors/Sinks/SinkToStorage.h @@ -5,6 +5,8 @@ namespace DB { +class Context; + /// Sink which is returned from Storage::write. 
class SinkToStorage : public ExceptionKeepingTransform { @@ -16,12 +18,14 @@ public: const Block & getHeader() const { return inputs.front().getHeader(); } void addTableLock(const TableLockHolder & lock) { table_locks.push_back(lock); } + void addInterpreterContext(std::shared_ptr context) { interpreter_context.emplace_back(std::move(context)); } protected: virtual void consume(Chunk & chunk) = 0; private: std::vector table_locks; + std::vector> interpreter_context; void onConsume(Chunk chunk) override; GenerateResult onGenerate() override; diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index d047b28e076..3289ff1ae25 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -382,6 +382,7 @@ void StorageMaterializedView::read( } query_plan.addStorageHolder(storage); + query_plan.addInterpreterContext(context); query_plan.addTableLock(std::move(lock)); } } @@ -405,6 +406,7 @@ SinkToStoragePtr StorageMaterializedView::write(const ASTPtr & query, const Stor auto sink = storage->write(query, metadata_snapshot, context, async_insert); + sink->addInterpreterContext(context); sink->addTableLock(lock); return sink; } diff --git a/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.reference b/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.sql b/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.sql new file mode 100644 index 00000000000..bb44e4920af --- /dev/null +++ b/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.rview; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.wview; + +-- Read from view +CREATE MATERIALIZED VIEW rview ENGINE = File(CSV) POPULATE AS SELECT 1 AS c0; +SELECT 1 FROM rview; + +-- Write through view populate +CREATE MATERIALIZED VIEW wview ENGINE = Join(ALL, INNER, c0) POPULATE AS SELECT 1 AS c0; From 96b59a2ef679b6b23ffcecafd59c05a0ea784ada Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 7 Nov 2024 13:43:58 +0100 Subject: [PATCH 272/566] Avoid port clash in CoordinationTest/0.TestSummingRaft1 --- src/Coordination/tests/gtest_coordination.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp index 9648fdd4530..c56e698766a 100644 --- a/src/Coordination/tests/gtest_coordination.cpp +++ b/src/Coordination/tests/gtest_coordination.cpp @@ -330,7 +330,7 @@ TYPED_TEST(CoordinationTest, TestSummingRaft1) this->setLogDirectory("./logs"); this->setStateFileDirectory("."); - SummingRaftServer s1(1, "localhost", 44444, this->keeper_context); + SummingRaftServer s1(1, "localhost", 0, this->keeper_context); SCOPE_EXIT(if (std::filesystem::exists("./state")) std::filesystem::remove("./state");); /// Single node is leader From e5fc37bc7e6c707cd7ea14bb3c4888f94118a126 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 7 Nov 2024 17:27:51 +0100 Subject: [PATCH 273/566] Add alias --- src/Core/Settings.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 
d9668849fd2..328f950da1d 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4854,7 +4854,7 @@ Allow to use the filesystem cache in passive mode - benefit from the existing ca )", 0) \ DECLARE(Bool, filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit, true, R"( Skip download from remote filesystem if exceeds query cache size -)", 0) \ +)", 0) ALIAS(skip_download_if_exceeds_query_cache) \ DECLARE(UInt64, filesystem_cache_max_download_size, (128UL * 1024 * 1024 * 1024), R"( Max remote filesystem cache size that can be downloaded by a single query )", 0) \ @@ -5887,7 +5887,6 @@ Experimental data deduplication for SELECT queries based on part UUIDs MAKE_OBSOLETE(M, Bool, use_mysql_types_in_show_columns, false) \ MAKE_OBSOLETE(M, Bool, s3queue_allow_experimental_sharded_mode, false) \ MAKE_OBSOLETE(M, LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW) \ - MAKE_OBSOLETE(M, Bool, skip_download_if_exceeds_query_cache, true) \ /* moved to config.xml: see also src/Core/ServerSettings.h */ \ MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_buffer_flush_schedule_pool_size, 16) \ MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_pool_size, 16) \ From bfad05ac60b90bf7b4000cf6f87b54730ce108a5 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 7 Nov 2024 17:35:10 +0100 Subject: [PATCH 274/566] Shrink to fit index granularity array in memory to reduce memory footprint --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 2 ++ src/Storages/MergeTree/MergeTreeIndexGranularity.cpp | 6 ++++++ src/Storages/MergeTree/MergeTreeIndexGranularity.h | 2 ++ 3 files changed, 10 insertions(+) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 41783ffddb0..7453d609fa9 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -735,7 +735,9 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks loadUUID(); loadColumns(require_columns_checksums); loadChecksums(require_columns_checksums); + loadIndexGranularity(); + index_granularity.shrinkToFitInMemory(); if (!(*storage.getSettings())[MergeTreeSetting::primary_key_lazy_load]) getIndex(); diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp index d69a00643f0..c3e740bde84 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp @@ -122,4 +122,10 @@ std::string MergeTreeIndexGranularity::describe() const { return fmt::format("initialized: {}, marks_rows_partial_sums: [{}]", initialized, fmt::join(marks_rows_partial_sums, ", ")); } + +void MergeTreeIndexGranularity::shrinkToFitInMemory() +{ + marks_rows_partial_sums.shrink_to_fit(); +} + } diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.h b/src/Storages/MergeTree/MergeTreeIndexGranularity.h index f66e721ec1e..9b8375dd2d8 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.h @@ -100,6 +100,8 @@ public: void resizeWithFixedGranularity(size_t size, size_t fixed_granularity); std::string describe() const; + + void shrinkToFitInMemory(); }; } From 95d821549106ecff95e6e42e19b014aa6ac0e669 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 7 Nov 2024 17:34:52 +0100 Subject: [PATCH 275/566] Fix --- src/Interpreters/Cache/FileCache.cpp | 21 +++++++++++++++++++-- 
tests/config/config.d/storage_conf.xml | 1 + 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index f7b7ffc5aea..7de3f7af78d 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -37,6 +37,11 @@ namespace ProfileEvents extern const Event FilesystemCacheFailToReserveSpaceBecauseOfCacheResize; } +namespace CurrentMetrics +{ + extern const Metric FilesystemCacheDownloadQueueElements; +} + namespace DB { @@ -918,7 +923,13 @@ bool FileCache::tryReserve( if (!query_priority->collectCandidatesForEviction( size, required_elements_num, reserve_stat, eviction_candidates, {}, user.user_id, cache_lock)) { - failure_reason = "cannot evict enough space for query limit"; + const auto & stat = reserve_stat.total_stat; + failure_reason = fmt::format( + "cannot evict enough space for query limit " + "(non-releasable count: {}, non-releasable size: {}, " + "releasable count: {}, releasable size: {}, background download elements: {})", + stat.non_releasable_count, stat.non_releasable_size, stat.releasable_count, stat.releasable_size, + CurrentMetrics::get(CurrentMetrics::FilesystemCacheDownloadQueueElements)); return false; } @@ -933,7 +944,13 @@ bool FileCache::tryReserve( if (!main_priority->collectCandidatesForEviction( size, required_elements_num, reserve_stat, eviction_candidates, queue_iterator, user.user_id, cache_lock)) { - failure_reason = "cannot evict enough space"; + const auto & stat = reserve_stat.total_stat; + failure_reason = fmt::format( + "cannot evict enough space " + "(non-releasable count: {}, non-releasable size: {}, " + "releasable count: {}, releasable size: {}, background download elements: {})", + stat.non_releasable_count, stat.non_releasable_size, stat.releasable_count, stat.releasable_size, + CurrentMetrics::get(CurrentMetrics::FilesystemCacheDownloadQueueElements)); return false; } diff --git a/tests/config/config.d/storage_conf.xml b/tests/config/config.d/storage_conf.xml index 74bad7528c8..fee7ce841a6 100644 --- a/tests/config/config.d/storage_conf.xml +++ b/tests/config/config.d/storage_conf.xml @@ -27,6 +27,7 @@ 0.3 0.15 0.15 + 50 0 From 2c59fce5b488c9ddd2d99e0dcbaaf84d2f36ef04 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:44:41 +0100 Subject: [PATCH 276/566] Update test.py --- tests/integration/test_storage_s3_queue/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index c495fc1d44f..284b304c632 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -1403,8 +1403,8 @@ def test_shards_distributed(started_cluster, mode, processing_threads): # A unique path is necessary for repeatable tests keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" - files_to_generate = 300 - row_num = 300 + files_to_generate = 600 + row_num = 1000 total_rows = row_num * files_to_generate shards_num = 2 From 45aaebc41a73131c4ceee63214afbc88104dd59f Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 7 Nov 2024 18:24:36 +0100 Subject: [PATCH 277/566] Review fix --- src/Storages/MergeTree/MergedBlockOutputStream.cpp | 2 ++ src/Storages/MergeTree/MutateTask.cpp | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp 
b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 77c34aae30a..39096718b5c 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -207,6 +207,8 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( new_part->setBytesOnDisk(checksums.getTotalSizeOnDisk()); new_part->setBytesUncompressedOnDisk(checksums.getTotalSizeUncompressedOnDisk()); new_part->index_granularity = writer->getIndexGranularity(); + /// Just in case + new_part->index_granularity.shrinkToFitInMemory(); new_part->calculateColumnsAndSecondaryIndicesSizesOnDisk(); /// In mutation, existing_rows_count is already calculated in PartMergerWriter diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 936df7b0275..7f6588fc632 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -984,6 +984,8 @@ void finalizeMutatedPart( new_data_part->rows_count = source_part->rows_count; new_data_part->index_granularity = source_part->index_granularity; + /// Just in case + new_data_part->index_granularity.shrinkToFitInMemory(); new_data_part->setIndex(*source_part->getIndex()); new_data_part->minmax_idx = source_part->minmax_idx; new_data_part->modification_time = time(nullptr); From 4fb38411c128e3a293c93d6f1d5f9b71c961e8db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 7 Nov 2024 19:06:36 +0100 Subject: [PATCH 278/566] Only accept regular files --- programs/main.cpp | 2 +- src/Client/ClientBaseOptimizedParts.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/programs/main.cpp b/programs/main.cpp index ea8fbc1aece..d15c20867d1 100644 --- a/programs/main.cpp +++ b/programs/main.cpp @@ -238,7 +238,7 @@ int main(int argc_, char ** argv_) std::error_code ec; if (main_func == printHelp && !argv.empty() && (argv.size() == 1 || argv[1][0] == '-' || std::string_view(argv[1]).contains(' ') - || std::filesystem::exists(std::filesystem::path{argv[1]}, ec))) + || std::filesystem::is_regular_file(std::filesystem::path{argv[1]}, ec))) { main_func = mainEntryClickHouseLocal; } diff --git a/src/Client/ClientBaseOptimizedParts.cpp b/src/Client/ClientBaseOptimizedParts.cpp index bc362288079..afffe775029 100644 --- a/src/Client/ClientBaseOptimizedParts.cpp +++ b/src/Client/ClientBaseOptimizedParts.cpp @@ -119,7 +119,7 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de const char * option; std::error_code ec; - if (std::filesystem::exists(std::filesystem::path{token}, ec)) + if (std::filesystem::is_regular_file(std::filesystem::path{token}, ec)) option = "queries-file"; else if (token.contains(' ')) option = "query"; From 0ac6ce56bd08e25fc9c22022fec21f3346a753c5 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 7 Nov 2024 18:19:26 +0000 Subject: [PATCH 279/566] Trying to fix short-circuit for FilterStep. 
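Before the FilterStep changes below, a short sketch of what the `is_regular_file` refinement above means in practice (the paths are illustrative; the error text comes from the positional-option check shown earlier):

```bash
# Illustrative: only regular files are implicitly treated as --queries-file now,
# so a directory no longer slips through the filesystem check.
clickhouse-client /tmp/repro.sql   # regular file => --queries-file /tmp/repro.sql
clickhouse-client /tmp             # directory, no whitespace => "Positional option `/tmp` is not supported."
```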
--- src/Processors/QueryPlan/FilterStep.cpp | 98 ++++++++++++++++++++++++- 1 file changed, 97 insertions(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 862e03d74f2..64c46332c34 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -5,6 +5,9 @@ #include #include #include +#include +#include +#include namespace DB { @@ -24,6 +27,78 @@ static ITransformingStep::Traits getTraits() }; } +static bool isTrivialSubtree(const ActionsDAG::Node * node) +{ + while (node->type == ActionsDAG::ActionType::ALIAS) + node = node->children.at(0); + + return node->type != ActionsDAG::ActionType::FUNCTION && node->type != ActionsDAG::ActionType::ARRAY_JOIN; +} + +struct ActionsAndName +{ + ActionsDAG dag; + std::string name; +}; + +static ActionsAndName splitSingleAndFilter(ActionsDAG & dag, const ActionsDAG::Node * filter_node) +{ + auto name = filter_node->result_name; + auto split_result = dag.split({filter_node}, true); + dag = std::move(split_result.second); + split_result.first.getOutputs().emplace(split_result.first.getOutputs().begin(), split_result.split_nodes_mapping[filter_node]); + return ActionsAndName{std::move(split_result.first), std::move(name)}; +} + +static std::optional trySplitSingleAndFilter(ActionsDAG & dag, const std::string & filter_name) +{ + const auto * filter = &dag.findInOutputs(filter_name); + while (filter->type == ActionsDAG::ActionType::ALIAS) + filter = filter->children.at(0); + + if (filter->type != ActionsDAG::ActionType::FUNCTION || filter->function_base->getName() != "and") + return {}; + + const ActionsDAG::Node * condition_to_split = nullptr; + std::stack nodes; + nodes.push(filter); + while (!nodes.empty()) + { + const auto * node = nodes.top(); + nodes.pop(); + + if (node->type == ActionsDAG::ActionType::FUNCTION && node->function_base->getName() == "and") + { + for (const auto * child : node->children | std::ranges::views::reverse) + nodes.push(child); + + continue; + } + + if (isTrivialSubtree(node)) + continue; + + /// Do not split subtree if it's the last non-trivial one. + /// So, split the first found condition only when there is a another one found. 
+ if (condition_to_split) + return splitSingleAndFilter(dag, condition_to_split); + + condition_to_split = node; + } + + return {}; +} + +std::vector splitAndChainIntoMultipleFilters(ActionsDAG & dag, const std::string & filter_name) +{ + std::vector res; + + while (auto condition = trySplitSingleAndFilter(dag, filter_name)) + res.push_back(std::move(*condition)); + + return res; +} + FilterStep::FilterStep( const Header & input_header_, ActionsDAG actions_dag_, @@ -50,6 +125,17 @@ FilterStep::FilterStep( void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { + auto and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name); + for (auto & and_atom : and_atoms) + { + auto expression = std::make_shared(std::move(and_atom.dag), settings.getActionsSettings()); + pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) + { + bool on_totals = stream_type == QueryPipelineBuilder::StreamType::Totals; + return std::make_shared(header, expression, and_atom.name, true, on_totals); + }); + } + auto expression = std::make_shared(std::move(actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) @@ -76,13 +162,23 @@ void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQ void FilterStep::describeActions(FormatSettings & settings) const { String prefix(settings.offset, settings.indent_char); + + auto cloned_dag = actions_dag.clone(); + auto and_atoms = splitAndChainIntoMultipleFilters(cloned_dag, filter_column_name); + for (auto & and_atom : and_atoms) + { + auto expression = std::make_shared(std::move(and_atom.dag)); + settings.out << prefix << "AND column: " << and_atom.name; + expression->describeActions(settings.out, prefix); + } + settings.out << prefix << "Filter column: " << filter_column_name; if (remove_filter_column) settings.out << " (removed)"; settings.out << '\n'; - auto expression = std::make_shared(actions_dag.clone()); + auto expression = std::make_shared(std::move(cloned_dag)); expression->describeActions(settings.out, prefix); } From 4e53dda5801cf797a85ad07b9fb55e08aa0cdcf8 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Thu, 7 Nov 2024 20:45:31 +0100 Subject: [PATCH 280/566] Use array for conditional mkdir --- docker/server/entrypoint.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/server/entrypoint.sh b/docker/server/entrypoint.sh index 7a990e7d889..5a91d54d32b 100755 --- a/docker/server/entrypoint.sh +++ b/docker/server/entrypoint.sh @@ -57,14 +57,14 @@ function create_directory_and_do_chown() { [ -z "$dir" ] && return # ensure directories exist if [ "$DO_CHOWN" = "1" ]; then - mkdir="mkdir" + mkdir=( mkdir ) else # if DO_CHOWN=0 it means that the system does not map root user to "admin" permissions # it mainly happens on NFS mounts where root==nobody for security reasons # thus mkdir MUST run with user id/gid and not from nobody that has zero permissions - mkdir="clickhouse su ""${USER}:${GROUP}"" mkdir" + mkdir=( clickhouse su "${USER}:${GROUP}" mkdir ) fi - if ! $mkdir -p "$dir"; then + if ! 
"${mkdir[@]}" -p "$dir"; then echo "Couldn't create necessary directory: $dir" exit 1 fi From 2fa357f3747a9436acdeefd4c255e5333c461c3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 7 Nov 2024 20:51:39 +0100 Subject: [PATCH 281/566] Revert "Enable enable_job_stack_trace by default" --- src/Core/Settings.cpp | 2 +- src/Core/SettingsChangesHistory.cpp | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 01339226c2d..6f0109fa300 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2869,7 +2869,7 @@ Limit on size of multipart/form-data content. This setting cannot be parsed from DECLARE(Bool, calculate_text_stack_trace, true, R"( Calculate text stack trace in case of exceptions during query execution. This is the default. It requires symbol lookups that may slow down fuzzing tests when a huge amount of wrong queries are executed. In normal cases, you should not disable this option. )", 0) \ - DECLARE(Bool, enable_job_stack_trace, true, R"( + DECLARE(Bool, enable_job_stack_trace, false, R"( Output stack trace of a job creator when job results in exception )", 0) \ DECLARE(Bool, allow_ddl, true, R"( diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index edf4e60706b..c6223bef2b2 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -80,7 +80,6 @@ static std::initializer_list Date: Thu, 7 Nov 2024 19:53:30 +0000 Subject: [PATCH 282/566] Fix getting column sample for not finalized part --- src/Columns/ColumnVariant.cpp | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 11 ++++++----- src/Storages/MergeTree/IMergeTreeDataPart.h | 6 +++--- src/Storages/MergeTree/IMergeTreeDataPartWriter.h | 2 ++ src/Storages/MergeTree/MergeTreeDataPartCompact.cpp | 2 +- src/Storages/MergeTree/MergeTreeDataPartCompact.h | 2 +- src/Storages/MergeTree/MergeTreeDataPartWide.cpp | 8 ++++---- src/Storages/MergeTree/MergeTreeDataPartWide.h | 4 ++-- .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 2 ++ src/Storages/MergeTree/MergedBlockOutputStream.cpp | 2 +- 10 files changed, 23 insertions(+), 18 deletions(-) diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index 54f0421fc4b..2fa59b8e33c 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -952,7 +952,7 @@ ColumnPtr ColumnVariant::permute(const Permutation & perm, size_t limit) const if (hasOnlyNulls()) { if (limit) - return cloneResized(limit); + return cloneResized(limit ? std::min(size(), limit) : size()); /// If no limit, we can just return current immutable column. 
return this->getPtr(); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index b631d991e90..f73b52dbafd 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -2252,18 +2252,18 @@ void IMergeTreeDataPart::checkConsistencyWithProjections(bool require_part_metad proj_part->checkConsistency(require_part_metadata); } -void IMergeTreeDataPart::calculateColumnsAndSecondaryIndicesSizesOnDisk() +void IMergeTreeDataPart::calculateColumnsAndSecondaryIndicesSizesOnDisk(std::optional columns_sample) { - calculateColumnsSizesOnDisk(); + calculateColumnsSizesOnDisk(columns_sample); calculateSecondaryIndicesSizesOnDisk(); } -void IMergeTreeDataPart::calculateColumnsSizesOnDisk() +void IMergeTreeDataPart::calculateColumnsSizesOnDisk(std::optional columns_sample) { if (getColumns().empty() || checksums.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot calculate columns sizes when columns or checksums are not initialized"); - calculateEachColumnSizes(columns_sizes, total_columns_size); + calculateEachColumnSizes(columns_sizes, total_columns_size, columns_sample); } void IMergeTreeDataPart::calculateSecondaryIndicesSizesOnDisk() @@ -2501,7 +2501,7 @@ ColumnPtr IMergeTreeDataPart::getColumnSample(const NameAndTypePair & column) co { const size_t total_mark = getMarksCount(); /// If column doesn't have dynamic subcolumns or part has no data, just create column using it's type. - if (is_temp || !column.type->hasDynamicSubcolumns() || !total_mark) + if (!column.type->hasDynamicSubcolumns() || !total_mark) return column.type->createColumn(); /// Otherwise, read sample column with 0 rows from the part, so it will load dynamic structure. @@ -2527,6 +2527,7 @@ ColumnPtr IMergeTreeDataPart::getColumnSample(const NameAndTypePair & column) co Columns result; result.resize(1); + LOG_DEBUG(getLogger("IMergeTreeDataPart"), "getColumnSample"); reader->readRows(0, total_mark, false, 0, result); return result[0]; } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index b41a1d840e1..a7051a2491a 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -426,7 +426,7 @@ public: bool shallParticipateInMerges(const StoragePolicyPtr & storage_policy) const; /// Calculate column and secondary indices sizes on disk. - void calculateColumnsAndSecondaryIndicesSizesOnDisk(); + void calculateColumnsAndSecondaryIndicesSizesOnDisk(std::optional columns_sample = std::nullopt); std::optional getRelativePathForPrefix(const String & prefix, bool detached = false, bool broken = false) const; @@ -631,7 +631,7 @@ protected: /// Fill each_columns_size and total_size with sizes from columns files on /// disk using columns and checksums. 
- virtual void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const = 0; + virtual void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size, std::optional columns_sample) const = 0; std::optional getRelativePathForDetachedPart(const String & prefix, bool broken) const; @@ -713,7 +713,7 @@ private: void loadPartitionAndMinMaxIndex(); - void calculateColumnsSizesOnDisk(); + void calculateColumnsSizesOnDisk(std::optional columns_sample = std::nullopt); void calculateSecondaryIndicesSizesOnDisk(); diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index d1c76505d7c..8923f6a59ca 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -54,6 +54,8 @@ public: const MergeTreeIndexGranularity & getIndexGranularity() const { return index_granularity; } + virtual Block getColumnsSample() const = 0; + protected: SerializationPtr getSerialization(const String & column_name) const; diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp index 14c2da82de1..8856f467b90 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp @@ -80,7 +80,7 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartCompactWriter( } -void MergeTreeDataPartCompact::calculateEachColumnSizes(ColumnSizeByName & /*each_columns_size*/, ColumnSize & total_size) const +void MergeTreeDataPartCompact::calculateEachColumnSizes(ColumnSizeByName & /*each_columns_size*/, ColumnSize & total_size, std::optional /*columns_sample*/) const { auto bin_checksum = checksums.files.find(DATA_FILE_NAME_WITH_EXTENSION); if (bin_checksum != checksums.files.end()) diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.h b/src/Storages/MergeTree/MergeTreeDataPartCompact.h index 8e279571578..c394de0d7c1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.h @@ -70,7 +70,7 @@ private: void loadIndexGranularity() override; /// Compact parts don't support per column size, only total size - void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const override; + void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size, std::optional columns_sample) const override; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index b3b6a0dded6..39f96ba06ad 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -82,7 +82,7 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWideWriter( /// Takes into account the fact that several columns can e.g. share their .size substreams. /// When calculating totals these should be counted only once. 
ColumnSize MergeTreeDataPartWide::getColumnSizeImpl( - const NameAndTypePair & column, std::unordered_set * processed_substreams) const + const NameAndTypePair & column, std::unordered_set * processed_substreams, std::optional columns_sample) const { ColumnSize size; if (checksums.empty()) @@ -108,7 +108,7 @@ ColumnSize MergeTreeDataPartWide::getColumnSizeImpl( auto mrk_checksum = checksums.files.find(*stream_name + getMarksFileExtension()); if (mrk_checksum != checksums.files.end()) size.marks += mrk_checksum->second.file_size; - }, column.type, getColumnSample(column)); + }, column.type, columns_sample && columns_sample->has(column.name) ? columns_sample->getByName(column.name).column : getColumnSample(column)); return size; } @@ -374,12 +374,12 @@ std::optional MergeTreeDataPartWide::getFileNameForColumn(const NameAndT return filename; } -void MergeTreeDataPartWide::calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const +void MergeTreeDataPartWide::calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size, std::optional columns_sample) const { std::unordered_set processed_substreams; for (const auto & column : columns) { - ColumnSize size = getColumnSizeImpl(column, &processed_substreams); + ColumnSize size = getColumnSizeImpl(column, &processed_substreams, columns_sample); each_columns_size[column.name] = size; total_size.add(size); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.h b/src/Storages/MergeTree/MergeTreeDataPartWide.h index 022a5fb746c..a6d4897ed87 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.h @@ -64,9 +64,9 @@ private: /// Loads marks index granularity into memory void loadIndexGranularity() override; - ColumnSize getColumnSizeImpl(const NameAndTypePair & column, std::unordered_set * processed_substreams) const; + ColumnSize getColumnSizeImpl(const NameAndTypePair & column, std::unordered_set * processed_substreams, std::optional columns_sample) const; - void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const override; + void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size, std::optional columns_sample) const override; }; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 49d654c15e1..b22d58ba51e 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -123,6 +123,8 @@ public: written_offset_columns = written_offset_columns_; } + Block getColumnsSample() const override { return block_sample; } + protected: /// Count index_granularity for block and store in `index_granularity` size_t computeIndexGranularity(const Block & block) const; diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 77c34aae30a..604b2fda20a 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -207,7 +207,7 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( new_part->setBytesOnDisk(checksums.getTotalSizeOnDisk()); new_part->setBytesUncompressedOnDisk(checksums.getTotalSizeUncompressedOnDisk()); new_part->index_granularity = writer->getIndexGranularity(); - new_part->calculateColumnsAndSecondaryIndicesSizesOnDisk(); + 
new_part->calculateColumnsAndSecondaryIndicesSizesOnDisk(writer->getColumnsSample()); /// In mutation, existing_rows_count is already calculated in PartMergerWriter /// In merge situation, lightweight deleted rows was physically deleted, existing_rows_count equals rows_count From 3525954fa3cd116bf0b7ec70dc70be3999cf0090 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 7 Nov 2024 20:55:04 +0100 Subject: [PATCH 283/566] Implicit SELECT in clickhouse-local --- programs/local/LocalServer.cpp | 6 +++--- src/Client/ClientBase.cpp | 5 ++++- src/Client/ClientBaseHelpers.cpp | 11 +++++++++-- src/Client/ClientBaseHelpers.h | 4 +++- src/Core/Settings.cpp | 2 ++ .../0_stateless/03267_implicit_select.reference | 5 +++++ tests/queries/0_stateless/03267_implicit_select.sh | 11 +++++++++++ 7 files changed, 37 insertions(+), 7 deletions(-) create mode 100644 tests/queries/0_stateless/03267_implicit_select.reference create mode 100755 tests/queries/0_stateless/03267_implicit_select.sh diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 1dcef5eb25e..145cac02a3c 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -31,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -50,7 +48,6 @@ #include #include #include -#include #include #include #include @@ -71,9 +68,11 @@ namespace CurrentMetrics namespace DB { + namespace Setting { extern const SettingsBool allow_introspection_functions; + extern const SettingsBool implicit_select; extern const SettingsLocalFSReadMethod storage_file_read_method; } @@ -126,6 +125,7 @@ void applySettingsOverridesForLocal(ContextMutablePtr context) settings[Setting::allow_introspection_functions] = true; settings[Setting::storage_file_read_method] = LocalFSReadMethod::mmap; + settings[Setting::implicit_select] = true; context->setSettings(settings); } diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 0a824753dc0..29abed7e52d 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -2674,7 +2674,10 @@ void ClientBase::runInteractive() #if USE_REPLXX replxx::Replxx::highlighter_callback_t highlight_callback{}; if (getClientConfiguration().getBool("highlight", true)) - highlight_callback = highlight; + highlight_callback = [this](const String & query, std::vector & colors) + { + highlight(query, colors, *client_context); + }; ReplxxLineReader lr( *suggest, diff --git a/src/Client/ClientBaseHelpers.cpp b/src/Client/ClientBaseHelpers.cpp index 156c0c87fb6..ea2a5fd42f5 100644 --- a/src/Client/ClientBaseHelpers.cpp +++ b/src/Client/ClientBaseHelpers.cpp @@ -5,6 +5,8 @@ #include #include #include +#include +#include #include @@ -12,6 +14,11 @@ namespace DB { +namespace Setting +{ + extern const SettingsBool implicit_select; +} + /// Should we celebrate a bit? bool isNewYearMode() { @@ -95,7 +102,7 @@ bool isChineseNewYearMode(const String & local_tz) } #if USE_REPLXX -void highlight(const String & query, std::vector & colors) +void highlight(const String & query, std::vector & colors, const Context & context) { using namespace replxx; @@ -135,7 +142,7 @@ void highlight(const String & query, std::vector & colors /// Currently we highlight only the first query in the multi-query mode. 
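The implicit SELECT behaviour introduced above can be summarised as follows; the expressions match the test script added below and are otherwise arbitrary:

```bash
# In clickhouse-local, implicit_select is enabled by default, so a bare expression is a valid query.
clickhouse-local "1 + 2"                                     # prints 3
clickhouse-local --query "1 + 2"                             # prints 3
clickhouse-local --implicit_select 0 --query "1 + 2"         # Syntax error
clickhouse-local --implicit_select 0 --query "SELECT 1 + 2"  # prints 3
```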
- ParserQuery parser(end); + ParserQuery parser(end, false, context.getSettingsRef()[Setting::implicit_select]); ASTPtr ast; bool parse_res = false; diff --git a/src/Client/ClientBaseHelpers.h b/src/Client/ClientBaseHelpers.h index adc1c81b3c5..dcfac21c500 100644 --- a/src/Client/ClientBaseHelpers.h +++ b/src/Client/ClientBaseHelpers.h @@ -11,13 +11,15 @@ namespace DB { +class Context; + /// Should we celebrate a bit? bool isNewYearMode(); bool isChineseNewYearMode(const String & local_tz); #if USE_REPLXX -void highlight(const String & query, std::vector & colors); +void highlight(const String & query, std::vector & colors, const Context & context); #endif } diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index f3ada33cb37..049e29dc8d8 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -5708,6 +5708,8 @@ If enabled, MongoDB tables will return an error when a MongoDB query cannot be b )", 0) \ DECLARE(Bool, implicit_select, false, R"( Allow writing simple SELECT queries without the leading SELECT keyword, which makes it simple for calculator-style usage, e.g. `1 + 2` becomes a valid query. + +In `clickhouse-local` it is enabled by default and can be explicitly disabled. )", 0) \ \ \ diff --git a/tests/queries/0_stateless/03267_implicit_select.reference b/tests/queries/0_stateless/03267_implicit_select.reference new file mode 100644 index 00000000000..97c1fd4333b --- /dev/null +++ b/tests/queries/0_stateless/03267_implicit_select.reference @@ -0,0 +1,5 @@ +3 +3 +3 +Syntax error +3 diff --git a/tests/queries/0_stateless/03267_implicit_select.sh b/tests/queries/0_stateless/03267_implicit_select.sh new file mode 100755 index 00000000000..068fb457bb1 --- /dev/null +++ b/tests/queries/0_stateless/03267_implicit_select.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL "1 + 2" +$CLICKHOUSE_LOCAL -q "1 + 2" +$CLICKHOUSE_LOCAL --query "1 + 2" +$CLICKHOUSE_LOCAL --implicit_select 0 --query "1 + 2" 2>&1 | grep -oF 'Syntax error' +$CLICKHOUSE_LOCAL --implicit_select 0 --query "SELECT 1 + 2" From 8f98f2333f21566ab62430a8bc9379e6b24f6062 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Thu, 7 Nov 2024 20:49:06 +0100 Subject: [PATCH 284/566] Make `clickhouse local` fuse in the repository install RUN --- docker/server/Dockerfile.ubuntu | 52 ++++++++++++++++----------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 2b023a9cf03..0fe9a409ee4 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -88,32 +88,32 @@ RUN if [ -n "${single_binary_location_url}" ]; then \ #docker-official-library:on # A fallback to installation from ClickHouse repository -RUN if ! 
clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \ - apt-get update \ - && apt-get install --yes --no-install-recommends \ - dirmngr \ - gnupg2 \ - && mkdir -p /etc/apt/sources.list.d \ - && GNUPGHOME=$(mktemp -d) \ - && GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \ - --keyring /usr/share/keyrings/clickhouse-keyring.gpg \ - --keyserver hkp://keyserver.ubuntu.com:80 \ - --recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \ - && rm -rf "$GNUPGHOME" \ - && chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \ - && echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \ - && echo "installing from repository: ${REPOSITORY}" \ - && apt-get update \ - && for package in ${PACKAGES}; do \ - packages="${packages} ${package}=${VERSION}" \ - ; done \ - && apt-get install --yes --no-install-recommends ${packages} || exit 1 \ - && rm -rf \ - /var/lib/apt/lists/* \ - /var/cache/debconf \ - /tmp/* \ - && apt-get autoremove --purge -yq dirmngr gnupg2 \ - ; fi +# It works unless the clickhouse binary already exists +RUN clickhouse local -q 'SELECT 1' >/dev/null 2>&1 && exit 0 || : \ + ; apt-get update \ + && apt-get install --yes --no-install-recommends \ + dirmngr \ + gnupg2 \ + && mkdir -p /etc/apt/sources.list.d \ + && GNUPGHOME=$(mktemp -d) \ + && GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \ + --keyring /usr/share/keyrings/clickhouse-keyring.gpg \ + --keyserver hkp://keyserver.ubuntu.com:80 \ + --recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \ + && rm -rf "$GNUPGHOME" \ + && chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \ + && echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \ + && echo "installing from repository: ${REPOSITORY}" \ + && apt-get update \ + && for package in ${PACKAGES}; do \ + packages="${packages} ${package}=${VERSION}" \ + ; done \ + && apt-get install --yes --no-install-recommends ${packages} || exit 1 \ + && rm -rf \ + /var/lib/apt/lists/* \ + /var/cache/debconf \ + /tmp/* \ + && apt-get autoremove --purge -yq dirmngr gnupg2 # post install # we need to allow "others" access to clickhouse folder, because docker container From 0ff0c96b007108ab222a264e4a3bf8aa7cb7a18e Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 7 Nov 2024 20:01:40 +0000 Subject: [PATCH 285/566] Remove logging --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index f73b52dbafd..4e400fb1f94 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -2527,7 +2527,6 @@ ColumnPtr IMergeTreeDataPart::getColumnSample(const NameAndTypePair & column) co Columns result; result.resize(1); - LOG_DEBUG(getLogger("IMergeTreeDataPart"), "getColumnSample"); reader->readRows(0, total_mark, false, 0, result); return result[0]; } From 76b6cf96eb3f548bc442f645a8cd8999cf3c6f63 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 7 Nov 2024 21:26:23 +0100 Subject: [PATCH 286/566] Highlight multi-statements in the client --- src/Client/ClientBaseHelpers.cpp | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/Client/ClientBaseHelpers.cpp b/src/Client/ClientBaseHelpers.cpp index 156c0c87fb6..f7ecbfeeb43 100644 --- a/src/Client/ClientBaseHelpers.cpp +++ b/src/Client/ClientBaseHelpers.cpp @@ -141,7 +141,24 @@ void highlight(const String & query, std::vector & colors try { - parse_res = parser.parse(token_iterator, ast, expected); + 
while (true) + { + parse_res = parser.parse(token_iterator, ast, expected); + if (!parse_res) + break; + + if (!token_iterator->isEnd() && token_iterator->type != TokenType::Semicolon) + { + parse_res = false; + break; + } + + while (token_iterator->type == TokenType::Semicolon) + ++token_iterator; + + if (token_iterator->isEnd()) + break; + } } catch (...) { @@ -175,7 +192,7 @@ void highlight(const String & query, std::vector & colors /// Highlight the last error in red. If the parser failed or the lexer found an invalid token, /// or if it didn't parse all the data (except, the data for INSERT query, which is legitimately unparsed) - if ((!parse_res || last_token.isError() || (!token_iterator->isEnd() && token_iterator->type != TokenType::Semicolon)) + if ((!parse_res || last_token.isError()) && !(insert_data && expected.max_parsed_pos >= insert_data) && expected.max_parsed_pos >= prev) { From c8104cb2ee0f366a56bfd79a07071173a8a5a815 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 7 Nov 2024 21:28:06 +0100 Subject: [PATCH 287/566] Correct and unify exit codes --- programs/client/Client.cpp | 10 ++++++---- programs/disks/DisksApp.cpp | 8 +++++--- programs/keeper-client/KeeperClient.cpp | 6 ++++-- programs/keeper/Keeper.cpp | 4 ++-- programs/library-bridge/LibraryBridge.cpp | 2 +- programs/local/LocalServer.cpp | 12 +++++++----- programs/obfuscator/Obfuscator.cpp | 2 +- programs/odbc-bridge/ODBCBridge.cpp | 2 +- programs/server/Server.cpp | 4 ++-- 9 files changed, 29 insertions(+), 21 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index d7190444f0b..05e1e61be7b 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -431,7 +431,7 @@ catch (const Exception & e) bool need_print_stack_trace = config().getBool("stacktrace", false) && e.code() != ErrorCodes::NETWORK_ERROR; std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl << std::endl; /// If exception code isn't zero, we should return non-zero return code anyway. - return e.code() ? e.code() : -1; + return static_cast(e.code()) ? e.code() : -1; } catch (...) { @@ -1390,7 +1390,8 @@ int mainEntryClickHouseClient(int argc, char ** argv) catch (const DB::Exception & e) { std::cerr << DB::getExceptionMessage(e, false) << std::endl; - return 1; + auto code = DB::getCurrentExceptionCode(); + return static_cast(code) ? code : 1; } catch (const boost::program_options::error & e) { @@ -1399,7 +1400,8 @@ int mainEntryClickHouseClient(int argc, char ** argv) } catch (...) { - std::cerr << DB::getCurrentExceptionMessage(true) << std::endl; - return 1; + std::cerr << DB::getCurrentExceptionMessage(true) << '\n'; + auto code = DB::getCurrentExceptionCode(); + return static_cast(code) ? code : 1; } } diff --git a/programs/disks/DisksApp.cpp b/programs/disks/DisksApp.cpp index 610d8eaa638..d6541e99288 100644 --- a/programs/disks/DisksApp.cpp +++ b/programs/disks/DisksApp.cpp @@ -546,16 +546,18 @@ int mainEntryClickHouseDisks(int argc, char ** argv) catch (const DB::Exception & e) { std::cerr << DB::getExceptionMessage(e, false) << std::endl; - return 0; + auto code = DB::getCurrentExceptionCode(); + return static_cast(code) ? code : 1; } catch (const boost::program_options::error & e) { std::cerr << "Bad arguments: " << e.what() << std::endl; - return 0; + return DB::ErrorCodes::BAD_ARGUMENTS; } catch (...) { std::cerr << DB::getCurrentExceptionMessage(true) << std::endl; - return 0; + auto code = DB::getCurrentExceptionCode(); + return static_cast(code) ? 
code : 1; } } diff --git a/programs/keeper-client/KeeperClient.cpp b/programs/keeper-client/KeeperClient.cpp index 2a426fad7ac..4bdddaec59c 100644 --- a/programs/keeper-client/KeeperClient.cpp +++ b/programs/keeper-client/KeeperClient.cpp @@ -448,7 +448,8 @@ int mainEntryClickHouseKeeperClient(int argc, char ** argv) catch (const DB::Exception & e) { std::cerr << DB::getExceptionMessage(e, false) << std::endl; - return 1; + auto code = DB::getCurrentExceptionCode(); + return static_cast(code) ? code : 1; } catch (const boost::program_options::error & e) { @@ -458,6 +459,7 @@ int mainEntryClickHouseKeeperClient(int argc, char ** argv) catch (...) { std::cerr << DB::getCurrentExceptionMessage(true) << std::endl; - return 1; + auto code = DB::getCurrentExceptionCode(); + return static_cast(code) ? code : 1; } } diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index 74af9950e13..936ce15f4c9 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -81,7 +81,7 @@ int mainEntryClickHouseKeeper(int argc, char ** argv) { std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; auto code = DB::getCurrentExceptionCode(); - return code ? code : 1; + return static_cast(code) ? code : 1; } } @@ -672,7 +672,7 @@ catch (...) /// Poco does not provide stacktrace. tryLogCurrentException("Application"); auto code = getCurrentExceptionCode(); - return code ? code : -1; + return static_cast(code) ? code : -1; } diff --git a/programs/library-bridge/LibraryBridge.cpp b/programs/library-bridge/LibraryBridge.cpp index 261484ac744..62dbd12aaf0 100644 --- a/programs/library-bridge/LibraryBridge.cpp +++ b/programs/library-bridge/LibraryBridge.cpp @@ -13,7 +13,7 @@ int mainEntryClickHouseLibraryBridge(int argc, char ** argv) { std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; auto code = DB::getCurrentExceptionCode(); - return code ? code : 1; + return static_cast(code) ? code : 1; } } diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 1dcef5eb25e..d6bf0353e89 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -615,12 +615,14 @@ catch (const DB::Exception & e) { bool need_print_stack_trace = getClientConfiguration().getBool("stacktrace", false); std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl; - return e.code() ? e.code() : -1; + auto code = DB::getCurrentExceptionCode(); + return static_cast(code) ? code : 1; } catch (...) { - std::cerr << getCurrentExceptionMessage(false) << std::endl; - return getCurrentExceptionCode(); + std::cerr << DB::getCurrentExceptionMessage(true) << '\n'; + auto code = DB::getCurrentExceptionCode(); + return static_cast(code) ? code : 1; } void LocalServer::updateLoggerLevel(const String & logs_level) @@ -1029,7 +1031,7 @@ int mainEntryClickHouseLocal(int argc, char ** argv) { std::cerr << DB::getExceptionMessage(e, false) << std::endl; auto code = DB::getCurrentExceptionCode(); - return code ? code : 1; + return static_cast(code) ? code : 1; } catch (const boost::program_options::error & e) { @@ -1040,6 +1042,6 @@ int mainEntryClickHouseLocal(int argc, char ** argv) { std::cerr << DB::getCurrentExceptionMessage(true) << '\n'; auto code = DB::getCurrentExceptionCode(); - return code ? code : 1; + return static_cast(code) ? 
code : 1; } } diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 324a4573b24..6bd3865b591 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -1480,5 +1480,5 @@ catch (...) { std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; auto code = DB::getCurrentExceptionCode(); - return code ? code : 1; + return static_cast(code) ? code : 1; } diff --git a/programs/odbc-bridge/ODBCBridge.cpp b/programs/odbc-bridge/ODBCBridge.cpp index 096d1b2dcca..e5ae3272d40 100644 --- a/programs/odbc-bridge/ODBCBridge.cpp +++ b/programs/odbc-bridge/ODBCBridge.cpp @@ -13,7 +13,7 @@ int mainEntryClickHouseODBCBridge(int argc, char ** argv) { std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; auto code = DB::getCurrentExceptionCode(); - return code ? code : 1; + return static_cast(code) ? code : 1; } } diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 5159f95419e..68f262079ff 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -343,7 +343,7 @@ int mainEntryClickHouseServer(int argc, char ** argv) { std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; auto code = DB::getCurrentExceptionCode(); - return code ? code : 1; + return static_cast(code) ? code : 1; } } @@ -2537,7 +2537,7 @@ catch (...) /// Poco does not provide stacktrace. tryLogCurrentException("Application"); auto code = getCurrentExceptionCode(); - return code ? code : -1; + return static_cast(code) ? code : -1; } std::unique_ptr Server::buildProtocolStackFromConfig( From a027f1bf3cde1442a427610cf17967147cb0d60c Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:59:11 -0500 Subject: [PATCH 288/566] Revert "Revert "Enable enable_job_stack_trace by default"" --- src/Core/Settings.cpp | 2 +- src/Core/SettingsChangesHistory.cpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 6f0109fa300..01339226c2d 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2869,7 +2869,7 @@ Limit on size of multipart/form-data content. This setting cannot be parsed from DECLARE(Bool, calculate_text_stack_trace, true, R"( Calculate text stack trace in case of exceptions during query execution. This is the default. It requires symbol lookups that may slow down fuzzing tests when a huge amount of wrong queries are executed. In normal cases, you should not disable this option. 
)", 0) \ - DECLARE(Bool, enable_job_stack_trace, false, R"( + DECLARE(Bool, enable_job_stack_trace, true, R"( Output stack trace of a job creator when job results in exception )", 0) \ DECLARE(Bool, allow_ddl, true, R"( diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index c6223bef2b2..edf4e60706b 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -80,6 +80,7 @@ static std::initializer_list Date: Thu, 7 Nov 2024 16:01:02 -0500 Subject: [PATCH 289/566] move enable_job_stack_trace change to 24.11 --- src/Core/SettingsChangesHistory.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index edf4e60706b..0ff9d0a6833 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -64,6 +64,7 @@ static std::initializer_list Date: Thu, 7 Nov 2024 22:40:06 +0100 Subject: [PATCH 290/566] Update src/Client/ClientBaseHelpers.cpp Co-authored-by: Konstantin Bogdanov --- src/Client/ClientBaseHelpers.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/ClientBaseHelpers.cpp b/src/Client/ClientBaseHelpers.cpp index f7ecbfeeb43..555e95f7a25 100644 --- a/src/Client/ClientBaseHelpers.cpp +++ b/src/Client/ClientBaseHelpers.cpp @@ -141,7 +141,7 @@ void highlight(const String & query, std::vector & colors try { - while (true) + while (!token_iterator->isEnd()) { parse_res = parser.parse(token_iterator, ast, expected); if (!parse_res) From 1e87298a1ceafcf10fe0e5586604387bab0c6048 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 7 Nov 2024 22:40:21 +0100 Subject: [PATCH 291/566] Update ClientBaseHelpers.cpp --- src/Client/ClientBaseHelpers.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Client/ClientBaseHelpers.cpp b/src/Client/ClientBaseHelpers.cpp index 555e95f7a25..8bdbab99e13 100644 --- a/src/Client/ClientBaseHelpers.cpp +++ b/src/Client/ClientBaseHelpers.cpp @@ -155,9 +155,6 @@ void highlight(const String & query, std::vector & colors while (token_iterator->type == TokenType::Semicolon) ++token_iterator; - - if (token_iterator->isEnd()) - break; } } catch (...) From 16a670166c9ad6365716d0bccb8320b0f8706efe Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 7 Nov 2024 21:48:11 +0000 Subject: [PATCH 292/566] Update version_date.tsv and changelogs after v24.3.13.40-lts --- docs/changelogs/v24.3.13.40-lts.md | 31 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 32 insertions(+) create mode 100644 docs/changelogs/v24.3.13.40-lts.md diff --git a/docs/changelogs/v24.3.13.40-lts.md b/docs/changelogs/v24.3.13.40-lts.md new file mode 100644 index 00000000000..cec96e16292 --- /dev/null +++ b/docs/changelogs/v24.3.13.40-lts.md @@ -0,0 +1,31 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.3.13.40-lts (7acabd77389) FIXME as compared to v24.3.12.75-lts (7cb5dff8019) + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#63976](https://github.com/ClickHouse/ClickHouse/issues/63976): Fix intersect parts when restart after drop range. [#63202](https://github.com/ClickHouse/ClickHouse/pull/63202) ([Han Fei](https://github.com/hanfei1991)). +* Backported in [#71482](https://github.com/ClickHouse/ClickHouse/issues/71482): Fix `Content-Encoding` not sent in some compressed responses. 
[#64802](https://github.com/ClickHouse/ClickHouse/issues/64802). [#68975](https://github.com/ClickHouse/ClickHouse/pull/68975) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#70451](https://github.com/ClickHouse/ClickHouse/issues/70451): Fix vrash during insertion into FixedString column in PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)). +* Backported in [#70619](https://github.com/ClickHouse/ClickHouse/issues/70619): Fix server segfault on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#70877](https://github.com/ClickHouse/ClickHouse/issues/70877): Fix table creation with `CREATE ... AS table_function()` with database `Replicated` and unavailable table function source on secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#70571](https://github.com/ClickHouse/ClickHouse/issues/70571): Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#71146](https://github.com/ClickHouse/ClickHouse/issues/71146): Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#70682](https://github.com/ClickHouse/ClickHouse/issues/70682): Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71113](https://github.com/ClickHouse/ClickHouse/issues/71113): `GroupArraySortedData` uses a PODArray with non-POD elements, manually calling constructors and destructors for the elements as needed. But it wasn't careful enough: in two places it forgot to call destructor, in one place it left elements uninitialized if an exception is thrown when deserializing previous elements. Then `GroupArraySortedData`'s destructor called destructors on uninitialized elements and crashed: ``` 2024.10.17 22:58:23.523790 [ 5233 ] {} BaseDaemon: ########## Short fault info ############ 2024.10.17 22:58:23.523834 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) Received signal 11 2024.10.17 22:58:23.523862 [ 5233 ] {} BaseDaemon: Signal description: Segmentation fault 2024.10.17 22:58:23.523883 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 
2024.10.17 22:58:23.523908 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.523936 [ 5233 ] {} BaseDaemon: ######################################## 2024.10.17 22:58:23.523959 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) (query_id: 6c8a33a2-f45a-4a3b-bd71-ded6a1c9ccd3::202410_534066_534078_2) (query: ) Received signal Segmentation fault (11) 2024.10.17 22:58:23.523977 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 2024.10.17 22:58:23.523993 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.524817 [ 5233 ] {} BaseDaemon: 0. signalHandler(int, siginfo_t*, void*) @ 0x000000000c6f8308 2024.10.17 22:58:23.524917 [ 5233 ] {} BaseDaemon: 1. ? @ 0x0000ffffb7701850 2024.10.17 22:58:23.524962 [ 5233 ] {} BaseDaemon: 2. DB::Field::~Field() @ 0x0000000007c84855 2024.10.17 22:58:23.525012 [ 5233 ] {} BaseDaemon: 3. DB::Field::~Field() @ 0x0000000007c848a0 2024.10.17 22:58:23.526626 [ 5233 ] {} BaseDaemon: 4. DB::IAggregateFunctionDataHelper, DB::(anonymous namespace)::GroupArraySorted, DB::Field>>::destroy(char*) const (.5a6a451027f732f9fd91c13f4a13200c) @ 0x000000000cb9e84c 2024.10.17 22:58:23.527322 [ 5233 ] {} BaseDaemon: 5. DB::SerializationAggregateFunction::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const @ 0x000000000f7d10d0 2024.10.17 22:58:23.528470 [ 5233 ] {} BaseDaemon: 6. DB::ISerialization::deserializeBinaryBulkWithMultipleStreams(COW::immutable_ptr&, unsigned long, DB::ISerialization::DeserializeBinaryBulkSettings&, std::shared_ptr&, std::unordered_map::immutable_ptr, std::hash, std::equal_to, std::allocator::immutable_ptr>>>*) const @ 0x000000000f7cba20 2024.10.17 22:58:23.529213 [ 5233 ] {} BaseDaemon: 7. DB::MergeTreeReaderCompact::readData(DB::NameAndTypePair const&, COW::immutable_ptr&, unsigned long, std::function const&) @ 0x000000001120bbfc 2024.10.17 22:58:23.529277 [ 5233 ] {} BaseDaemon: 8. DB::MergeTreeReaderCompactSingleBuffer::readRows(unsigned long, unsigned long, bool, unsigned long, std::vector::immutable_ptr, std::allocator::immutable_ptr>>&) @ 0x000000001120fab0 2024.10.17 22:58:23.529319 [ 5233 ] {} BaseDaemon: 9. DB::MergeTreeSequentialSource::generate() @ 0x000000001121bf50 2024.10.17 22:58:23.529346 [ 5233 ] {} BaseDaemon: 10. DB::ISource::tryGenerate() @ 0x00000000116f520c 2024.10.17 22:58:23.529653 [ 5233 ] {} BaseDaemon: 11. 
DB::ISource::work() @ 0x00000000116f4c74 2024.10.17 22:58:23.529679 [ 5233 ] {} BaseDaemon: 12. DB::ExecutionThreadContext::executeTask() @ 0x000000001170a150 2024.10.17 22:58:23.529733 [ 5233 ] {} BaseDaemon: 13. DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x00000000117009f0 2024.10.17 22:58:23.529763 [ 5233 ] {} BaseDaemon: 14. DB::PipelineExecutor::executeStep(std::atomic*) @ 0x0000000011700574 2024.10.17 22:58:23.530089 [ 5233 ] {} BaseDaemon: 15. DB::PullingPipelineExecutor::pull(DB::Chunk&) @ 0x000000001170e364 2024.10.17 22:58:23.530277 [ 5233 ] {} BaseDaemon: 16. DB::PullingPipelineExecutor::pull(DB::Block&) @ 0x000000001170e4fc 2024.10.17 22:58:23.530295 [ 5233 ] {} BaseDaemon: 17. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() @ 0x0000000011074328 2024.10.17 22:58:23.530318 [ 5233 ] {} BaseDaemon: 18. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::execute() @ 0x000000001107428c 2024.10.17 22:58:23.530339 [ 5233 ] {} BaseDaemon: 19. DB::MergeTask::execute() @ 0x0000000011077df0 2024.10.17 22:58:23.530362 [ 5233 ] {} BaseDaemon: 20. DB::SharedMergeMutateTaskBase::executeStep() @ 0x0000000011435a3c 2024.10.17 22:58:23.530384 [ 5233 ] {} BaseDaemon: 21. DB::MergeTreeBackgroundExecutor::threadFunction() @ 0x000000001108b234 2024.10.17 22:58:23.530410 [ 5233 ] {} BaseDaemon: 22. ThreadPoolImpl>::worker(std::__list_iterator, void*>) @ 0x000000000c52e264 2024.10.17 22:58:23.530448 [ 5233 ] {} BaseDaemon: 23. void std::__function::__policy_invoker::__call_impl::ThreadFromGlobalPoolImpl>::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>(void&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x000000000c531dd0 2024.10.17 22:58:23.530476 [ 5233 ] {} BaseDaemon: 24. void* std::__thread_proxy[abi:v15000]>, void ThreadPoolImpl::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>>(void*) @ 0x000000000c530a80 2024.10.17 22:58:23.530514 [ 5233 ] {} BaseDaemon: 25. ? @ 0x000000000007d5c8 2024.10.17 22:58:23.530534 [ 5233 ] {} BaseDaemon: 26. ? @ 0x00000000000e5edc 2024.10.17 22:58:23.530551 [ 5233 ] {} BaseDaemon: Integrity check of the executable skipped because the reference checksum could not be read. 
2024.10.17 22:58:23.531083 [ 5233 ] {} BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues 2024.10.17 22:58:23.531294 [ 5233 ] {} BaseDaemon: Changed settings: max_insert_threads = 4, max_threads = 42, use_hedged_requests = false, distributed_foreground_insert = true, alter_sync = 0, enable_memory_bound_merging_of_aggregation_results = true, cluster_for_parallel_replicas = 'default', do_not_merge_across_partitions_select_final = false, log_queries = true, log_queries_probability = 1., max_http_get_redirects = 10, enable_deflate_qpl_codec = false, enable_zstd_qat_codec = false, query_profiler_real_time_period_ns = 0, query_profiler_cpu_time_period_ns = 0, max_bytes_before_external_group_by = 90194313216, max_bytes_before_external_sort = 90194313216, max_memory_usage = 180388626432, backup_restore_keeper_retry_max_backoff_ms = 60000, cancel_http_readonly_queries_on_client_close = true, max_table_size_to_drop = 1000000000000, max_partition_size_to_drop = 1000000000000, default_table_engine = 'ReplicatedMergeTree', mutations_sync = 0, optimize_trivial_insert_select = false, database_replicated_allow_only_replicated_engine = true, cloud_mode = true, cloud_mode_engine = 2, distributed_ddl_output_mode = 'none_only_active', distributed_ddl_entry_format_version = 6, async_insert_max_data_size = 10485760, async_insert_busy_timeout_max_ms = 1000, enable_filesystem_cache_on_write_operations = true, load_marks_asynchronously = true, allow_prefetched_read_pool_for_remote_filesystem = true, filesystem_prefetch_max_memory_usage = 18038862643, filesystem_prefetches_limit = 200, compatibility = '24.6', insert_keeper_max_retries = 20, allow_experimental_materialized_postgresql_table = false, date_time_input_format = 'best_effort' ```. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). +* Backported in [#70990](https://github.com/ClickHouse/ClickHouse/issues/70990): Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#71246](https://github.com/ClickHouse/ClickHouse/issues/71246): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)). +* Backported in [#71371](https://github.com/ClickHouse/ClickHouse/issues/71371): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)). +* Backported in [#71594](https://github.com/ClickHouse/ClickHouse/issues/71594): Prevent crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#71022](https://github.com/ClickHouse/ClickHouse/issues/71022): Fix dropping of file cache in CHECK query in case of enabled transactions. [#69256](https://github.com/ClickHouse/ClickHouse/pull/69256) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#70384](https://github.com/ClickHouse/ClickHouse/issues/70384): CI: Enable Integration Tests for backport PRs. 
[#70329](https://github.com/ClickHouse/ClickHouse/pull/70329) ([Max Kainov](https://github.com/maxknv)). +* Backported in [#70538](https://github.com/ClickHouse/ClickHouse/issues/70538): Remove slow poll() logs in keeper. [#70508](https://github.com/ClickHouse/ClickHouse/pull/70508) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70971](https://github.com/ClickHouse/ClickHouse/issues/70971): Limiting logging some lines about configs. [#70879](https://github.com/ClickHouse/ClickHouse/pull/70879) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index cf28db5d49a..fab562a8cbb 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -31,6 +31,7 @@ v24.4.4.113-stable 2024-08-02 v24.4.3.25-stable 2024-06-14 v24.4.2.141-stable 2024-06-07 v24.4.1.2088-stable 2024-05-01 +v24.3.13.40-lts 2024-11-07 v24.3.12.75-lts 2024-10-08 v24.3.11.7-lts 2024-09-06 v24.3.10.33-lts 2024-09-03 From f71b00c5136bec4fe40393a45310c1f85a50e5d0 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Thu, 7 Nov 2024 22:52:27 +0100 Subject: [PATCH 293/566] Lint --- docs/changelogs/v24.3.13.40-lts.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/changelogs/v24.3.13.40-lts.md b/docs/changelogs/v24.3.13.40-lts.md index cec96e16292..bce45e88710 100644 --- a/docs/changelogs/v24.3.13.40-lts.md +++ b/docs/changelogs/v24.3.13.40-lts.md @@ -16,7 +16,7 @@ sidebar_label: 2024 * Backported in [#70571](https://github.com/ClickHouse/ClickHouse/issues/70571): Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)). * Backported in [#71146](https://github.com/ClickHouse/ClickHouse/issues/71146): Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)). * Backported in [#70682](https://github.com/ClickHouse/ClickHouse/issues/70682): Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)). -* Backported in [#71113](https://github.com/ClickHouse/ClickHouse/issues/71113): `GroupArraySortedData` uses a PODArray with non-POD elements, manually calling constructors and destructors for the elements as needed. But it wasn't careful enough: in two places it forgot to call destructor, in one place it left elements uninitialized if an exception is thrown when deserializing previous elements. Then `GroupArraySortedData`'s destructor called destructors on uninitialized elements and crashed: ``` 2024.10.17 22:58:23.523790 [ 5233 ] {} BaseDaemon: ########## Short fault info ############ 2024.10.17 22:58:23.523834 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) Received signal 11 2024.10.17 22:58:23.523862 [ 5233 ] {} BaseDaemon: Signal description: Segmentation fault 2024.10.17 22:58:23.523883 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 
2024.10.17 22:58:23.523908 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.523936 [ 5233 ] {} BaseDaemon: ######################################## 2024.10.17 22:58:23.523959 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) (query_id: 6c8a33a2-f45a-4a3b-bd71-ded6a1c9ccd3::202410_534066_534078_2) (query: ) Received signal Segmentation fault (11) 2024.10.17 22:58:23.523977 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 2024.10.17 22:58:23.523993 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.524817 [ 5233 ] {} BaseDaemon: 0. signalHandler(int, siginfo_t*, void*) @ 0x000000000c6f8308 2024.10.17 22:58:23.524917 [ 5233 ] {} BaseDaemon: 1. ? @ 0x0000ffffb7701850 2024.10.17 22:58:23.524962 [ 5233 ] {} BaseDaemon: 2. DB::Field::~Field() @ 0x0000000007c84855 2024.10.17 22:58:23.525012 [ 5233 ] {} BaseDaemon: 3. DB::Field::~Field() @ 0x0000000007c848a0 2024.10.17 22:58:23.526626 [ 5233 ] {} BaseDaemon: 4. DB::IAggregateFunctionDataHelper, DB::(anonymous namespace)::GroupArraySorted, DB::Field>>::destroy(char*) const (.5a6a451027f732f9fd91c13f4a13200c) @ 0x000000000cb9e84c 2024.10.17 22:58:23.527322 [ 5233 ] {} BaseDaemon: 5. DB::SerializationAggregateFunction::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const @ 0x000000000f7d10d0 2024.10.17 22:58:23.528470 [ 5233 ] {} BaseDaemon: 6. DB::ISerialization::deserializeBinaryBulkWithMultipleStreams(COW::immutable_ptr&, unsigned long, DB::ISerialization::DeserializeBinaryBulkSettings&, std::shared_ptr&, std::unordered_map::immutable_ptr, std::hash, std::equal_to, std::allocator::immutable_ptr>>>*) const @ 0x000000000f7cba20 2024.10.17 22:58:23.529213 [ 5233 ] {} BaseDaemon: 7. DB::MergeTreeReaderCompact::readData(DB::NameAndTypePair const&, COW::immutable_ptr&, unsigned long, std::function const&) @ 0x000000001120bbfc 2024.10.17 22:58:23.529277 [ 5233 ] {} BaseDaemon: 8. DB::MergeTreeReaderCompactSingleBuffer::readRows(unsigned long, unsigned long, bool, unsigned long, std::vector::immutable_ptr, std::allocator::immutable_ptr>>&) @ 0x000000001120fab0 2024.10.17 22:58:23.529319 [ 5233 ] {} BaseDaemon: 9. DB::MergeTreeSequentialSource::generate() @ 0x000000001121bf50 2024.10.17 22:58:23.529346 [ 5233 ] {} BaseDaemon: 10. DB::ISource::tryGenerate() @ 0x00000000116f520c 2024.10.17 22:58:23.529653 [ 5233 ] {} BaseDaemon: 11. 
DB::ISource::work() @ 0x00000000116f4c74 2024.10.17 22:58:23.529679 [ 5233 ] {} BaseDaemon: 12. DB::ExecutionThreadContext::executeTask() @ 0x000000001170a150 2024.10.17 22:58:23.529733 [ 5233 ] {} BaseDaemon: 13. DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x00000000117009f0 2024.10.17 22:58:23.529763 [ 5233 ] {} BaseDaemon: 14. DB::PipelineExecutor::executeStep(std::atomic*) @ 0x0000000011700574 2024.10.17 22:58:23.530089 [ 5233 ] {} BaseDaemon: 15. DB::PullingPipelineExecutor::pull(DB::Chunk&) @ 0x000000001170e364 2024.10.17 22:58:23.530277 [ 5233 ] {} BaseDaemon: 16. DB::PullingPipelineExecutor::pull(DB::Block&) @ 0x000000001170e4fc 2024.10.17 22:58:23.530295 [ 5233 ] {} BaseDaemon: 17. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() @ 0x0000000011074328 2024.10.17 22:58:23.530318 [ 5233 ] {} BaseDaemon: 18. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::execute() @ 0x000000001107428c 2024.10.17 22:58:23.530339 [ 5233 ] {} BaseDaemon: 19. DB::MergeTask::execute() @ 0x0000000011077df0 2024.10.17 22:58:23.530362 [ 5233 ] {} BaseDaemon: 20. DB::SharedMergeMutateTaskBase::executeStep() @ 0x0000000011435a3c 2024.10.17 22:58:23.530384 [ 5233 ] {} BaseDaemon: 21. DB::MergeTreeBackgroundExecutor::threadFunction() @ 0x000000001108b234 2024.10.17 22:58:23.530410 [ 5233 ] {} BaseDaemon: 22. ThreadPoolImpl>::worker(std::__list_iterator, void*>) @ 0x000000000c52e264 2024.10.17 22:58:23.530448 [ 5233 ] {} BaseDaemon: 23. void std::__function::__policy_invoker::__call_impl::ThreadFromGlobalPoolImpl>::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>(void&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x000000000c531dd0 2024.10.17 22:58:23.530476 [ 5233 ] {} BaseDaemon: 24. void* std::__thread_proxy[abi:v15000]>, void ThreadPoolImpl::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>>(void*) @ 0x000000000c530a80 2024.10.17 22:58:23.530514 [ 5233 ] {} BaseDaemon: 25. ? @ 0x000000000007d5c8 2024.10.17 22:58:23.530534 [ 5233 ] {} BaseDaemon: 26. ? @ 0x00000000000e5edc 2024.10.17 22:58:23.530551 [ 5233 ] {} BaseDaemon: Integrity check of the executable skipped because the reference checksum could not be read. 
2024.10.17 22:58:23.531083 [ 5233 ] {} BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues 2024.10.17 22:58:23.531294 [ 5233 ] {} BaseDaemon: Changed settings: max_insert_threads = 4, max_threads = 42, use_hedged_requests = false, distributed_foreground_insert = true, alter_sync = 0, enable_memory_bound_merging_of_aggregation_results = true, cluster_for_parallel_replicas = 'default', do_not_merge_across_partitions_select_final = false, log_queries = true, log_queries_probability = 1., max_http_get_redirects = 10, enable_deflate_qpl_codec = false, enable_zstd_qat_codec = false, query_profiler_real_time_period_ns = 0, query_profiler_cpu_time_period_ns = 0, max_bytes_before_external_group_by = 90194313216, max_bytes_before_external_sort = 90194313216, max_memory_usage = 180388626432, backup_restore_keeper_retry_max_backoff_ms = 60000, cancel_http_readonly_queries_on_client_close = true, max_table_size_to_drop = 1000000000000, max_partition_size_to_drop = 1000000000000, default_table_engine = 'ReplicatedMergeTree', mutations_sync = 0, optimize_trivial_insert_select = false, database_replicated_allow_only_replicated_engine = true, cloud_mode = true, cloud_mode_engine = 2, distributed_ddl_output_mode = 'none_only_active', distributed_ddl_entry_format_version = 6, async_insert_max_data_size = 10485760, async_insert_busy_timeout_max_ms = 1000, enable_filesystem_cache_on_write_operations = true, load_marks_asynchronously = true, allow_prefetched_read_pool_for_remote_filesystem = true, filesystem_prefetch_max_memory_usage = 18038862643, filesystem_prefetches_limit = 200, compatibility = '24.6', insert_keeper_max_retries = 20, allow_experimental_materialized_postgresql_table = false, date_time_input_format = 'best_effort' ```. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). +* Backported in [#71113](https://github.com/ClickHouse/ClickHouse/issues/71113): Fix a crash and a leak in AggregateFunctionGroupArraySorted. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). * Backported in [#70990](https://github.com/ClickHouse/ClickHouse/issues/70990): Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Backported in [#71246](https://github.com/ClickHouse/ClickHouse/issues/71246): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)). * Backported in [#71371](https://github.com/ClickHouse/ClickHouse/issues/71371): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)). 
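A side note on the "Correct and unify exit codes" change earlier in this series: POSIX truncates a process exit status to its low 8 bits, so an exception code that is a nonzero multiple of 256 would be reported to the shell as 0 (success). The cast before the ternary guards against that by falling back to 1 in such cases. The sketch below only mirrors that pattern; the destination type of the cast is not visible in this extract, so `std::uint8_t` and the helper name `toExitCode` are assumptions for illustration, not the exact code from the patch.

```cpp
#include <cstdint>
#include <iostream>

/// Hypothetical helper mirroring the pattern from the patch.
/// Exit statuses are truncated to 8 bits by the OS, so a nonzero error code
/// whose low byte is zero (e.g. 256) would otherwise look like success.
/// Checking an 8-bit view of the code makes such codes fall back to 1.
int toExitCode(int code)
{
    return static_cast<std::uint8_t>(code) ? code : 1;
}

int main()
{
    std::cout << toExitCode(0) << '\n';    /// 1: no specific code, report a generic failure
    std::cout << toExitCode(36) << '\n';   /// 36: fits in 8 bits, preserved as-is
    std::cout << toExitCode(256) << '\n';  /// 1: low byte is 0, would otherwise read as success
}
```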
From dc9e1e047b5cf27dde9dd8b0184cdcdd006202ed Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 7 Nov 2024 23:18:39 +0100 Subject: [PATCH 294/566] Fix tests --- tests/queries/0_stateless/02751_multiquery_with_argument.sh | 2 +- tests/queries/0_stateless/02771_multiple_query_arguments.sh | 2 +- .../02800_clickhouse_local_default_settings.reference | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02751_multiquery_with_argument.sh b/tests/queries/0_stateless/02751_multiquery_with_argument.sh index 4021194656b..4378786c145 100755 --- a/tests/queries/0_stateless/02751_multiquery_with_argument.sh +++ b/tests/queries/0_stateless/02751_multiquery_with_argument.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_LOCAL "SELECT 101;" $CLICKHOUSE_LOCAL "SELECT 102;SELECT 103;" # Invalid SQL. -$CLICKHOUSE_LOCAL "SELECT 200; S" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_LOCAL --implicit-select 0 "SELECT 200; S" 2>&1 | grep -o 'Syntax error' $CLICKHOUSE_LOCAL "; SELECT 201;" 2>&1 | grep -o 'Empty query' $CLICKHOUSE_LOCAL "; S; SELECT 202" 2>&1 | grep -o 'Empty query' diff --git a/tests/queries/0_stateless/02771_multiple_query_arguments.sh b/tests/queries/0_stateless/02771_multiple_query_arguments.sh index ae6e23eb61a..fcc1394573a 100755 --- a/tests/queries/0_stateless/02771_multiple_query_arguments.sh +++ b/tests/queries/0_stateless/02771_multiple_query_arguments.sh @@ -18,4 +18,4 @@ $CLICKHOUSE_LOCAL --query "SELECT 202;" --query "SELECT 202;" $CLICKHOUSE_LOCAL --query "SELECT 303" --query "SELECT 303; SELECT 303" $CLICKHOUSE_LOCAL --query "" --query "" $CLICKHOUSE_LOCAL --query "SELECT 303" --query 2>&1 | grep -o 'Bad arguments' -$CLICKHOUSE_LOCAL --query "SELECT 303" --query "SELE" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_LOCAL --implicit-select 0 --query "SELECT 303" --query "SELE" 2>&1 | grep -o 'Syntax error' diff --git a/tests/queries/0_stateless/02800_clickhouse_local_default_settings.reference b/tests/queries/0_stateless/02800_clickhouse_local_default_settings.reference index 0f18d1a3897..54c6f7ce397 100644 --- a/tests/queries/0_stateless/02800_clickhouse_local_default_settings.reference +++ b/tests/queries/0_stateless/02800_clickhouse_local_default_settings.reference @@ -1,2 +1,3 @@ allow_introspection_functions 1 storage_file_read_method mmap +implicit_select 1 From 6054f43000c645a6a470d06e8d935cf792da3011 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 8 Nov 2024 00:14:25 +0100 Subject: [PATCH 295/566] Make Vertical format prettier --- src/Formats/PrettyFormatHelpers.cpp | 102 ++ src/Formats/PrettyFormatHelpers.h | 18 + src/Interpreters/InterpreterSystemQuery.cpp | 4 +- .../Formats/Impl/PrettyBlockOutputFormat.cpp | 94 +- .../Formats/Impl/PrettyBlockOutputFormat.h | 5 +- .../Impl/PrettyCompactBlockOutputFormat.cpp | 4 +- .../Impl/PrettySpaceBlockOutputFormat.cpp | 4 +- .../Formats/Impl/VerticalRowOutputFormat.cpp | 28 +- .../Formats/Impl/VerticalRowOutputFormat.h | 3 + .../03268_vertical_pretty_numbers.reference | 1532 +++++++++++++++++ .../03268_vertical_pretty_numbers.sql | 11 + 11 files changed, 1707 insertions(+), 98 deletions(-) create mode 100644 src/Formats/PrettyFormatHelpers.cpp create mode 100644 src/Formats/PrettyFormatHelpers.h create mode 100644 tests/queries/0_stateless/03268_vertical_pretty_numbers.reference create mode 100644 tests/queries/0_stateless/03268_vertical_pretty_numbers.sql diff --git a/src/Formats/PrettyFormatHelpers.cpp b/src/Formats/PrettyFormatHelpers.cpp new file mode 100644 index 00000000000..6e2af036651 --- 
/dev/null +++ b/src/Formats/PrettyFormatHelpers.cpp @@ -0,0 +1,102 @@ +#include +#include +#include +#include +#include + + +namespace DB +{ + +void writeReadableNumberTipIfSingleValue(WriteBuffer & out, const Chunk & chunk, const FormatSettings & settings, bool color) +{ + if (chunk.getNumRows() == 1 && chunk.getNumColumns() == 1) + writeReadableNumberTip(out, *chunk.getColumns()[0], 0, settings, color); +} + +void writeReadableNumberTip(WriteBuffer & out, const IColumn & column, size_t row, const FormatSettings & settings, bool color) +{ + if (column.isNullAt(row)) + return; + + auto value = column.getFloat64(row); + auto threshold = settings.pretty.output_format_pretty_single_large_number_tip_threshold; + + if (threshold && isFinite(value) && abs(value) > threshold) + { + if (color) + writeCString("\033[90m", out); + writeCString(" -- ", out); + formatReadableQuantity(value, out, 2); + if (color) + writeCString("\033[0m", out); + } +} + + +String highlightDigitGroups(String source) +{ + if (source.size() <= 4) + return source; + + bool is_regular_number = true; + size_t num_digits_before_decimal = 0; + for (auto c : source) + { + if (c == '-' || c == ' ') + continue; + if (c == '.') + break; + if (c >= '0' && c <= '9') + { + ++num_digits_before_decimal; + } + else + { + is_regular_number = false; + break; + } + } + + if (!is_regular_number || num_digits_before_decimal <= 4) + return source; + + String result; + size_t size = source.size(); + result.reserve(2 * size); + + bool before_decimal = true; + size_t digit_num = 0; + for (size_t i = 0; i < size; ++i) + { + auto c = source[i]; + if (before_decimal && c >= '0' && c <= '9') + { + ++digit_num; + size_t offset = num_digits_before_decimal - digit_num; + if (offset && offset % 3 == 0) + { + result += "\033[4m"; + result += c; + result += "\033[0m"; + } + else + { + result += c; + } + } + else if (c == '.') + { + before_decimal = false; + result += c; + } + else + { + result += c; + } + } + + return result; +} + +} diff --git a/src/Formats/PrettyFormatHelpers.h b/src/Formats/PrettyFormatHelpers.h new file mode 100644 index 00000000000..72ab5e3c2a0 --- /dev/null +++ b/src/Formats/PrettyFormatHelpers.h @@ -0,0 +1,18 @@ +#include + +namespace DB +{ + +class Chunk; +class IColumn; +class WriteBuffer; +struct FormatSettings; + +/// Prints text describing the number in the form of: -- 12.34 million +void writeReadableNumberTip(WriteBuffer & out, const IColumn & column, size_t row, const FormatSettings & settings, bool color); +void writeReadableNumberTipIfSingleValue(WriteBuffer & out, const Chunk & chunk, const FormatSettings & settings, bool color); + +/// Underscores digit groups related to thousands using terminal ANSI escape sequences. 
+String highlightDigitGroups(String source); + +} diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 4c875026ace..b651bfb245e 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -795,9 +795,9 @@ BlockIO InterpreterSystemQuery::execute() case Type::WAIT_FAILPOINT: { getContext()->checkAccess(AccessType::SYSTEM_FAILPOINT); - LOG_TRACE(log, "waiting for failpoint {}", query.fail_point_name); + LOG_TRACE(log, "Waiting for failpoint {}", query.fail_point_name); FailPointInjection::pauseFailPoint(query.fail_point_name); - LOG_TRACE(log, "finished failpoint {}", query.fail_point_name); + LOG_TRACE(log, "Finished waiting for failpoint {}", query.fail_point_name); break; } case Type::RESET_COVERAGE: diff --git a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp index ff1a048029d..e8b55ea423b 100644 --- a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -352,7 +353,8 @@ void PrettyBlockOutputFormat::writeChunk(const Chunk & chunk, PortKind port_kind } writeCString(grid_symbols.bar, out); - writeReadableNumberTip(chunk); + if (readable_number_tip) + writeReadableNumberTipIfSingleValue(out, chunk, format_settings, color); writeCString("\n", out); } @@ -392,72 +394,6 @@ void PrettyBlockOutputFormat::writeChunk(const Chunk & chunk, PortKind port_kind } -static String highlightDigitGroups(String source) -{ - if (source.size() <= 4) - return source; - - bool is_regular_number = true; - size_t num_digits_before_decimal = 0; - for (auto c : source) - { - if (c == '-' || c == ' ') - continue; - if (c == '.') - break; - if (c >= '0' && c <= '9') - { - ++num_digits_before_decimal; - } - else - { - is_regular_number = false; - break; - } - } - - if (!is_regular_number || num_digits_before_decimal <= 4) - return source; - - String result; - size_t size = source.size(); - result.reserve(2 * size); - - bool before_decimal = true; - size_t digit_num = 0; - for (size_t i = 0; i < size; ++i) - { - auto c = source[i]; - if (before_decimal && c >= '0' && c <= '9') - { - ++digit_num; - size_t offset = num_digits_before_decimal - digit_num; - if (offset && offset % 3 == 0) - { - result += "\033[4m"; - result += c; - result += "\033[0m"; - } - else - { - result += c; - } - } - else if (c == '.') - { - before_decimal = false; - result += c; - } - else - { - result += c; - } - } - - return result; -} - - void PrettyBlockOutputFormat::writeValueWithPadding( const IColumn & column, const ISerialization & serialization, size_t row_num, size_t value_width, size_t pad_to_width, size_t cut_to_width, bool align_right, bool is_number) @@ -553,30 +489,6 @@ void PrettyBlockOutputFormat::writeSuffix() } } -void PrettyBlockOutputFormat::writeReadableNumberTip(const Chunk & chunk) -{ - const auto & columns = chunk.getColumns(); - auto is_single_number = readable_number_tip && chunk.getNumRows() == 1 && chunk.getNumColumns() == 1; - if (!is_single_number) - return; - - if (columns[0]->isNullAt(0)) - return; - - auto value = columns[0]->getFloat64(0); - auto threshold = format_settings.pretty.output_format_pretty_single_large_number_tip_threshold; - - if (threshold && isFinite(value) && abs(value) > threshold) - { - if (color) - writeCString("\033[90m", out); - writeCString(" -- ", out); - formatReadableQuantity(value, 
out, 2); - if (color) - writeCString("\033[0m", out); - } -} - void registerOutputFormatPretty(FormatFactory & factory) { registerPrettyFormatWithNoEscapesAndMonoBlock(factory, "Pretty"); diff --git a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h index 698efecd4b2..824a2fd2e6f 100644 --- a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h @@ -38,7 +38,6 @@ protected: virtual void writeChunk(const Chunk & chunk, PortKind port_kind); void writeMonoChunkIfNeeded(); void writeSuffix() override; - void writeReadableNumberTip(const Chunk & chunk); void onRowsReadBeforeUpdate() override { total_rows = getRowsReadBefore(); } @@ -57,8 +56,10 @@ protected: bool color; -private: +protected: bool readable_number_tip = false; + +private: bool mono_block; /// For mono_block == true only Chunk mono_chunk; diff --git a/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp index 57ec23e7e3b..1e4f784bc71 100644 --- a/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include @@ -189,7 +190,8 @@ void PrettyCompactBlockOutputFormat::writeRow( } writeCString(grid_symbols.bar, out); - writeReadableNumberTip(chunk); + if (readable_number_tip) + writeReadableNumberTipIfSingleValue(out, chunk, format_settings, color); writeCString("\n", out); } diff --git a/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp index 0a594b54b12..5b481099e41 100644 --- a/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -102,7 +103,8 @@ void PrettySpaceBlockOutputFormat::writeChunk(const Chunk & chunk, PortKind port writeValueWithPadding( *columns[column], *serializations[column], row, cur_width, max_widths[column], cut_to_width, type.shouldAlignRightInPrettyFormats(), isNumber(type)); } - writeReadableNumberTip(chunk); + if (readable_number_tip) + writeReadableNumberTipIfSingleValue(out, chunk, format_settings, color); writeChar('\n', out); } diff --git a/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp b/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp index 4852af9f0c8..7b0135b3ae4 100644 --- a/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp @@ -4,7 +4,10 @@ #include #include #include +#include #include +#include +#include namespace DB @@ -14,6 +17,8 @@ VerticalRowOutputFormat::VerticalRowOutputFormat( WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) : IRowOutputFormat(header_, out_), format_settings(format_settings_) { + color = format_settings.pretty.color == 1 || (format_settings.pretty.color == 2 && format_settings.is_writing_to_terminal); + const auto & sample = getPort(PortKind::Main).getHeader(); size_t columns = sample.columns(); @@ -31,6 +36,7 @@ VerticalRowOutputFormat::VerticalRowOutputFormat( } names_and_paddings.resize(columns); + is_number.resize(columns); for (size_t i = 0; i < columns; ++i) { WriteBufferFromString buf(names_and_paddings[i]); @@ -42,6 +48,7 @@ VerticalRowOutputFormat::VerticalRowOutputFormat( { size_t new_size = max_name_width - 
name_widths[i] + names_and_paddings[i].size(); names_and_paddings[i].resize(new_size, ' '); + is_number[i] = isNumber(removeNullable(recursiveRemoveLowCardinality(sample.getByPosition(i).type))); } } @@ -61,7 +68,26 @@ void VerticalRowOutputFormat::writeField(const IColumn & column, const ISerializ void VerticalRowOutputFormat::writeValue(const IColumn & column, const ISerialization & serialization, size_t row_num) const { - serialization.serializeText(column, row_num, out, format_settings); + if (color && format_settings.pretty.highlight_digit_groups && is_number[field_number]) + { + String serialized_value; + { + WriteBufferFromString buf(serialized_value); + serialization.serializeText(column, row_num, buf, format_settings); + } + + /// Highlight groups of thousands. + serialized_value = highlightDigitGroups(serialized_value); + out.write(serialized_value.data(), serialized_value.size()); + } + else + { + serialization.serializeText(column, row_num, out, format_settings); + } + + /// Write a tip. + if (is_number[field_number]) + writeReadableNumberTip(out, column, row_num, format_settings, color); } diff --git a/src/Processors/Formats/Impl/VerticalRowOutputFormat.h b/src/Processors/Formats/Impl/VerticalRowOutputFormat.h index 5870c3503fc..6fe79adc9be 100644 --- a/src/Processors/Formats/Impl/VerticalRowOutputFormat.h +++ b/src/Processors/Formats/Impl/VerticalRowOutputFormat.h @@ -56,6 +56,9 @@ private: using NamesAndPaddings = std::vector; NamesAndPaddings names_and_paddings; + + std::vector is_number; + bool color; }; } diff --git a/tests/queries/0_stateless/03268_vertical_pretty_numbers.reference b/tests/queries/0_stateless/03268_vertical_pretty_numbers.reference new file mode 100644 index 00000000000..397e9145798 --- /dev/null +++ b/tests/queries/0_stateless/03268_vertical_pretty_numbers.reference @@ -0,0 +1,1532 @@ +Row 1: +────── +exp2(number): 1 +exp10(number): 1 +concat('test', number): test0 + +Row 2: +────── +exp2(number): 2 -- 2.00 +exp10(number): 10 -- 10.00 +concat('test', number): test1 + +Row 3: +────── +exp2(number): 4 -- 4.00 +exp10(number): 100 -- 100.00 +concat('test', number): test2 + +Row 4: +────── +exp2(number): 8 -- 8.00 +exp10(number): 1000 -- 1.00 thousand +concat('test', number): test3 + +Row 5: +────── +exp2(number): 16 -- 16.00 +exp10(number): 10000 -- 10.00 thousand +concat('test', number): test4 + +Row 6: +────── +exp2(number): 32 -- 32.00 +exp10(number): 100000 -- 100.00 thousand +concat('test', number): test5 + +Row 7: +────── +exp2(number): 64 -- 64.00 +exp10(number): 1000000 -- 1.00 million +concat('test', number): test6 + +Row 8: +────── +exp2(number): 128 -- 128.00 +exp10(number): 10000000 -- 10.00 million +concat('test', number): test7 + +Row 9: +─────── +exp2(number): 256 -- 256.00 +exp10(number): 100000000 -- 100.00 million +concat('test', number): test8 + +Row 10: +─────── +exp2(number): 512 -- 512.00 +exp10(number): 1000000000 -- 1.00 billion +concat('test', number): test9 + +Row 11: +─────── +exp2(number): 1024 -- 1.02 thousand +exp10(number): 10000000000 -- 10.00 billion +concat('test', number): test10 + +Row 12: +─────── +exp2(number): 2048 -- 2.05 thousand +exp10(number): 100000000000 -- 100.00 billion +concat('test', number): test11 + +Row 13: +─────── +exp2(number): 4096 -- 4.10 thousand +exp10(number): 1000000000000 -- 1.00 trillion +concat('test', number): test12 + +Row 14: +─────── +exp2(number): 8192 -- 8.19 thousand +exp10(number): 10000000000000 -- 10.00 trillion +concat('test', number): test13 + +Row 15: +─────── +exp2(number): 16384 
-- 16.38 thousand +exp10(number): 100000000000000 -- 100.00 trillion +concat('test', number): test14 + +Row 16: +─────── +exp2(number): 32768 -- 32.77 thousand +exp10(number): 1000000000000000 -- 1.00 quadrillion +concat('test', number): test15 + +Row 17: +─────── +exp2(number): 65536 -- 65.54 thousand +exp10(number): 10000000000000000 -- 10.00 quadrillion +concat('test', number): test16 + +Row 18: +─────── +exp2(number): 131072 -- 131.07 thousand +exp10(number): 100000000000000000 -- 100.00 quadrillion +concat('test', number): test17 + +Row 19: +─────── +exp2(number): 262144 -- 262.14 thousand +exp10(number): 1000000000000000000 -- 1.00 quintillion +concat('test', number): test18 + +Row 20: +─────── +exp2(number): 524288 -- 524.29 thousand +exp10(number): 10000000000000000000 -- 10.00 quintillion +concat('test', number): test19 + +Row 21: +─────── +exp2(number): 1048576 -- 1.05 million +exp10(number): 100000000000000000000 -- 100.00 quintillion +concat('test', number): test20 + +Row 22: +─────── +exp2(number): 2097152 -- 2.10 million +exp10(number): 1e21 -- 1.00 sextillion +concat('test', number): test21 + +Row 23: +─────── +exp2(number): 4194304 -- 4.19 million +exp10(number): 1e22 -- 10.00 sextillion +concat('test', number): test22 + +Row 24: +─────── +exp2(number): 8388608 -- 8.39 million +exp10(number): 1e23 -- 100.00 sextillion +concat('test', number): test23 + +Row 25: +─────── +exp2(number): 16777216 -- 16.78 million +exp10(number): 1e24 -- 1.00 septillion +concat('test', number): test24 + +Row 26: +─────── +exp2(number): 33554432 -- 33.55 million +exp10(number): 1e25 -- 10.00 septillion +concat('test', number): test25 + +Row 27: +─────── +exp2(number): 67108864 -- 67.11 million +exp10(number): 1e26 -- 100.00 septillion +concat('test', number): test26 + +Row 28: +─────── +exp2(number): 134217728 -- 134.22 million +exp10(number): 1e27 -- 1.00 octillion +concat('test', number): test27 + +Row 29: +─────── +exp2(number): 268435456 -- 268.44 million +exp10(number): 1e28 -- 10.00 octillion +concat('test', number): test28 + +Row 30: +─────── +exp2(number): 536870912 -- 536.87 million +exp10(number): 1e29 -- 100.00 octillion +concat('test', number): test29 + +Row 31: +─────── +exp2(number): 1073741824 -- 1.07 billion +exp10(number): 1e30 -- 1.00 nonillion +concat('test', number): test30 + +Row 32: +─────── +exp2(number): 2147483648 -- 2.15 billion +exp10(number): 1e31 -- 10.00 nonillion +concat('test', number): test31 + +Row 33: +─────── +exp2(number): 4294967296 -- 4.29 billion +exp10(number): 1e32 -- 100.00 nonillion +concat('test', number): test32 + +Row 34: +─────── +exp2(number): 8589934592 -- 8.59 billion +exp10(number): 1e33 -- 1000.00 nonillion +concat('test', number): test33 + +Row 35: +─────── +exp2(number): 17179869184 -- 17.18 billion +exp10(number): 1e34 -- 10.00 decillion +concat('test', number): test34 + +Row 36: +─────── +exp2(number): 34359738368 -- 34.36 billion +exp10(number): 1e35 -- 100.00 decillion +concat('test', number): test35 + +Row 37: +─────── +exp2(number): 68719476736 -- 68.72 billion +exp10(number): 1e36 -- 1.00 undecillion +concat('test', number): test36 + +Row 38: +─────── +exp2(number): 137438953472 -- 137.44 billion +exp10(number): 1e37 -- 10.00 undecillion +concat('test', number): test37 + +Row 39: +─────── +exp2(number): 274877906944 -- 274.88 billion +exp10(number): 1e38 -- 100.00 undecillion +concat('test', number): test38 + +Row 40: +─────── +exp2(number): 549755813888 -- 549.76 billion +exp10(number): 1e39 -- 1000.00 undecillion +concat('test', 
number): test39 + +Row 41: +─────── +exp2(number): 1099511627776 -- 1.10 trillion +exp10(number): 1e40 -- 10.00 duodecillion +concat('test', number): test40 + +Row 42: +─────── +exp2(number): 2199023255552 -- 2.20 trillion +exp10(number): 1e41 -- 100.00 duodecillion +concat('test', number): test41 + +Row 43: +─────── +exp2(number): 4398046511104 -- 4.40 trillion +exp10(number): 1e42 -- 1.00 tredecillion +concat('test', number): test42 + +Row 44: +─────── +exp2(number): 8796093022208 -- 8.80 trillion +exp10(number): 1e43 -- 10.00 tredecillion +concat('test', number): test43 + +Row 45: +─────── +exp2(number): 17592186044416 -- 17.59 trillion +exp10(number): 1e44 -- 100.00 tredecillion +concat('test', number): test44 + +Row 46: +─────── +exp2(number): 35184372088832 -- 35.18 trillion +exp10(number): 1e45 -- 1000.00 tredecillion +concat('test', number): test45 + +Row 47: +─────── +exp2(number): 70368744177664 -- 70.37 trillion +exp10(number): 1e46 -- 10.00 quattuordecillion +concat('test', number): test46 + +Row 48: +─────── +exp2(number): 140737488355328 -- 140.74 trillion +exp10(number): 1e47 -- 100.00 quattuordecillion +concat('test', number): test47 + +Row 49: +─────── +exp2(number): 281474976710656 -- 281.47 trillion +exp10(number): 1e48 -- 1.00 quindecillion +concat('test', number): test48 + +Row 50: +─────── +exp2(number): 562949953421312 -- 562.95 trillion +exp10(number): 1e49 -- 10.00 quindecillion +concat('test', number): test49 + +Row 51: +─────── +exp2(number): 1125899906842624 -- 1.13 quadrillion +exp10(number): 1e50 -- 100.00 quindecillion +concat('test', number): test50 + +Row 52: +─────── +exp2(number): 2251799813685248 -- 2.25 quadrillion +exp10(number): 1e51 -- 1.00 sexdecillion +concat('test', number): test51 + +Row 53: +─────── +exp2(number): 4503599627370496 -- 4.50 quadrillion +exp10(number): 1e52 -- 10.00 sexdecillion +concat('test', number): test52 + +Row 54: +─────── +exp2(number): 9007199254740992 -- 9.01 quadrillion +exp10(number): 1e53 -- 100.00 sexdecillion +concat('test', number): test53 + +Row 55: +─────── +exp2(number): 18014398509481984 -- 18.01 quadrillion +exp10(number): 1e54 -- 1.00 septendecillion +concat('test', number): test54 + +Row 56: +─────── +exp2(number): 36028797018963970 -- 36.03 quadrillion +exp10(number): 1e55 -- 10.00 septendecillion +concat('test', number): test55 + +Row 57: +─────── +exp2(number): 72057594037927940 -- 72.06 quadrillion +exp10(number): 1e56 -- 100.00 septendecillion +concat('test', number): test56 + +Row 58: +─────── +exp2(number): 144115188075855870 -- 144.12 quadrillion +exp10(number): 1e57 -- 1.00 octodecillion +concat('test', number): test57 + +Row 59: +─────── +exp2(number): 288230376151711740 -- 288.23 quadrillion +exp10(number): 1e58 -- 10.00 octodecillion +concat('test', number): test58 + +Row 60: +─────── +exp2(number): 576460752303423500 -- 576.46 quadrillion +exp10(number): 1e59 -- 100.00 octodecillion +concat('test', number): test59 + +Row 61: +─────── +exp2(number): 1152921504606847000 -- 1.15 quintillion +exp10(number): 1e60 -- 1000.00 octodecillion +concat('test', number): test60 + +Row 62: +─────── +exp2(number): 2305843009213694000 -- 2.31 quintillion +exp10(number): 1e61 -- 10.00 novemdecillion +concat('test', number): test61 + +Row 63: +─────── +exp2(number): 4611686018427388000 -- 4.61 quintillion +exp10(number): 1e62 -- 100.00 novemdecillion +concat('test', number): test62 + +Row 64: +─────── +exp2(number): 9223372036854776000 -- 9.22 quintillion +exp10(number): 1e63 -- 1.00 vigintillion +concat('test', 
number): test63 +Row 1: +────── +exp2(number): 1 +exp10(number): 1 +concat('test', number): test0 + +Row 2: +────── +exp2(number): 2 -- 2.00 +exp10(number): 10 -- 10.00 +concat('test', number): test1 + +Row 3: +────── +exp2(number): 4 -- 4.00 +exp10(number): 100 -- 100.00 +concat('test', number): test2 + +Row 4: +────── +exp2(number): 8 -- 8.00 +exp10(number): 1000 -- 1.00 thousand +concat('test', number): test3 + +Row 5: +────── +exp2(number): 16 -- 16.00 +exp10(number): 10000 -- 10.00 thousand +concat('test', number): test4 + +Row 6: +────── +exp2(number): 32 -- 32.00 +exp10(number): 100000 -- 100.00 thousand +concat('test', number): test5 + +Row 7: +────── +exp2(number): 64 -- 64.00 +exp10(number): 1000000 -- 1.00 million +concat('test', number): test6 + +Row 8: +────── +exp2(number): 128 -- 128.00 +exp10(number): 10000000 -- 10.00 million +concat('test', number): test7 + +Row 9: +─────── +exp2(number): 256 -- 256.00 +exp10(number): 100000000 -- 100.00 million +concat('test', number): test8 + +Row 10: +─────── +exp2(number): 512 -- 512.00 +exp10(number): 1000000000 -- 1.00 billion +concat('test', number): test9 + +Row 11: +─────── +exp2(number): 1024 -- 1.02 thousand +exp10(number): 10000000000 -- 10.00 billion +concat('test', number): test10 + +Row 12: +─────── +exp2(number): 2048 -- 2.05 thousand +exp10(number): 100000000000 -- 100.00 billion +concat('test', number): test11 + +Row 13: +─────── +exp2(number): 4096 -- 4.10 thousand +exp10(number): 1000000000000 -- 1.00 trillion +concat('test', number): test12 + +Row 14: +─────── +exp2(number): 8192 -- 8.19 thousand +exp10(number): 10000000000000 -- 10.00 trillion +concat('test', number): test13 + +Row 15: +─────── +exp2(number): 16384 -- 16.38 thousand +exp10(number): 100000000000000 -- 100.00 trillion +concat('test', number): test14 + +Row 16: +─────── +exp2(number): 32768 -- 32.77 thousand +exp10(number): 1000000000000000 -- 1.00 quadrillion +concat('test', number): test15 + +Row 17: +─────── +exp2(number): 65536 -- 65.54 thousand +exp10(number): 10000000000000000 -- 10.00 quadrillion +concat('test', number): test16 + +Row 18: +─────── +exp2(number): 131072 -- 131.07 thousand +exp10(number): 100000000000000000 -- 100.00 quadrillion +concat('test', number): test17 + +Row 19: +─────── +exp2(number): 262144 -- 262.14 thousand +exp10(number): 1000000000000000000 -- 1.00 quintillion +concat('test', number): test18 + +Row 20: +─────── +exp2(number): 524288 -- 524.29 thousand +exp10(number): 10000000000000000000 -- 10.00 quintillion +concat('test', number): test19 + +Row 21: +─────── +exp2(number): 1048576 -- 1.05 million +exp10(number): 100000000000000000000 -- 100.00 quintillion +concat('test', number): test20 + +Row 22: +─────── +exp2(number): 2097152 -- 2.10 million +exp10(number): 1e21 -- 1.00 sextillion +concat('test', number): test21 + +Row 23: +─────── +exp2(number): 4194304 -- 4.19 million +exp10(number): 1e22 -- 10.00 sextillion +concat('test', number): test22 + +Row 24: +─────── +exp2(number): 8388608 -- 8.39 million +exp10(number): 1e23 -- 100.00 sextillion +concat('test', number): test23 + +Row 25: +─────── +exp2(number): 16777216 -- 16.78 million +exp10(number): 1e24 -- 1.00 septillion +concat('test', number): test24 + +Row 26: +─────── +exp2(number): 33554432 -- 33.55 million +exp10(number): 1e25 -- 10.00 septillion +concat('test', number): test25 + +Row 27: +─────── +exp2(number): 67108864 -- 67.11 million +exp10(number): 1e26 -- 100.00 septillion +concat('test', number): test26 + +Row 28: +─────── +exp2(number): 134217728 -- 
134.22 million +exp10(number): 1e27 -- 1.00 octillion +concat('test', number): test27 + +Row 29: +─────── +exp2(number): 268435456 -- 268.44 million +exp10(number): 1e28 -- 10.00 octillion +concat('test', number): test28 + +Row 30: +─────── +exp2(number): 536870912 -- 536.87 million +exp10(number): 1e29 -- 100.00 octillion +concat('test', number): test29 + +Row 31: +─────── +exp2(number): 1073741824 -- 1.07 billion +exp10(number): 1e30 -- 1.00 nonillion +concat('test', number): test30 + +Row 32: +─────── +exp2(number): 2147483648 -- 2.15 billion +exp10(number): 1e31 -- 10.00 nonillion +concat('test', number): test31 + +Row 33: +─────── +exp2(number): 4294967296 -- 4.29 billion +exp10(number): 1e32 -- 100.00 nonillion +concat('test', number): test32 + +Row 34: +─────── +exp2(number): 8589934592 -- 8.59 billion +exp10(number): 1e33 -- 1000.00 nonillion +concat('test', number): test33 + +Row 35: +─────── +exp2(number): 17179869184 -- 17.18 billion +exp10(number): 1e34 -- 10.00 decillion +concat('test', number): test34 + +Row 36: +─────── +exp2(number): 34359738368 -- 34.36 billion +exp10(number): 1e35 -- 100.00 decillion +concat('test', number): test35 + +Row 37: +─────── +exp2(number): 68719476736 -- 68.72 billion +exp10(number): 1e36 -- 1.00 undecillion +concat('test', number): test36 + +Row 38: +─────── +exp2(number): 137438953472 -- 137.44 billion +exp10(number): 1e37 -- 10.00 undecillion +concat('test', number): test37 + +Row 39: +─────── +exp2(number): 274877906944 -- 274.88 billion +exp10(number): 1e38 -- 100.00 undecillion +concat('test', number): test38 + +Row 40: +─────── +exp2(number): 549755813888 -- 549.76 billion +exp10(number): 1e39 -- 1000.00 undecillion +concat('test', number): test39 + +Row 41: +─────── +exp2(number): 1099511627776 -- 1.10 trillion +exp10(number): 1e40 -- 10.00 duodecillion +concat('test', number): test40 + +Row 42: +─────── +exp2(number): 2199023255552 -- 2.20 trillion +exp10(number): 1e41 -- 100.00 duodecillion +concat('test', number): test41 + +Row 43: +─────── +exp2(number): 4398046511104 -- 4.40 trillion +exp10(number): 1e42 -- 1.00 tredecillion +concat('test', number): test42 + +Row 44: +─────── +exp2(number): 8796093022208 -- 8.80 trillion +exp10(number): 1e43 -- 10.00 tredecillion +concat('test', number): test43 + +Row 45: +─────── +exp2(number): 17592186044416 -- 17.59 trillion +exp10(number): 1e44 -- 100.00 tredecillion +concat('test', number): test44 + +Row 46: +─────── +exp2(number): 35184372088832 -- 35.18 trillion +exp10(number): 1e45 -- 1000.00 tredecillion +concat('test', number): test45 + +Row 47: +─────── +exp2(number): 70368744177664 -- 70.37 trillion +exp10(number): 1e46 -- 10.00 quattuordecillion +concat('test', number): test46 + +Row 48: +─────── +exp2(number): 140737488355328 -- 140.74 trillion +exp10(number): 1e47 -- 100.00 quattuordecillion +concat('test', number): test47 + +Row 49: +─────── +exp2(number): 281474976710656 -- 281.47 trillion +exp10(number): 1e48 -- 1.00 quindecillion +concat('test', number): test48 + +Row 50: +─────── +exp2(number): 562949953421312 -- 562.95 trillion +exp10(number): 1e49 -- 10.00 quindecillion +concat('test', number): test49 + +Row 51: +─────── +exp2(number): 1125899906842624 -- 1.13 quadrillion +exp10(number): 1e50 -- 100.00 quindecillion +concat('test', number): test50 + +Row 52: +─────── +exp2(number): 2251799813685248 -- 2.25 quadrillion +exp10(number): 1e51 -- 1.00 sexdecillion +concat('test', number): test51 + +Row 53: +─────── +exp2(number): 4503599627370496 -- 4.50 quadrillion +exp10(number): 
1e52 -- 10.00 sexdecillion +concat('test', number): test52 + +Row 54: +─────── +exp2(number): 9007199254740992 -- 9.01 quadrillion +exp10(number): 1e53 -- 100.00 sexdecillion +concat('test', number): test53 + +Row 55: +─────── +exp2(number): 18014398509481984 -- 18.01 quadrillion +exp10(number): 1e54 -- 1.00 septendecillion +concat('test', number): test54 + +Row 56: +─────── +exp2(number): 36028797018963970 -- 36.03 quadrillion +exp10(number): 1e55 -- 10.00 septendecillion +concat('test', number): test55 + +Row 57: +─────── +exp2(number): 72057594037927940 -- 72.06 quadrillion +exp10(number): 1e56 -- 100.00 septendecillion +concat('test', number): test56 + +Row 58: +─────── +exp2(number): 144115188075855870 -- 144.12 quadrillion +exp10(number): 1e57 -- 1.00 octodecillion +concat('test', number): test57 + +Row 59: +─────── +exp2(number): 288230376151711740 -- 288.23 quadrillion +exp10(number): 1e58 -- 10.00 octodecillion +concat('test', number): test58 + +Row 60: +─────── +exp2(number): 576460752303423500 -- 576.46 quadrillion +exp10(number): 1e59 -- 100.00 octodecillion +concat('test', number): test59 + +Row 61: +─────── +exp2(number): 1152921504606847000 -- 1.15 quintillion +exp10(number): 1e60 -- 1000.00 octodecillion +concat('test', number): test60 + +Row 62: +─────── +exp2(number): 2305843009213694000 -- 2.31 quintillion +exp10(number): 1e61 -- 10.00 novemdecillion +concat('test', number): test61 + +Row 63: +─────── +exp2(number): 4611686018427388000 -- 4.61 quintillion +exp10(number): 1e62 -- 100.00 novemdecillion +concat('test', number): test62 + +Row 64: +─────── +exp2(number): 9223372036854776000 -- 9.22 quintillion +exp10(number): 1e63 -- 1.00 vigintillion +concat('test', number): test63 +Row 1: +────── +exp2(number): 1 +exp10(number): 1 +concat('test', number): test0 + +Row 2: +────── +exp2(number): 2 -- 2.00 +exp10(number): 10 -- 10.00 +concat('test', number): test1 + +Row 3: +────── +exp2(number): 4 -- 4.00 +exp10(number): 100 -- 100.00 +concat('test', number): test2 + +Row 4: +────── +exp2(number): 8 -- 8.00 +exp10(number): 1000 -- 1.00 thousand +concat('test', number): test3 + +Row 5: +────── +exp2(number): 16 -- 16.00 +exp10(number): 10000 -- 10.00 thousand +concat('test', number): test4 + +Row 6: +────── +exp2(number): 32 -- 32.00 +exp10(number): 100000 -- 100.00 thousand +concat('test', number): test5 + +Row 7: +────── +exp2(number): 64 -- 64.00 +exp10(number): 1000000 -- 1.00 million +concat('test', number): test6 + +Row 8: +────── +exp2(number): 128 -- 128.00 +exp10(number): 10000000 -- 10.00 million +concat('test', number): test7 + +Row 9: +─────── +exp2(number): 256 -- 256.00 +exp10(number): 100000000 -- 100.00 million +concat('test', number): test8 + +Row 10: +─────── +exp2(number): 512 -- 512.00 +exp10(number): 1000000000 -- 1.00 billion +concat('test', number): test9 + +Row 11: +─────── +exp2(number): 1024 -- 1.02 thousand +exp10(number): 10000000000 -- 10.00 billion +concat('test', number): test10 + +Row 12: +─────── +exp2(number): 2048 -- 2.05 thousand +exp10(number): 100000000000 -- 100.00 billion +concat('test', number): test11 + +Row 13: +─────── +exp2(number): 4096 -- 4.10 thousand +exp10(number): 1000000000000 -- 1.00 trillion +concat('test', number): test12 + +Row 14: +─────── +exp2(number): 8192 -- 8.19 thousand +exp10(number): 10000000000000 -- 10.00 trillion +concat('test', number): test13 + +Row 15: +─────── +exp2(number): 16384 -- 16.38 thousand +exp10(number): 100000000000000 -- 100.00 trillion +concat('test', number): test14 + +Row 16: +─────── 
+exp2(number): 32768 -- 32.77 thousand +exp10(number): 1000000000000000 -- 1.00 quadrillion +concat('test', number): test15 + +Row 17: +─────── +exp2(number): 65536 -- 65.54 thousand +exp10(number): 10000000000000000 -- 10.00 quadrillion +concat('test', number): test16 + +Row 18: +─────── +exp2(number): 131072 -- 131.07 thousand +exp10(number): 100000000000000000 -- 100.00 quadrillion +concat('test', number): test17 + +Row 19: +─────── +exp2(number): 262144 -- 262.14 thousand +exp10(number): 1000000000000000000 -- 1.00 quintillion +concat('test', number): test18 + +Row 20: +─────── +exp2(number): 524288 -- 524.29 thousand +exp10(number): 10000000000000000000 -- 10.00 quintillion +concat('test', number): test19 + +Row 21: +─────── +exp2(number): 1048576 -- 1.05 million +exp10(number): 100000000000000000000 -- 100.00 quintillion +concat('test', number): test20 + +Row 22: +─────── +exp2(number): 2097152 -- 2.10 million +exp10(number): 1e21 -- 1.00 sextillion +concat('test', number): test21 + +Row 23: +─────── +exp2(number): 4194304 -- 4.19 million +exp10(number): 1e22 -- 10.00 sextillion +concat('test', number): test22 + +Row 24: +─────── +exp2(number): 8388608 -- 8.39 million +exp10(number): 1e23 -- 100.00 sextillion +concat('test', number): test23 + +Row 25: +─────── +exp2(number): 16777216 -- 16.78 million +exp10(number): 1e24 -- 1.00 septillion +concat('test', number): test24 + +Row 26: +─────── +exp2(number): 33554432 -- 33.55 million +exp10(number): 1e25 -- 10.00 septillion +concat('test', number): test25 + +Row 27: +─────── +exp2(number): 67108864 -- 67.11 million +exp10(number): 1e26 -- 100.00 septillion +concat('test', number): test26 + +Row 28: +─────── +exp2(number): 134217728 -- 134.22 million +exp10(number): 1e27 -- 1.00 octillion +concat('test', number): test27 + +Row 29: +─────── +exp2(number): 268435456 -- 268.44 million +exp10(number): 1e28 -- 10.00 octillion +concat('test', number): test28 + +Row 30: +─────── +exp2(number): 536870912 -- 536.87 million +exp10(number): 1e29 -- 100.00 octillion +concat('test', number): test29 + +Row 31: +─────── +exp2(number): 1073741824 -- 1.07 billion +exp10(number): 1e30 -- 1.00 nonillion +concat('test', number): test30 + +Row 32: +─────── +exp2(number): 2147483648 -- 2.15 billion +exp10(number): 1e31 -- 10.00 nonillion +concat('test', number): test31 + +Row 33: +─────── +exp2(number): 4294967296 -- 4.29 billion +exp10(number): 1e32 -- 100.00 nonillion +concat('test', number): test32 + +Row 34: +─────── +exp2(number): 8589934592 -- 8.59 billion +exp10(number): 1e33 -- 1000.00 nonillion +concat('test', number): test33 + +Row 35: +─────── +exp2(number): 17179869184 -- 17.18 billion +exp10(number): 1e34 -- 10.00 decillion +concat('test', number): test34 + +Row 36: +─────── +exp2(number): 34359738368 -- 34.36 billion +exp10(number): 1e35 -- 100.00 decillion +concat('test', number): test35 + +Row 37: +─────── +exp2(number): 68719476736 -- 68.72 billion +exp10(number): 1e36 -- 1.00 undecillion +concat('test', number): test36 + +Row 38: +─────── +exp2(number): 137438953472 -- 137.44 billion +exp10(number): 1e37 -- 10.00 undecillion +concat('test', number): test37 + +Row 39: +─────── +exp2(number): 274877906944 -- 274.88 billion +exp10(number): 1e38 -- 100.00 undecillion +concat('test', number): test38 + +Row 40: +─────── +exp2(number): 549755813888 -- 549.76 billion +exp10(number): 1e39 -- 1000.00 undecillion +concat('test', number): test39 + +Row 41: +─────── +exp2(number): 1099511627776 -- 1.10 trillion +exp10(number): 1e40 -- 10.00 duodecillion 
+concat('test', number): test40 + +Row 42: +─────── +exp2(number): 2199023255552 -- 2.20 trillion +exp10(number): 1e41 -- 100.00 duodecillion +concat('test', number): test41 + +Row 43: +─────── +exp2(number): 4398046511104 -- 4.40 trillion +exp10(number): 1e42 -- 1.00 tredecillion +concat('test', number): test42 + +Row 44: +─────── +exp2(number): 8796093022208 -- 8.80 trillion +exp10(number): 1e43 -- 10.00 tredecillion +concat('test', number): test43 + +Row 45: +─────── +exp2(number): 17592186044416 -- 17.59 trillion +exp10(number): 1e44 -- 100.00 tredecillion +concat('test', number): test44 + +Row 46: +─────── +exp2(number): 35184372088832 -- 35.18 trillion +exp10(number): 1e45 -- 1000.00 tredecillion +concat('test', number): test45 + +Row 47: +─────── +exp2(number): 70368744177664 -- 70.37 trillion +exp10(number): 1e46 -- 10.00 quattuordecillion +concat('test', number): test46 + +Row 48: +─────── +exp2(number): 140737488355328 -- 140.74 trillion +exp10(number): 1e47 -- 100.00 quattuordecillion +concat('test', number): test47 + +Row 49: +─────── +exp2(number): 281474976710656 -- 281.47 trillion +exp10(number): 1e48 -- 1.00 quindecillion +concat('test', number): test48 + +Row 50: +─────── +exp2(number): 562949953421312 -- 562.95 trillion +exp10(number): 1e49 -- 10.00 quindecillion +concat('test', number): test49 + +Row 51: +─────── +exp2(number): 1125899906842624 -- 1.13 quadrillion +exp10(number): 1e50 -- 100.00 quindecillion +concat('test', number): test50 + +Row 52: +─────── +exp2(number): 2251799813685248 -- 2.25 quadrillion +exp10(number): 1e51 -- 1.00 sexdecillion +concat('test', number): test51 + +Row 53: +─────── +exp2(number): 4503599627370496 -- 4.50 quadrillion +exp10(number): 1e52 -- 10.00 sexdecillion +concat('test', number): test52 + +Row 54: +─────── +exp2(number): 9007199254740992 -- 9.01 quadrillion +exp10(number): 1e53 -- 100.00 sexdecillion +concat('test', number): test53 + +Row 55: +─────── +exp2(number): 18014398509481984 -- 18.01 quadrillion +exp10(number): 1e54 -- 1.00 septendecillion +concat('test', number): test54 + +Row 56: +─────── +exp2(number): 36028797018963970 -- 36.03 quadrillion +exp10(number): 1e55 -- 10.00 septendecillion +concat('test', number): test55 + +Row 57: +─────── +exp2(number): 72057594037927940 -- 72.06 quadrillion +exp10(number): 1e56 -- 100.00 septendecillion +concat('test', number): test56 + +Row 58: +─────── +exp2(number): 144115188075855870 -- 144.12 quadrillion +exp10(number): 1e57 -- 1.00 octodecillion +concat('test', number): test57 + +Row 59: +─────── +exp2(number): 288230376151711740 -- 288.23 quadrillion +exp10(number): 1e58 -- 10.00 octodecillion +concat('test', number): test58 + +Row 60: +─────── +exp2(number): 576460752303423500 -- 576.46 quadrillion +exp10(number): 1e59 -- 100.00 octodecillion +concat('test', number): test59 + +Row 61: +─────── +exp2(number): 1152921504606847000 -- 1.15 quintillion +exp10(number): 1e60 -- 1000.00 octodecillion +concat('test', number): test60 + +Row 62: +─────── +exp2(number): 2305843009213694000 -- 2.31 quintillion +exp10(number): 1e61 -- 10.00 novemdecillion +concat('test', number): test61 + +Row 63: +─────── +exp2(number): 4611686018427388000 -- 4.61 quintillion +exp10(number): 1e62 -- 100.00 novemdecillion +concat('test', number): test62 + +Row 64: +─────── +exp2(number): 9223372036854776000 -- 9.22 quintillion +exp10(number): 1e63 -- 1.00 vigintillion +concat('test', number): test63 +Row 1: +────── +exp2(number): 1 +exp10(number): 1 +concat('test', number): test0 + +Row 2: +────── 
+exp2(number): 2 +exp10(number): 10 +concat('test', number): test1 + +Row 3: +────── +exp2(number): 4 +exp10(number): 100 +concat('test', number): test2 + +Row 4: +────── +exp2(number): 8 +exp10(number): 1000 +concat('test', number): test3 + +Row 5: +────── +exp2(number): 16 +exp10(number): 10000 +concat('test', number): test4 + +Row 6: +────── +exp2(number): 32 +exp10(number): 100000 +concat('test', number): test5 + +Row 7: +────── +exp2(number): 64 +exp10(number): 1000000 +concat('test', number): test6 + +Row 8: +────── +exp2(number): 128 +exp10(number): 10000000 +concat('test', number): test7 + +Row 9: +─────── +exp2(number): 256 +exp10(number): 100000000 +concat('test', number): test8 + +Row 10: +─────── +exp2(number): 512 +exp10(number): 1000000000 +concat('test', number): test9 + +Row 11: +─────── +exp2(number): 1024 +exp10(number): 10000000000 +concat('test', number): test10 + +Row 12: +─────── +exp2(number): 2048 +exp10(number): 100000000000 +concat('test', number): test11 + +Row 13: +─────── +exp2(number): 4096 +exp10(number): 1000000000000 +concat('test', number): test12 + +Row 14: +─────── +exp2(number): 8192 +exp10(number): 10000000000000 +concat('test', number): test13 + +Row 15: +─────── +exp2(number): 16384 +exp10(number): 100000000000000 +concat('test', number): test14 + +Row 16: +─────── +exp2(number): 32768 +exp10(number): 1000000000000000 +concat('test', number): test15 + +Row 17: +─────── +exp2(number): 65536 +exp10(number): 10000000000000000 +concat('test', number): test16 + +Row 18: +─────── +exp2(number): 131072 +exp10(number): 100000000000000000 +concat('test', number): test17 + +Row 19: +─────── +exp2(number): 262144 +exp10(number): 1000000000000000000 +concat('test', number): test18 + +Row 20: +─────── +exp2(number): 524288 +exp10(number): 10000000000000000000 +concat('test', number): test19 + +Row 21: +─────── +exp2(number): 1048576 +exp10(number): 100000000000000000000 +concat('test', number): test20 + +Row 22: +─────── +exp2(number): 2097152 +exp10(number): 1e21 +concat('test', number): test21 + +Row 23: +─────── +exp2(number): 4194304 +exp10(number): 1e22 +concat('test', number): test22 + +Row 24: +─────── +exp2(number): 8388608 +exp10(number): 1e23 +concat('test', number): test23 + +Row 25: +─────── +exp2(number): 16777216 +exp10(number): 1e24 +concat('test', number): test24 + +Row 26: +─────── +exp2(number): 33554432 +exp10(number): 1e25 +concat('test', number): test25 + +Row 27: +─────── +exp2(number): 67108864 +exp10(number): 1e26 +concat('test', number): test26 + +Row 28: +─────── +exp2(number): 134217728 +exp10(number): 1e27 +concat('test', number): test27 + +Row 29: +─────── +exp2(number): 268435456 +exp10(number): 1e28 +concat('test', number): test28 + +Row 30: +─────── +exp2(number): 536870912 +exp10(number): 1e29 +concat('test', number): test29 + +Row 31: +─────── +exp2(number): 1073741824 +exp10(number): 1e30 +concat('test', number): test30 + +Row 32: +─────── +exp2(number): 2147483648 +exp10(number): 1e31 +concat('test', number): test31 + +Row 33: +─────── +exp2(number): 4294967296 +exp10(number): 1e32 +concat('test', number): test32 + +Row 34: +─────── +exp2(number): 8589934592 +exp10(number): 1e33 +concat('test', number): test33 + +Row 35: +─────── +exp2(number): 17179869184 +exp10(number): 1e34 +concat('test', number): test34 + +Row 36: +─────── +exp2(number): 34359738368 +exp10(number): 1e35 +concat('test', number): test35 + +Row 37: +─────── +exp2(number): 68719476736 +exp10(number): 1e36 +concat('test', number): test36 + +Row 38: +─────── 
+exp2(number): 137438953472 +exp10(number): 1e37 +concat('test', number): test37 + +Row 39: +─────── +exp2(number): 274877906944 +exp10(number): 1e38 +concat('test', number): test38 + +Row 40: +─────── +exp2(number): 549755813888 +exp10(number): 1e39 +concat('test', number): test39 + +Row 41: +─────── +exp2(number): 1099511627776 +exp10(number): 1e40 +concat('test', number): test40 + +Row 42: +─────── +exp2(number): 2199023255552 +exp10(number): 1e41 +concat('test', number): test41 + +Row 43: +─────── +exp2(number): 4398046511104 +exp10(number): 1e42 +concat('test', number): test42 + +Row 44: +─────── +exp2(number): 8796093022208 +exp10(number): 1e43 +concat('test', number): test43 + +Row 45: +─────── +exp2(number): 17592186044416 +exp10(number): 1e44 +concat('test', number): test44 + +Row 46: +─────── +exp2(number): 35184372088832 +exp10(number): 1e45 +concat('test', number): test45 + +Row 47: +─────── +exp2(number): 70368744177664 +exp10(number): 1e46 +concat('test', number): test46 + +Row 48: +─────── +exp2(number): 140737488355328 +exp10(number): 1e47 +concat('test', number): test47 + +Row 49: +─────── +exp2(number): 281474976710656 +exp10(number): 1e48 +concat('test', number): test48 + +Row 50: +─────── +exp2(number): 562949953421312 +exp10(number): 1e49 +concat('test', number): test49 + +Row 51: +─────── +exp2(number): 1125899906842624 +exp10(number): 1e50 +concat('test', number): test50 + +Row 52: +─────── +exp2(number): 2251799813685248 +exp10(number): 1e51 +concat('test', number): test51 + +Row 53: +─────── +exp2(number): 4503599627370496 +exp10(number): 1e52 +concat('test', number): test52 + +Row 54: +─────── +exp2(number): 9007199254740992 +exp10(number): 1e53 +concat('test', number): test53 + +Row 55: +─────── +exp2(number): 18014398509481984 +exp10(number): 1e54 +concat('test', number): test54 + +Row 56: +─────── +exp2(number): 36028797018963970 +exp10(number): 1e55 +concat('test', number): test55 + +Row 57: +─────── +exp2(number): 72057594037927940 +exp10(number): 1e56 +concat('test', number): test56 + +Row 58: +─────── +exp2(number): 144115188075855870 +exp10(number): 1e57 +concat('test', number): test57 + +Row 59: +─────── +exp2(number): 288230376151711740 +exp10(number): 1e58 +concat('test', number): test58 + +Row 60: +─────── +exp2(number): 576460752303423500 +exp10(number): 1e59 +concat('test', number): test59 + +Row 61: +─────── +exp2(number): 1152921504606847000 +exp10(number): 1e60 +concat('test', number): test60 + +Row 62: +─────── +exp2(number): 2305843009213694000 +exp10(number): 1e61 +concat('test', number): test61 + +Row 63: +─────── +exp2(number): 4611686018427388000 +exp10(number): 1e62 +concat('test', number): test62 + +Row 64: +─────── +exp2(number): 9223372036854776000 +exp10(number): 1e63 +concat('test', number): test63 diff --git a/tests/queries/0_stateless/03268_vertical_pretty_numbers.sql b/tests/queries/0_stateless/03268_vertical_pretty_numbers.sql new file mode 100644 index 00000000000..0462134ed63 --- /dev/null +++ b/tests/queries/0_stateless/03268_vertical_pretty_numbers.sql @@ -0,0 +1,11 @@ +SET output_format_pretty_color = 1, output_format_pretty_highlight_digit_groups = 1, output_format_pretty_single_large_number_tip_threshold = 1; +SELECT exp2(number), exp10(number), 'test'||number FROM numbers(64) FORMAT Vertical; + +SET output_format_pretty_color = 0, output_format_pretty_highlight_digit_groups = 1, output_format_pretty_single_large_number_tip_threshold = 1; +SELECT exp2(number), exp10(number), 'test'||number FROM numbers(64) FORMAT Vertical; + 
+SET output_format_pretty_color = 1, output_format_pretty_highlight_digit_groups = 0, output_format_pretty_single_large_number_tip_threshold = 1; +SELECT exp2(number), exp10(number), 'test'||number FROM numbers(64) FORMAT Vertical; + +SET output_format_pretty_color = 0, output_format_pretty_highlight_digit_groups = 0, output_format_pretty_single_large_number_tip_threshold = 0; +SELECT exp2(number), exp10(number), 'test'||number FROM numbers(64) FORMAT Vertical; From a2220233b75a5da3fd5408f77a87df7c6c5e51d2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 8 Nov 2024 00:25:49 +0100 Subject: [PATCH 296/566] Fix test --- .../0_stateless/02050_clickhouse_local_parsing_exception.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02050_clickhouse_local_parsing_exception.sh b/tests/queries/0_stateless/02050_clickhouse_local_parsing_exception.sh index 7a92fa6fefe..65563837f55 100755 --- a/tests/queries/0_stateless/02050_clickhouse_local_parsing_exception.sh +++ b/tests/queries/0_stateless/02050_clickhouse_local_parsing_exception.sh @@ -4,5 +4,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --query="SELECT number FROM system.numbers INTO OUTFILE test.native.zst FORMAT Native" 2>&1 | grep -q "Code: 62. DB::Exception: Syntax error: failed at position 48 ('test'): test.native.zst FORMAT Native. Expected string literal." && echo 'OK' || echo 'FAIL' ||: - +$CLICKHOUSE_LOCAL --query="SELECT number FROM system.numbers INTO OUTFILE test.native.zst FORMAT Native" 2>&1 | grep -q "Code: 62. DB::Exception: Syntax error: failed at position 48 ('test'): test.native.zst FORMAT Native." && echo 'OK' || echo 'FAIL' ||: From 5ceb19453d108163880e9d7fdd06ec4858606c52 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 8 Nov 2024 00:26:58 +0100 Subject: [PATCH 297/566] Fix style --- src/Formats/PrettyFormatHelpers.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Formats/PrettyFormatHelpers.h b/src/Formats/PrettyFormatHelpers.h index 72ab5e3c2a0..b5d679c5a42 100644 --- a/src/Formats/PrettyFormatHelpers.h +++ b/src/Formats/PrettyFormatHelpers.h @@ -1,5 +1,8 @@ +#pragma once + #include + namespace DB { From dd5a573302a3e38e15b3bfbc8cafe75cdc22cc7c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 8 Nov 2024 00:50:13 +0100 Subject: [PATCH 298/566] Reset MergeTree to master --- src/Storages/MergeTree/DataPartsExchange.cpp | 2 +- .../MergeTree/FutureMergedMutatedPart.h | 1 + src/Storages/MergeTree/IDataPartStorage.h | 3 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.h | 3 + .../MergeTree/IMergeTreeDataPartWriter.cpp | 29 +- .../MergeTree/IMergeTreeDataPartWriter.h | 8 + src/Storages/MergeTree/IMergeTreeReader.h | 1 + .../MergeTree/IMergedBlockOutputStream.cpp | 1 - .../MergeTree/IMergedBlockOutputStream.h | 11 +- src/Storages/MergeTree/KeyCondition.cpp | 25 ++ .../MergeTree/MergeFromLogEntryTask.cpp | 9 +- .../MergeTree/MergePlainMergeTreeTask.cpp | 37 +- .../MergeTree/MergeProjectionPartsTask.cpp | 3 + .../MergeSelectors/TrivialMergeSelector.cpp | 94 +++++ .../MergeSelectors/TrivialMergeSelector.h | 32 ++ .../MergeSelectors/registerMergeSelectors.cpp | 2 + src/Storages/MergeTree/MergeTask.cpp | 42 +- src/Storages/MergeTree/MergeTask.h | 9 + src/Storages/MergeTree/MergeTreeData.cpp | 160 ++++++- src/Storages/MergeTree/MergeTreeData.h | 6 +- .../MergeTree/MergeTreeDataFormatVersion.h | 4 +- 
.../MergeTree/MergeTreeDataMergerMutator.cpp | 34 +- .../MergeTree/MergeTreeDataMergerMutator.h | 2 + .../MergeTree/MergeTreeDataPartBuilder.cpp | 18 +- .../MergeTree/MergeTreeDataPartBuilder.h | 12 +- .../MergeTree/MergeTreeDataPartCompact.cpp | 35 +- .../MergeTree/MergeTreeDataPartCompact.h | 2 + .../MergeTree/MergeTreeDataPartType.h | 1 + .../MergeTree/MergeTreeDataPartWide.cpp | 50 ++- .../MergeTree/MergeTreeDataPartWide.h | 2 + .../MergeTreeDataPartWriterCompact.cpp | 56 +-- .../MergeTreeDataPartWriterCompact.h | 8 +- .../MergeTreeDataPartWriterOnDisk.cpp | 206 ++++----- .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 40 +- .../MergeTree/MergeTreeDataPartWriterWide.cpp | 81 ++-- .../MergeTree/MergeTreeDataPartWriterWide.h | 13 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 43 +- .../MergeTree/MergeTreeDataSelectExecutor.h | 6 +- .../MergeTree/MergeTreeDataWriter.cpp | 15 +- .../MergeTree/MergeTreeIOSettings.cpp | 4 +- src/Storages/MergeTree/MergeTreeIOSettings.h | 5 +- .../MergeTree/MergeTreeIndexGranularity.cpp | 18 +- .../MergeTree/MergeTreeIndexGranularity.h | 2 +- .../MergeTreeIndexGranularityInfo.cpp | 8 + .../MergeTree/MergeTreeIndexGranularityInfo.h | 1 + .../MergeTreeIndexVectorSimilarity.cpp | 67 +-- .../MergeTreeIndexVectorSimilarity.h | 14 +- .../MergeTree/MergeTreeMarksLoader.cpp | 29 ++ src/Storages/MergeTree/MergeTreeMarksLoader.h | 13 +- .../MergeTree/MergeTreeMutationStatus.cpp | 4 +- src/Storages/MergeTree/MergeTreePartInfo.h | 7 + .../MergeTree/MergeTreePartsMover.cpp | 2 +- .../MergeTree/MergeTreePrefetchedReadPool.cpp | 36 +- .../MergeTree/MergeTreePrefetchedReadPool.h | 1 + src/Storages/MergeTree/MergeTreeRangeReader.h | 2 +- src/Storages/MergeTree/MergeTreeReadPool.cpp | 2 + src/Storages/MergeTree/MergeTreeReadPool.h | 1 + .../MergeTree/MergeTreeReadPoolBase.cpp | 105 +++-- .../MergeTree/MergeTreeReadPoolBase.h | 4 + .../MergeTree/MergeTreeReadPoolInOrder.cpp | 2 + .../MergeTree/MergeTreeReadPoolInOrder.h | 1 + .../MergeTreeReadPoolParallelReplicas.cpp | 2 + .../MergeTreeReadPoolParallelReplicas.h | 1 + ...rgeTreeReadPoolParallelReplicasInOrder.cpp | 2 + ...MergeTreeReadPoolParallelReplicasInOrder.h | 1 + src/Storages/MergeTree/MergeTreeReadTask.cpp | 25 +- src/Storages/MergeTree/MergeTreeReadTask.h | 10 +- .../MergeTree/MergeTreeReaderWide.cpp | 2 +- .../MergeTree/MergeTreeSelectAlgorithms.cpp | 5 +- .../MergeTree/MergeTreeSelectAlgorithms.h | 8 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 4 +- .../MergeTree/MergeTreeSelectProcessor.h | 1 - src/Storages/MergeTree/MergeTreeSettings.cpp | 397 +++++++++--------- src/Storages/MergeTree/MergeTreeSink.cpp | 21 +- .../MergeTree/MergedBlockOutputStream.cpp | 2 + .../MergeTree/MergedBlockOutputStream.h | 1 + .../MergedColumnOnlyOutputStream.cpp | 5 +- .../MergeTree/MergedColumnOnlyOutputStream.h | 1 + .../MergeTree/MutateFromLogEntryTask.cpp | 4 + .../MergeTree/MutatePlainMergeTreeTask.cpp | 4 + src/Storages/MergeTree/MutateTask.cpp | 5 +- .../ReplicatedMergeTreeAttachThread.cpp | 90 +--- .../ReplicatedMergeTreeAttachThread.h | 2 - .../MergeTree/ReplicatedMergeTreeQueue.cpp | 2 +- .../MergeTree/ReplicatedMergeTreeQueue.h | 1 + .../ReplicatedMergeTreeRestartingThread.cpp | 92 ++++ .../ReplicatedMergeTreeRestartingThread.h | 4 + .../MergeTree/ReplicatedMergeTreeSink.cpp | 36 +- src/Storages/MergeTree/checkDataPart.cpp | 2 +- 90 files changed, 1398 insertions(+), 768 deletions(-) create mode 100644 src/Storages/MergeTree/MergeSelectors/TrivialMergeSelector.cpp create mode 100644 
src/Storages/MergeTree/MergeSelectors/TrivialMergeSelector.h diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index e13ec5a7515..1d79ae5aacb 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -908,7 +908,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( { part_storage_for_loading->commitTransaction(); - MergeTreeDataPartBuilder builder(data, part_name, volume, part_relative_path, part_dir); + MergeTreeDataPartBuilder builder(data, part_name, volume, part_relative_path, part_dir, getReadSettings()); new_data_part = builder.withPartFormatFromDisk().build(); new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr); diff --git a/src/Storages/MergeTree/FutureMergedMutatedPart.h b/src/Storages/MergeTree/FutureMergedMutatedPart.h index 09fb7b01678..ca607bb4e33 100644 --- a/src/Storages/MergeTree/FutureMergedMutatedPart.h +++ b/src/Storages/MergeTree/FutureMergedMutatedPart.h @@ -22,6 +22,7 @@ struct FutureMergedMutatedPart MergeTreeDataPartFormat part_format; MergeTreePartInfo part_info; MergeTreeData::DataPartsVector parts; + std::vector blocking_parts_to_remove; MergeType merge_type = MergeType::Regular; const MergeTreePartition & getPartition() const { return parts.front()->partition; } diff --git a/src/Storages/MergeTree/IDataPartStorage.h b/src/Storages/MergeTree/IDataPartStorage.h index a09c24c63ab..49d9fbf2291 100644 --- a/src/Storages/MergeTree/IDataPartStorage.h +++ b/src/Storages/MergeTree/IDataPartStorage.h @@ -1,5 +1,4 @@ #pragma once -#include #include #include #include @@ -16,7 +15,7 @@ namespace DB { - +struct ReadSettings; class ReadBufferFromFileBase; class WriteBufferFromFileBase; diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 20d7528d38a..41783ffddb0 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -833,7 +833,7 @@ MergeTreeDataPartBuilder IMergeTreeDataPart::getProjectionPartBuilder(const Stri { const char * projection_extension = is_temp_projection ? ".tmp_proj" : ".proj"; auto projection_storage = getDataPartStorage().getProjection(projection_name + projection_extension, !is_temp_projection); - MergeTreeDataPartBuilder builder(storage, projection_name, projection_storage); + MergeTreeDataPartBuilder builder(storage, projection_name, projection_storage, getReadSettings()); return builder.withPartInfo(MergeListElement::FAKE_RESULT_PART_FOR_PROJECTION).withParentPart(this); } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 378832d32a1..b41a1d840e1 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -180,6 +180,9 @@ public: void loadRowsCountFileForUnexpectedPart(); + /// Loads marks and saves them into mark cache for specified columns. + virtual void loadMarksToCache(const Names & column_names, MarkCache * mark_cache) const = 0; + String getMarksFileExtension() const { return index_granularity_info.mark_type.getFileExtension(); } /// Generate the new name for this part according to `new_part_info` and min/max dates from the old name. 
diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp index a9f188338e1..dbfdbbdea88 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp @@ -91,6 +91,13 @@ Columns IMergeTreeDataPartWriter::releaseIndexColumns() return result; } +PlainMarksByName IMergeTreeDataPartWriter::releaseCachedMarks() +{ + PlainMarksByName res; + std::swap(cached_marks, res); + return res; +} + SerializationPtr IMergeTreeDataPartWriter::getSerialization(const String & column_name) const { auto it = serializations.find(column_name); @@ -178,24 +185,9 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWriter( const MergeTreeIndexGranularity & computed_index_granularity) { if (part_type == MergeTreeDataPartType::Compact) - return createMergeTreeDataPartCompactWriter( - data_part_name_, - logger_name_, - serializations_, - data_part_storage_, - index_granularity_info_, - storage_settings_, - columns_list, - column_positions, - metadata_snapshot, - virtual_columns, - indices_to_recalc, - stats_to_recalc_, - marks_file_extension_, - default_codec_, - writer_settings, - computed_index_granularity); - + return createMergeTreeDataPartCompactWriter(data_part_name_, logger_name_, serializations_, data_part_storage_, + index_granularity_info_, storage_settings_, columns_list, column_positions, metadata_snapshot, virtual_columns, indices_to_recalc, stats_to_recalc_, + marks_file_extension_, default_codec_, writer_settings, computed_index_granularity); if (part_type == MergeTreeDataPartType::Wide) return createMergeTreeDataPartWideWriter( data_part_name_, @@ -213,7 +205,6 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWriter( default_codec_, writer_settings, computed_index_granularity); - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown part type: {}", part_type.toString()); } diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index eb51a1b2922..d1c76505d7c 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB @@ -45,7 +46,12 @@ public: virtual void finish(bool sync) = 0; + virtual size_t getNumberOfOpenStreams() const = 0; + Columns releaseIndexColumns(); + + PlainMarksByName releaseCachedMarks(); + const MergeTreeIndexGranularity & getIndexGranularity() const { return index_granularity; } protected: @@ -69,6 +75,8 @@ protected: MutableDataPartStoragePtr data_part_storage; MutableColumns index_columns; MergeTreeIndexGranularity index_granularity; + /// Marks that will be saved to cache on finish. 
+ PlainMarksByName cached_marks; }; using MergeTreeDataPartWriterPtr = std::unique_ptr; diff --git a/src/Storages/MergeTree/IMergeTreeReader.h b/src/Storages/MergeTree/IMergeTreeReader.h index d799ce57b40..c68617d3995 100644 --- a/src/Storages/MergeTree/IMergeTreeReader.h +++ b/src/Storages/MergeTree/IMergeTreeReader.h @@ -18,6 +18,7 @@ public: using ValueSizeMap = std::map; using VirtualFields = std::unordered_map; using DeserializeBinaryBulkStateMap = std::map; + using FileStreams = std::map>; IMergeTreeReader( MergeTreeDataPartInfoForReaderPtr data_part_info_for_read_, diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp index 209b274ee6a..eb904a8e2ef 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp @@ -4,7 +4,6 @@ #include #include - namespace DB { diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h index f67cf66ee50..7dd6d720170 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -7,7 +7,6 @@ #include #include - namespace DB { @@ -35,6 +34,16 @@ public: return writer->getIndexGranularity(); } + PlainMarksByName releaseCachedMarks() + { + return writer->releaseCachedMarks(); + } + + size_t getNumberOfOpenStreams() const + { + return writer->getNumberOfOpenStreams(); + } + protected: /// Remove all columns marked expired in data_part. Also, clears checksums diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 1506dc38946..17723d341fb 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -1446,6 +1447,30 @@ public: IFunctionBase::Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override { + if (const auto * adaptor = typeid_cast(func.get())) + { + if (dynamic_cast(adaptor->getFunction().get()) && kind == Kind::RIGHT_CONST) + { + auto time_zone = extractTimeZoneNameFromColumn(const_arg.column.get(), const_arg.name); + + const IDataType * type_ptr = &type; + if (const auto * low_cardinality_type = typeid_cast(type_ptr)) + type_ptr = low_cardinality_type->getDictionaryType().get(); + + if (type_ptr->isNullable()) + type_ptr = static_cast(*type_ptr).getNestedType().get(); + + DataTypePtr type_with_time_zone; + if (typeid_cast(type_ptr)) + type_with_time_zone = std::make_shared(time_zone); + else if (const auto * dt64 = typeid_cast(type_ptr)) + type_with_time_zone = std::make_shared(dt64->getScale(), time_zone); + else + return {}; /// In case we will have other types with time zone + + return func->getMonotonicityForRange(*type_with_time_zone, left, right); + } + } return func->getMonotonicityForRange(type, left, right); } diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index 56d7133dfc3..d7e807c689f 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -335,6 +335,10 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() future_merged_part, task_context); + storage.writePartLog( + PartLogElement::MERGE_PARTS_START, {}, 0, + entry.new_part_name, part, parts, merge_mutate_entry.get(), {}); + transaction_ptr = std::make_unique(storage, 
NO_TRANSACTION_RAW); merge_task = storage.merger_mutator.mergePartsToTemporaryPart( @@ -352,7 +356,6 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() storage.merging_params, NO_TRANSACTION_PTR); - /// Adjust priority for (auto & item : future_merged_part->parts) priority.value += item->getBytesOnDisk(); @@ -368,6 +371,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() bool MergeFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWriter write_part_log) { part = merge_task->getFuture().get(); + auto cached_marks = merge_task->releaseCachedMarks(); storage.merger_mutator.renameMergedTemporaryPart(part, parts, NO_TRANSACTION_PTR, *transaction_ptr); /// Why we reset task here? Because it holds shared pointer to part and tryRemovePartImmediately will @@ -441,6 +445,9 @@ bool MergeFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrite finish_callback = [storage_ptr = &storage]() { storage_ptr->merge_selecting_task->schedule(); }; ProfileEvents::increment(ProfileEvents::ReplicatedPartMerges); + if (auto * mark_cache = storage.getContext()->getMarkCache().get()) + addMarksToCache(*part, cached_marks, mark_cache); + write_part_log({}); StorageReplicatedMergeTree::incrementMergedPartsProfileEvent(part->getType()); diff --git a/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp index be44177847c..6aca58faf47 100644 --- a/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp @@ -92,6 +92,10 @@ void MergePlainMergeTreeTask::prepare() future_part, task_context); + storage.writePartLog( + PartLogElement::MERGE_PARTS_START, {}, 0, + future_part->name, new_part, future_part->parts, merge_list_entry.get(), {}); + write_part_log = [this] (const ExecutionStatus & execution_status) { auto profile_counters_snapshot = std::make_shared(profile_counters.getPartiallyAtomicSnapshot()); @@ -121,19 +125,19 @@ void MergePlainMergeTreeTask::prepare() }; merge_task = storage.merger_mutator.mergePartsToTemporaryPart( - future_part, - metadata_snapshot, - merge_list_entry.get(), - {} /* projection_merge_list_element */, - table_lock_holder, - time(nullptr), - task_context, - merge_mutate_entry->tagger->reserved_space, - deduplicate, - deduplicate_by_columns, - cleanup, - storage.merging_params, - txn); + future_part, + metadata_snapshot, + merge_list_entry.get(), + {} /* projection_merge_list_element */, + table_lock_holder, + time(nullptr), + task_context, + merge_mutate_entry->tagger->reserved_space, + deduplicate, + deduplicate_by_columns, + cleanup, + storage.merging_params, + txn); } @@ -148,6 +152,12 @@ void MergePlainMergeTreeTask::finish() ThreadFuzzer::maybeInjectSleep(); ThreadFuzzer::maybeInjectMemoryLimitException(); + if (auto * mark_cache = storage.getContext()->getMarkCache().get()) + { + auto marks = merge_task->releaseCachedMarks(); + addMarksToCache(*new_part, marks, mark_cache); + } + write_part_log({}); StorageMergeTree::incrementMergedPartsProfileEvent(new_part->getType()); transfer_profile_counters_to_initial_query(); @@ -159,7 +169,6 @@ void MergePlainMergeTreeTask::finish() ThreadFuzzer::maybeInjectSleep(); ThreadFuzzer::maybeInjectMemoryLimitException(); } - } ContextMutablePtr MergePlainMergeTreeTask::createTaskContext() const diff --git a/src/Storages/MergeTree/MergeProjectionPartsTask.cpp b/src/Storages/MergeTree/MergeProjectionPartsTask.cpp index 4e1bb2f11a7..34cd925a8c6 100644 --- 
a/src/Storages/MergeTree/MergeProjectionPartsTask.cpp +++ b/src/Storages/MergeTree/MergeProjectionPartsTask.cpp @@ -83,6 +83,9 @@ bool MergeProjectionPartsTask::executeStep() ".tmp_proj"); next_level_parts.push_back(executeHere(tmp_part_merge_task)); + /// FIXME (alesapin) we should use some temporary storage for this, + /// not commit each subprojection part + next_level_parts.back()->getDataPartStorage().commitTransaction(); next_level_parts.back()->is_temp = true; } diff --git a/src/Storages/MergeTree/MergeSelectors/TrivialMergeSelector.cpp b/src/Storages/MergeTree/MergeSelectors/TrivialMergeSelector.cpp new file mode 100644 index 00000000000..cd1fa7b01cd --- /dev/null +++ b/src/Storages/MergeTree/MergeSelectors/TrivialMergeSelector.cpp @@ -0,0 +1,94 @@ +#include +#include + +#include +#include + +#include + + +namespace DB +{ + +void registerTrivialMergeSelector(MergeSelectorFactory & factory) +{ + factory.registerPublicSelector("Trivial", MergeSelectorAlgorithm::TRIVIAL, [](const std::any &) + { + return std::make_shared(); + }); +} + +TrivialMergeSelector::PartsRange TrivialMergeSelector::select( + const PartsRanges & parts_ranges, + size_t max_total_size_to_merge) +{ + size_t num_partitions = parts_ranges.size(); + if (num_partitions == 0) + return {}; + + /// Sort partitions from the largest to smallest in the number of parts. + std::vector sorted_partition_indices; + sorted_partition_indices.reserve(num_partitions); + for (size_t i = 0; i < num_partitions; ++i) + if (parts_ranges[i].size() >= settings.num_parts_to_merge) + sorted_partition_indices.emplace_back(i); + + if (sorted_partition_indices.empty()) + return {}; + + std::sort(sorted_partition_indices.begin(), sorted_partition_indices.end(), + [&](size_t i, size_t j){ return parts_ranges[i].size() > parts_ranges[j].size(); }); + + size_t partition_idx = 0; + size_t left = 0; + size_t right = 0; + + std::vector candidates; + while (candidates.size() < settings.num_ranges_to_choose) + { + const PartsRange & partition = parts_ranges[partition_idx]; + + if (1 + right - left == settings.num_parts_to_merge) + { + ++right; + + size_t total_size = 0; + for (size_t i = left; i < right; ++i) + total_size += partition[i].size; + + if (!max_total_size_to_merge || total_size <= max_total_size_to_merge) + { + candidates.emplace_back(partition.data() + left, partition.data() + right); + if (candidates.size() == settings.num_ranges_to_choose) + break; + } + + left = right; + } + + if (partition.size() - left < settings.num_parts_to_merge) + { + ++partition_idx; + if (partition_idx == sorted_partition_indices.size()) + break; + + left = 0; + right = 0; + } + + ++right; + + if (right < partition.size() && partition[right].level < partition[left].level) + left = right; + } + + if (candidates.empty()) + return {}; + + if (candidates.size() == 1) + return candidates[0]; + + return candidates[thread_local_rng() % candidates.size()]; +} + +} diff --git a/src/Storages/MergeTree/MergeSelectors/TrivialMergeSelector.h b/src/Storages/MergeTree/MergeSelectors/TrivialMergeSelector.h new file mode 100644 index 00000000000..6d989aea0fb --- /dev/null +++ b/src/Storages/MergeTree/MergeSelectors/TrivialMergeSelector.h @@ -0,0 +1,32 @@ +#pragma once + +#include + + +namespace DB +{ + +/** Go through partitions starting from the largest (in the number of parts). + * Go through parts from left to right. + * Find the first range of N parts where their level is not decreasing. + * Then continue finding these ranges and find up to M of these ranges. 
+ * Choose a random one from them. + */ +class TrivialMergeSelector : public IMergeSelector +{ +public: + struct Settings + { + size_t num_parts_to_merge = 10; + size_t num_ranges_to_choose = 100; + }; + + PartsRange select( + const PartsRanges & parts_ranges, + size_t max_total_size_to_merge) override; + +private: + const Settings settings; +}; + +} diff --git a/src/Storages/MergeTree/MergeSelectors/registerMergeSelectors.cpp b/src/Storages/MergeTree/MergeSelectors/registerMergeSelectors.cpp index 61f941adc36..6a3c1ef4b2b 100644 --- a/src/Storages/MergeTree/MergeSelectors/registerMergeSelectors.cpp +++ b/src/Storages/MergeTree/MergeSelectors/registerMergeSelectors.cpp @@ -7,6 +7,7 @@ namespace DB void registerSimpleMergeSelector(MergeSelectorFactory & factory); void registerStochasticSimpleMergeSelector(MergeSelectorFactory & factory); +void registerTrivialMergeSelector(MergeSelectorFactory & factory); void registerAllMergeSelector(MergeSelectorFactory & factory); void registerTTLDeleteMergeSelector(MergeSelectorFactory & factory); void registerTTLRecompressMergeSelector(MergeSelectorFactory & factory); @@ -17,6 +18,7 @@ void registerMergeSelectors() registerSimpleMergeSelector(factory); registerStochasticSimpleMergeSelector(factory); + registerTrivialMergeSelector(factory); registerAllMergeSelector(factory); registerTTLDeleteMergeSelector(factory); registerTTLRecompressMergeSelector(factory); diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index b03fb1b12cf..08066113375 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -40,10 +40,22 @@ #include #include +#ifndef NDEBUG + #include +#endif + +#ifdef CLICKHOUSE_CLOUD + #include + #include + #include + #include +#endif + namespace ProfileEvents { extern const Event Merge; + extern const Event MergeSourceParts; extern const Event MergedColumns; extern const Event GatheredColumns; extern const Event MergeTotalMilliseconds; @@ -81,6 +93,7 @@ namespace MergeTreeSetting extern const MergeTreeSettingsUInt64 vertical_merge_algorithm_min_columns_to_activate; extern const MergeTreeSettingsUInt64 vertical_merge_algorithm_min_rows_to_activate; extern const MergeTreeSettingsBool vertical_merge_remote_filesystem_prefetch; + extern const MergeTreeSettingsBool prewarm_mark_cache; } namespace ErrorCodes @@ -295,6 +308,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::extractMergingAndGatheringColu bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const { ProfileEvents::increment(ProfileEvents::Merge); + ProfileEvents::increment(ProfileEvents::MergeSourceParts, global_ctx->future_part->parts.size()); String local_tmp_prefix; if (global_ctx->need_prefix) @@ -335,13 +349,13 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const if (global_ctx->parent_part) { auto data_part_storage = global_ctx->parent_part->getDataPartStorage().getProjection(local_tmp_part_basename, /* use parent transaction */ false); - builder.emplace(*global_ctx->data, global_ctx->future_part->name, data_part_storage); + builder.emplace(*global_ctx->data, global_ctx->future_part->name, data_part_storage, getReadSettings()); builder->withParentPart(global_ctx->parent_part); } else { auto local_single_disk_volume = std::make_shared("volume_" + global_ctx->future_part->name, global_ctx->disk, 0); - builder.emplace(global_ctx->data->getDataPartBuilder(global_ctx->future_part->name, local_single_disk_volume, local_tmp_part_basename)); + 
builder.emplace(global_ctx->data->getDataPartBuilder(global_ctx->future_part->name, local_single_disk_volume, local_tmp_part_basename, getReadSettings())); builder->withPartStorageType(global_ctx->future_part->part_format.storage_type); } @@ -533,6 +547,8 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const } } + bool save_marks_in_cache = (*global_ctx->data->getSettings())[MergeTreeSetting::prewarm_mark_cache] && global_ctx->context->getMarkCache(); + global_ctx->to = std::make_shared( global_ctx->new_data_part, global_ctx->metadata_snapshot, @@ -542,6 +558,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const ctx->compression_codec, global_ctx->txn ? global_ctx->txn->tid : Tx::PrehistoricTID, /*reset_columns=*/ true, + save_marks_in_cache, ctx->blocks_are_granules_size, global_ctx->context->getWriteSettings()); @@ -1072,6 +1089,8 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const ctx->executor = std::make_unique(ctx->column_parts_pipeline); NamesAndTypesList columns_list = {*ctx->it_name_and_type}; + bool save_marks_in_cache = (*global_ctx->data->getSettings())[MergeTreeSetting::prewarm_mark_cache] && global_ctx->context->getMarkCache(); + ctx->column_to = std::make_unique( global_ctx->new_data_part, global_ctx->metadata_snapshot, @@ -1080,6 +1099,7 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const column_pipepline.indexes_to_recalc, getStatisticsForColumns(columns_list, global_ctx->metadata_snapshot), &global_ctx->written_offset_columns, + save_marks_in_cache, global_ctx->to->getIndexGranularity()); ctx->column_elems_written = 0; @@ -1117,6 +1137,10 @@ void MergeTask::VerticalMergeStage::finalizeVerticalMergeForOneColumn() const auto changed_checksums = ctx->column_to->fillChecksums(global_ctx->new_data_part, global_ctx->checksums_gathered_columns); global_ctx->checksums_gathered_columns.add(std::move(changed_checksums)); + auto cached_marks = ctx->column_to->releaseCachedMarks(); + for (auto & [name, marks] : cached_marks) + global_ctx->cached_marks.emplace(name, std::move(marks)); + ctx->delayed_streams.emplace_back(std::move(ctx->column_to)); while (ctx->delayed_streams.size() > ctx->max_delayed_streams) @@ -1263,6 +1287,10 @@ bool MergeTask::MergeProjectionsStage::finalizeProjectionsAndWholeMerge() const else global_ctx->to->finalizePart(global_ctx->new_data_part, ctx->need_sync, &global_ctx->storage_columns, &global_ctx->checksums_gathered_columns); + auto cached_marks = global_ctx->to->releaseCachedMarks(); + for (auto & [name, marks] : cached_marks) + global_ctx->cached_marks.emplace(name, std::move(marks)); + global_ctx->new_data_part->getDataPartStorage().precommitTransaction(); global_ctx->promise.set_value(global_ctx->new_data_part); @@ -1385,7 +1413,7 @@ bool MergeTask::execute() } -/// Apply merge strategy (Ordinary, Colapsing, Aggregating, etc) to the stream +/// Apply merge strategy (Ordinary, Collapsing, Aggregating, etc) to the stream class MergePartsStep : public ITransformingStep { public: @@ -1421,7 +1449,7 @@ public: /// that is going in insertion order. ProcessorPtr merged_transform; - const auto &header = pipeline.getHeader(); + const auto & header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); WriteBuffer * rows_sources_write_buf = nullptr; @@ -1690,7 +1718,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const sort_description, partition_key_columns, global_ctx->merging_params, - (is_vertical_merge ? 
RowsSourcesTemporaryFile::FILE_ID : ""), /// rows_sources temporaty file is used only for vertical merge + (is_vertical_merge ? RowsSourcesTemporaryFile::FILE_ID : ""), /// rows_sources' temporary file is used only for vertical merge (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], ctx->blocks_are_granules_size, @@ -1759,6 +1787,10 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_parts_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); + // Merges are not using concurrency control now. Queries and merges running together could lead to CPU overcommit. + // TODO(serxa): Enable concurrency control for merges. This should be done after CPU scheduler introduction. + builder->setConcurrencyControl(false); + global_ctx->merged_pipeline = QueryPipelineBuilder::getPipeline(std::move(*builder)); } diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 5a4fb1ec0b8..53792165987 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -132,6 +133,13 @@ public: return nullptr; } + PlainMarksByName releaseCachedMarks() const + { + PlainMarksByName res; + std::swap(global_ctx->cached_marks, res); + return res; + } + bool execute(); private: @@ -209,6 +217,7 @@ private: std::promise promise{}; IMergedBlockOutputStream::WrittenOffsetColumns written_offset_columns{}; + PlainMarksByName cached_marks; MergeTreeTransactionPtr txn; bool need_prefix; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 72a41fcf2c1..b2f35d0a309 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -154,6 +155,7 @@ namespace namespace DB { + namespace Setting { extern const SettingsBool allow_drop_detached; @@ -229,6 +231,12 @@ namespace MergeTreeSetting extern const MergeTreeSettingsString storage_policy; extern const MergeTreeSettingsFloat zero_copy_concurrent_part_removal_max_postpone_ratio; extern const MergeTreeSettingsUInt64 zero_copy_concurrent_part_removal_max_split_times; + extern const MergeTreeSettingsBool prewarm_mark_cache; +} + +namespace ServerSetting +{ + extern const ServerSettingsDouble mark_cache_prewarm_ratio; } namespace ErrorCodes @@ -261,6 +269,7 @@ namespace ErrorCodes extern const int SUPPORT_IS_DISABLED; extern const int TOO_MANY_SIMULTANEOUS_QUERIES; extern const int INCORRECT_QUERY; + extern const int INVALID_SETTING_VALUE; extern const int CANNOT_RESTORE_TABLE; extern const int ZERO_COPY_REPLICATION_ERROR; extern const int NOT_INITIALIZED; @@ -759,6 +768,16 @@ void MergeTreeData::checkProperties( } } + /// If adaptive index granularity is disabled, certain vector search queries with PREWHERE run into LOGICAL_ERRORs. + /// SET allow_experimental_vector_similarity_index = 1; + /// CREATE TABLE tab (`id` Int32, `vec` Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 100000000) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; + /// INSERT INTO tab SELECT number, [toFloat32(number), 0.] FROM numbers(10000); + /// WITH [1., 0.] 
AS reference_vec SELECT id, L2Distance(vec, reference_vec) FROM tab PREWHERE toLowCardinality(10) ORDER BY L2Distance(vec, reference_vec) ASC LIMIT 100; + /// As a workaround, force enabled adaptive index granularity for now (it is the default anyways). + if (new_metadata.secondary_indices.hasType("vector_similarity") && (*getSettings())[MergeTreeSetting::index_granularity_bytes] == 0) + throw Exception(ErrorCodes::INVALID_SETTING_VALUE, + "Experimental vector similarity index can only be used with MergeTree setting 'index_granularity_bytes' != 0"); + if (!new_metadata.projections.empty()) { std::unordered_set projections_names; @@ -1423,7 +1442,7 @@ void MergeTreeData::loadUnexpectedDataPart(UnexpectedPartLoadState & state) try { - state.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + state.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartInfo(part_info) .withPartFormatFromDisk() .build(); @@ -1438,7 +1457,7 @@ void MergeTreeData::loadUnexpectedDataPart(UnexpectedPartLoadState & state) /// Build a fake part and mark it as broken in case of filesystem error. /// If the error impacts part directory instead of single files, /// an exception will be thrown during detach and silently ignored. - state.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + state.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartStorageType(MergeTreeDataPartStorageType::Full) .withPartType(MergeTreeDataPartType::Wide) .build(); @@ -1472,7 +1491,7 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( /// Build a fake part and mark it as broken in case of filesystem error. /// If the error impacts part directory instead of single files, /// an exception will be thrown during detach and silently ignored. - res.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + res.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartStorageType(MergeTreeDataPartStorageType::Full) .withPartType(MergeTreeDataPartType::Wide) .build(); @@ -1493,7 +1512,7 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( try { - res.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + res.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartInfo(part_info) .withPartFormatFromDisk() .build(); @@ -2324,6 +2343,60 @@ void MergeTreeData::stopOutdatedAndUnexpectedDataPartsLoadingTask() } } +void MergeTreeData::prewarmMarkCacheIfNeeded(ThreadPool & pool) +{ + if (!(*getSettings())[MergeTreeSetting::prewarm_mark_cache]) + return; + + prewarmMarkCache(pool); +} + +void MergeTreeData::prewarmMarkCache(ThreadPool & pool) +{ + auto * mark_cache = getContext()->getMarkCache().get(); + if (!mark_cache) + return; + + auto metadata_snaphost = getInMemoryMetadataPtr(); + auto column_names = getColumnsToPrewarmMarks(*getSettings(), metadata_snaphost->getColumns().getAllPhysical()); + + if (column_names.empty()) + return; + + Stopwatch watch; + LOG_TRACE(log, "Prewarming mark cache"); + + auto data_parts = getDataPartsVectorForInternalUsage(); + + /// Prewarm mark cache firstly for the most fresh parts according + /// to time columns in partition key (if exists) and by modification time. 
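[Editor's note] An aside on the prewarming order used just below: parts are sorted newest-first by (max date, max time, modification time) and marks are loaded until the mark cache reaches a configured fraction of its capacity. The following standalone C++ sketch is not ClickHouse code; Part, the byte counts and the capacity numbers are hypothetical stand-ins that only mirror the sort-descending-then-stop-at-ratio pattern added by this hunk.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <tuple>
#include <vector>

// Hypothetical stand-in for a data part: only the fields used by the ordering key.
struct Part
{
    int max_date;              // max value of a Date column from the partition key, if any
    int64_t max_time;          // max value of a DateTime column, if any
    int64_t modification_time; // fallback ordering key
    uint64_t marks_bytes;      // how much this part's marks would add to the cache
};

int main()
{
    std::vector<Part> parts = {{20240101, 10, 100, 64}, {20240301, 5, 300, 64}, {20240201, 7, 200, 64}};

    // Newest parts first: compare (max_date, max_time, modification_time) descending.
    std::sort(parts.begin(), parts.end(), [](const Part & lhs, const Part & rhs)
    {
        return std::make_tuple(lhs.max_date, lhs.max_time, lhs.modification_time)
             > std::make_tuple(rhs.max_date, rhs.max_time, rhs.modification_time);
    });

    const uint64_t cache_capacity = 160;  // pretend mark cache capacity in bytes
    const double ratio_to_prewarm = 0.9;  // analogous to mark_cache_prewarm_ratio
    uint64_t cache_size = 0;

    for (const auto & part : parts)
    {
        if (cache_size >= cache_capacity * ratio_to_prewarm)
            break;                        // budget exhausted, older parts stay cold
        cache_size += part.marks_bytes;   // in the real code: part->loadMarksToCache(...)
        std::cout << "prewarmed part with max_date " << part.max_date << '\n';
    }
}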
+ + auto to_tuple = [](const auto & part) + { + return std::make_tuple(part->getMinMaxDate().second, part->getMinMaxTime().second, part->modification_time); + }; + + std::sort(data_parts.begin(), data_parts.end(), [&to_tuple](const auto & lhs, const auto & rhs) + { + return to_tuple(lhs) > to_tuple(rhs); + }); + + ThreadPoolCallbackRunnerLocal runner(pool, "PrewarmMarks"); + double ratio_to_prewarm = getContext()->getServerSettings()[ServerSetting::mark_cache_prewarm_ratio]; + + for (const auto & part : data_parts) + { + if (mark_cache->sizeInBytes() >= mark_cache->maxSizeInBytes() * ratio_to_prewarm) + break; + + runner([&] { part->loadMarksToCache(column_names, mark_cache); }); + } + + runner.waitForAllToFinishAndRethrowFirstError(); + watch.stop(); + LOG_TRACE(log, "Prewarmed mark cache in {} seconds", watch.elapsedSeconds()); +} + /// Is the part directory old. /// True if its modification time and the modification time of all files inside it is less then threshold. /// (Only files on the first level of nesting are considered). @@ -2655,6 +2728,10 @@ void MergeTreeData::removePartsFinally(const MergeTreeData::DataPartsVector & pa for (const auto & part : parts) { part_log_elem.partition_id = part->info.partition_id; + { + WriteBufferFromString out(part_log_elem.partition); + part->partition.serializeText(part->storage, out, {}); + } part_log_elem.part_name = part->name; part_log_elem.bytes_compressed_on_disk = part->getBytesOnDisk(); part_log_elem.bytes_uncompressed = part->getBytesUncompressedOnDisk(); @@ -3310,6 +3387,16 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental vector similarity index is disabled (turn on setting 'allow_experimental_vector_similarity_index')"); + /// If adaptive index granularity is disabled, certain vector search queries with PREWHERE run into LOGICAL_ERRORs. + /// SET allow_experimental_vector_similarity_index = 1; + /// CREATE TABLE tab (`id` Int32, `vec` Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 100000000) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; + /// INSERT INTO tab SELECT number, [toFloat32(number), 0.] FROM numbers(10000); + /// WITH [1., 0.] AS reference_vec SELECT id, L2Distance(vec, reference_vec) FROM tab PREWHERE toLowCardinality(10) ORDER BY L2Distance(vec, reference_vec) ASC LIMIT 100; + /// As a workaround, force enabled adaptive index granularity for now (it is the default anyways). 
+ if (AlterCommands::hasVectorSimilarityIndex(new_metadata) && (*getSettings())[MergeTreeSetting::index_granularity_bytes] == 0) + throw Exception(ErrorCodes::INVALID_SETTING_VALUE, + "Experimental vector similarity index can only be used with MergeTree setting 'index_granularity_bytes' != 0"); + for (const auto & disk : getDisks()) if (!disk->supportsHardLinks() && !commands.isSettingsAlter() && !commands.isCommentAlter()) throw Exception( @@ -3622,6 +3709,9 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context const auto & new_changes = new_metadata.settings_changes->as().changes; local_context->checkMergeTreeSettingsConstraints(*settings_from_storage, new_changes); + bool found_disk_setting = false; + bool found_storage_policy_setting = false; + for (const auto & changed_setting : new_changes) { const auto & setting_name = changed_setting.name; @@ -3645,9 +3735,22 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context } if (setting_name == "storage_policy") + { checkStoragePolicy(local_context->getStoragePolicy(new_value.safeGet())); + found_storage_policy_setting = true; + } + else if (setting_name == "disk") + { + checkStoragePolicy(local_context->getStoragePolicyFromDisk(new_value.safeGet())); + found_disk_setting = true; + } } + if (found_storage_policy_setting && found_disk_setting) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "MergeTree settings `storage_policy` and `disk` cannot be specified at the same time"); + /// Check if it is safe to reset the settings for (const auto & current_setting : current_changes) { @@ -3732,9 +3835,9 @@ MergeTreeDataPartFormat MergeTreeData::choosePartFormatOnDisk(size_t bytes_uncom } MergeTreeDataPartBuilder MergeTreeData::getDataPartBuilder( - const String & name, const VolumePtr & volume, const String & part_dir) const + const String & name, const VolumePtr & volume, const String & part_dir, const ReadSettings & read_settings_) const { - return MergeTreeDataPartBuilder(*this, name, volume, relative_data_path, part_dir); + return MergeTreeDataPartBuilder(*this, name, volume, relative_data_path, part_dir, read_settings_); } void MergeTreeData::changeSettings( @@ -3746,12 +3849,16 @@ void MergeTreeData::changeSettings( bool has_storage_policy_changed = false; const auto & new_changes = new_settings->as().changes; + StoragePolicyPtr new_storage_policy = nullptr; for (const auto & change : new_changes) { - if (change.name == "storage_policy") + if (change.name == "disk" || change.name == "storage_policy") { - StoragePolicyPtr new_storage_policy = getContext()->getStoragePolicy(change.value.safeGet()); + if (change.name == "disk") + new_storage_policy = getContext()->getStoragePolicyFromDisk(change.value.safeGet()); + else + new_storage_policy = getContext()->getStoragePolicy(change.value.safeGet()); StoragePolicyPtr old_storage_policy = getStoragePolicy(); /// StoragePolicy of different version or name is guaranteed to have different pointer @@ -5812,7 +5919,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartRestoredFromBackup(cons /// Load this part from the directory `temp_part_dir`. 
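[Editor's note] The settings-ALTER validation added in this file rejects a statement that changes both `disk` and `storage_policy`. A minimal standalone model of that check, with std::invalid_argument standing in for the BAD_ARGUMENTS exception and plain string pairs standing in for the real SettingChange objects:

#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

// Toy model of the mutual-exclusion check: a settings ALTER may change either
// `disk` or `storage_policy`, but not both in the same statement.
void validateSettingChanges(const std::vector<std::pair<std::string, std::string>> & changes)
{
    bool found_disk = false;
    bool found_policy = false;

    for (const auto & [name, value] : changes)
    {
        if (name == "disk")
            found_disk = true;      // the real code also resolves a storage policy from the disk name
        else if (name == "storage_policy")
            found_policy = true;    // the real code validates the named policy
    }

    if (found_disk && found_policy)
        throw std::invalid_argument("MergeTree settings `storage_policy` and `disk` cannot be specified at the same time");
}

int main()
{
    validateSettingChanges({{"disk", "s3"}});                               // accepted
    try
    {
        validateSettingChanges({{"disk", "s3"}, {"storage_policy", "p"}});  // rejected
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';
    }
}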
auto load_part = [&] { - MergeTreeDataPartBuilder builder(*this, part_name, single_disk_volume, parent_part_dir, part_dir_name); + MergeTreeDataPartBuilder builder(*this, part_name, single_disk_volume, parent_part_dir, part_dir_name, getReadSettings()); builder.withPartFormatFromDisk(); part = std::move(builder).build(); part->version.setCreationTID(Tx::PrehistoricTID, nullptr); @@ -5827,7 +5934,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartRestoredFromBackup(cons if (!part) { /// Make a fake data part only to copy its files to /detached/. - part = MergeTreeDataPartBuilder{*this, part_name, single_disk_volume, parent_part_dir, part_dir_name} + part = MergeTreeDataPartBuilder{*this, part_name, single_disk_volume, parent_part_dir, part_dir_name, getReadSettings()} .withPartStorageType(MergeTreeDataPartStorageType::Full) .withPartType(MergeTreeDataPartType::Wide) .build(); @@ -6326,6 +6433,12 @@ DetachedPartsInfo MergeTreeData::getDetachedParts() const for (const auto & disk : getDisks()) { + /// While it is possible to have detached parts on readonly/write-once disks + /// (if they were produced on another machine, where it wasn't readonly) + /// to avoid wasting resources for slow disks, avoid trying to enumerate them. + if (disk->isReadOnly() || disk->isWriteOnce()) + continue; + String detached_path = fs::path(relative_data_path) / DETACHED_DIR_NAME; /// Note: we don't care about TOCTOU issue here. @@ -6473,7 +6586,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const LOG_DEBUG(log, "Checking part {}", new_name); auto single_disk_volume = std::make_shared("volume_" + old_name, disk); - auto part = getDataPartBuilder(old_name, single_disk_volume, source_dir / new_name) + auto part = getDataPartBuilder(old_name, single_disk_volume, source_dir / new_name, getReadSettings()) .withPartFormatFromDisk() .build(); @@ -7528,7 +7641,7 @@ std::pair MergeTreeData::cloneAn std::string(fs::path(dst_part_storage->getFullRootPath()) / tmp_dst_part_name), with_copy); - auto dst_data_part = MergeTreeDataPartBuilder(*this, dst_part_name, dst_part_storage) + auto dst_data_part = MergeTreeDataPartBuilder(*this, dst_part_name, dst_part_storage, getReadSettings()) .withPartFormatFromDisk() .build(); @@ -7874,7 +7987,8 @@ try part_log_elem.event_type = type; - if (part_log_elem.event_type == PartLogElement::MERGE_PARTS) + if (part_log_elem.event_type == PartLogElement::MERGE_PARTS + || part_log_elem.event_type == PartLogElement::MERGE_PARTS_START) { if (merge_entry) { @@ -7899,6 +8013,20 @@ try part_log_elem.table_name = table_id.table_name; part_log_elem.table_uuid = table_id.uuid; part_log_elem.partition_id = MergeTreePartInfo::fromPartName(new_part_name, format_version).partition_id; + + { + const DataPart * result_or_source_data_part = nullptr; + if (result_part) + result_or_source_data_part = result_part.get(); + else if (!source_parts.empty()) + result_or_source_data_part = source_parts.at(0).get(); + if (result_or_source_data_part) + { + WriteBufferFromString out(part_log_elem.partition); + result_or_source_data_part->partition.serializeText(*this, out, {}); + } + } + part_log_elem.part_name = new_part_name; if (result_part) @@ -7928,10 +8056,6 @@ try { part_log_elem.profile_counters = profile_counters; } - else - { - LOG_WARNING(log, "Profile counters are not set"); - } part_log->add(std::move(part_log_elem)); } @@ -8786,7 +8910,7 @@ std::pair MergeTreeData::createE VolumePtr data_part_volume = createVolumeFromReservation(reservation, volume); auto 
tmp_dir_holder = getTemporaryPartDirectoryHolder(EMPTY_PART_TMP_PREFIX + new_part_name); - auto new_data_part = getDataPartBuilder(new_part_name, data_part_volume, EMPTY_PART_TMP_PREFIX + new_part_name) + auto new_data_part = getDataPartBuilder(new_part_name, data_part_volume, EMPTY_PART_TMP_PREFIX + new_part_name, getReadSettings()) .withBytesAndRowsOnDisk(0, 0) .withPartInfo(new_part_info) .build(); diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 7a9730e8627..fe360907875 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -241,7 +241,7 @@ public: MergeTreeDataPartFormat choosePartFormat(size_t bytes_uncompressed, size_t rows_count) const; MergeTreeDataPartFormat choosePartFormatOnDisk(size_t bytes_uncompressed, size_t rows_count) const; - MergeTreeDataPartBuilder getDataPartBuilder(const String & name, const VolumePtr & volume, const String & part_dir) const; + MergeTreeDataPartBuilder getDataPartBuilder(const String & name, const VolumePtr & volume, const String & part_dir, const ReadSettings & read_settings_) const; /// Auxiliary object to add a set of parts into the working set in two steps: /// * First, as PreActive parts (the parts are ready, but not yet in the active set). @@ -506,6 +506,10 @@ public: /// Load the set of data parts from disk. Call once - immediately after the object is created. void loadDataParts(bool skip_sanity_checks, std::optional> expected_parts); + /// Prewarm mark cache for the most recent data parts. + void prewarmMarkCache(ThreadPool & pool); + void prewarmMarkCacheIfNeeded(ThreadPool & pool); + String getLogName() const { return log.loadName(); } Int64 getMaxBlockNumber() const; diff --git a/src/Storages/MergeTree/MergeTreeDataFormatVersion.h b/src/Storages/MergeTree/MergeTreeDataFormatVersion.h index 0a84f08ea71..a61938a993c 100644 --- a/src/Storages/MergeTree/MergeTreeDataFormatVersion.h +++ b/src/Storages/MergeTree/MergeTreeDataFormatVersion.h @@ -8,7 +8,7 @@ namespace DB STRONG_TYPEDEF(UInt32, MergeTreeDataFormatVersion) -const MergeTreeDataFormatVersion MERGE_TREE_DATA_OLD_FORMAT_VERSION {0}; -const MergeTreeDataFormatVersion MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING {1}; +static constexpr MergeTreeDataFormatVersion MERGE_TREE_DATA_OLD_FORMAT_VERSION {0}; +static constexpr MergeTreeDataFormatVersion MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING {1}; } diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 8b3c7bdf3fb..176b5c00b0a 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -48,6 +48,16 @@ namespace CurrentMetrics { extern const Metric BackgroundMergesAndMutationsPoolTask; } +namespace ProfileEvents +{ + + extern const Event MergerMutatorsGetPartsForMergeElapsedMicroseconds; + extern const Event MergerMutatorPrepareRangesForMergeElapsedMicroseconds; + extern const Event MergerMutatorSelectPartsForMergeElapsedMicroseconds; + extern const Event MergerMutatorRangesForMergeCount; + extern const Event MergerMutatorPartsInRangesForMergeCount; + extern const Event MergerMutatorSelectRangePartsCount; +} namespace DB { @@ -70,6 +80,8 @@ namespace MergeTreeSetting extern const MergeTreeSettingsBool ttl_only_drop_parts; extern const MergeTreeSettingsUInt64 parts_to_throw_insert; extern const MergeTreeSettingsMergeSelectorAlgorithm merge_selector_algorithm; + extern const 
MergeTreeSettingsBool merge_selector_enable_heuristic_to_remove_small_parts_at_right; + extern const MergeTreeSettingsFloat merge_selector_base; } namespace ErrorCodes @@ -213,6 +225,7 @@ MergeTreeDataMergerMutator::PartitionIdsHint MergeTreeDataMergerMutator::getPart { PartitionIdsHint res; MergeTreeData::DataPartsVector data_parts = getDataPartsToSelectMergeFrom(txn); + if (data_parts.empty()) return res; @@ -270,6 +283,8 @@ MergeTreeDataMergerMutator::PartitionIdsHint MergeTreeDataMergerMutator::getPart MergeTreeData::DataPartsVector MergeTreeDataMergerMutator::getDataPartsToSelectMergeFrom( const MergeTreeTransactionPtr & txn, const PartitionIdsHint * partitions_hint) const { + + Stopwatch get_data_parts_for_merge_timer; auto res = getDataPartsToSelectMergeFrom(txn); if (!partitions_hint) return res; @@ -278,6 +293,8 @@ MergeTreeData::DataPartsVector MergeTreeDataMergerMutator::getDataPartsToSelectM { return !partitions_hint->contains(part->info.partition_id); }); + + ProfileEvents::increment(ProfileEvents::MergerMutatorsGetPartsForMergeElapsedMicroseconds, get_data_parts_for_merge_timer.elapsedMicroseconds()); return res; } @@ -355,6 +372,7 @@ MergeTreeDataMergerMutator::MergeSelectingInfo MergeTreeDataMergerMutator::getPo const MergeTreeTransactionPtr & txn, PreformattedMessage & out_disable_reason) const { + Stopwatch ranges_for_merge_timer; MergeSelectingInfo res; res.current_time = std::time(nullptr); @@ -455,6 +473,10 @@ MergeTreeDataMergerMutator::MergeSelectingInfo MergeTreeDataMergerMutator::getPo prev_part = ∂ } + ProfileEvents::increment(ProfileEvents::MergerMutatorPartsInRangesForMergeCount, res.parts_selected_precondition); + ProfileEvents::increment(ProfileEvents::MergerMutatorRangesForMergeCount, res.parts_ranges.size()); + ProfileEvents::increment(ProfileEvents::MergerMutatorPrepareRangesForMergeElapsedMicroseconds, ranges_for_merge_timer.elapsedMicroseconds()); + return res; } @@ -469,6 +491,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( PreformattedMessage & out_disable_reason, bool dry_run) { + Stopwatch select_parts_from_ranges_timer; const auto data_settings = data.getSettings(); IMergeSelector::PartsRange parts_to_merge; @@ -540,6 +563,9 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( /// Override value from table settings simple_merge_settings.window_size = (*data_settings)[MergeTreeSetting::merge_selector_window_size]; simple_merge_settings.max_parts_to_merge_at_once = (*data_settings)[MergeTreeSetting::max_parts_to_merge_at_once]; + simple_merge_settings.enable_heuristic_to_remove_small_parts_at_right = (*data_settings)[MergeTreeSetting::merge_selector_enable_heuristic_to_remove_small_parts_at_right]; + simple_merge_settings.base = (*data_settings)[MergeTreeSetting::merge_selector_base]; + if (!(*data_settings)[MergeTreeSetting::min_age_to_force_merge_on_partition_only]) simple_merge_settings.min_age_to_force_merge = (*data_settings)[MergeTreeSetting::min_age_to_force_merge_seconds]; @@ -565,7 +591,8 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( if (parts_to_merge.empty()) { - out_disable_reason = PreformattedMessage::create("Did not find any parts to merge (with usual merge selectors)"); + ProfileEvents::increment(ProfileEvents::MergerMutatorSelectPartsForMergeElapsedMicroseconds, select_parts_from_ranges_timer.elapsedMicroseconds()); + out_disable_reason = PreformattedMessage::create("Did not find any parts to merge (with usual merge selectors) in {}ms", 
select_parts_from_ranges_timer.elapsedMicroseconds() / 1000); return SelectPartsDecision::CANNOT_SELECT; } } @@ -578,8 +605,11 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( parts.push_back(part); } - LOG_DEBUG(log, "Selected {} parts from {} to {}", parts.size(), parts.front()->name, parts.back()->name); + LOG_DEBUG(log, "Selected {} parts from {} to {} in {}ms", parts.size(), parts.front()->name, parts.back()->name, select_parts_from_ranges_timer.elapsedMicroseconds() / 1000); + ProfileEvents::increment(ProfileEvents::MergerMutatorSelectRangePartsCount, parts.size()); + future_part->assign(std::move(parts)); + ProfileEvents::increment(ProfileEvents::MergerMutatorSelectPartsForMergeElapsedMicroseconds, select_parts_from_ranges_timer.elapsedMicroseconds()); return SelectPartsDecision::SELECTED; } diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index 71fcb93f369..6d209b9f931 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -106,9 +106,11 @@ public: PreformattedMessage & out_disable_reason, bool dry_run = false); + /// Actually the most fresh partition with biggest modification_time String getBestPartitionToOptimizeEntire(const PartitionsInfo & partitions_info) const; /// Useful to quickly get a list of partitions that contain parts that we may want to merge + /// The result is limited by top_number_of_partitions_to_consider_for_merge PartitionIdsHint getPartitionsThatMayBeMerged( size_t max_total_size_to_merge, const AllowedMergingPredicate & can_merge_callback, diff --git a/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp b/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp index 37f578b0c25..6ec4bc31d90 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp @@ -14,20 +14,22 @@ namespace ErrorCodes } MergeTreeDataPartBuilder::MergeTreeDataPartBuilder( - const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_) + const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_, const ReadSettings & read_settings_) : data(data_) , name(std::move(name_)) , volume(std::move(volume_)) , root_path(std::move(root_path_)) , part_dir(std::move(part_dir_)) + , read_settings(read_settings_) { } MergeTreeDataPartBuilder::MergeTreeDataPartBuilder( - const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_) + const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_, const ReadSettings & read_settings_) : data(data_) , name(std::move(name_)) , part_storage(std::move(part_storage_)) + , read_settings(read_settings_) { } @@ -73,7 +75,8 @@ MutableDataPartStoragePtr MergeTreeDataPartBuilder::getPartStorageByType( MergeTreeDataPartStorageType storage_type_, const VolumePtr & volume_, const String & root_path_, - const String & part_dir_) + const String & part_dir_, + const ReadSettings &) /// Unused here, but used in private repo. 
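[Editor's note] The MergeTreeDataMergerMutator changes earlier in this hunk time each selection phase with a Stopwatch and feed the elapsed microseconds into ProfileEvents counters on every exit path, including the "nothing to merge" one. A self-contained sketch of that pattern; the atomics and the local Stopwatch are hypothetical stand-ins for the real counters and timer class:

#include <atomic>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <thread>

// Stand-ins for ProfileEvents counters used in the hunk above.
std::atomic<uint64_t> select_parts_elapsed_us{0};
std::atomic<uint64_t> selected_ranges{0};

struct Stopwatch
{
    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
    uint64_t elapsedMicroseconds() const
    {
        return std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - start).count();
    }
};

void selectPartsToMerge()
{
    Stopwatch timer;                                            // started before the phase
    std::this_thread::sleep_for(std::chrono::milliseconds(2));  // pretend selection work
    selected_ranges += 1;
    select_parts_elapsed_us += timer.elapsedMicroseconds();     // incremented on every exit path
}

int main()
{
    selectPartsToMerge();
    std::cout << "selection took " << select_parts_elapsed_us / 1000 << " ms for "
              << selected_ranges << " range(s)\n";
}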
{ if (!volume_) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot create part storage, because volume is not specified"); @@ -112,7 +115,7 @@ MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartType(MergeTreeDataP MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartStorageType(MergeTreeDataPartStorageType storage_type_) { - part_storage = getPartStorageByType(storage_type_, volume, root_path, part_dir); + part_storage = getPartStorageByType(storage_type_, volume, root_path, part_dir, read_settings); return *this; } @@ -126,7 +129,8 @@ MergeTreeDataPartBuilder::PartStorageAndMarkType MergeTreeDataPartBuilder::getPartStorageAndMarkType( const VolumePtr & volume_, const String & root_path_, - const String & part_dir_) + const String & part_dir_, + const ReadSettings & read_settings_) { auto disk = volume_->getDisk(); auto part_relative_path = fs::path(root_path_) / part_dir_; @@ -138,7 +142,7 @@ MergeTreeDataPartBuilder::getPartStorageAndMarkType( if (MarkType::isMarkFileExtension(ext)) { - auto storage = getPartStorageByType(MergeTreeDataPartStorageType::Full, volume_, root_path_, part_dir_); + auto storage = getPartStorageByType(MergeTreeDataPartStorageType::Full, volume_, root_path_, part_dir_, read_settings_); return {std::move(storage), MarkType(ext)}; } } @@ -156,7 +160,7 @@ MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartFormatFromDisk() MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartFormatFromVolume() { assert(volume); - auto [storage, mark_type] = getPartStorageAndMarkType(volume, root_path, part_dir); + auto [storage, mark_type] = getPartStorageAndMarkType(volume, root_path, part_dir, read_settings); if (!storage || !mark_type) { diff --git a/src/Storages/MergeTree/MergeTreeDataPartBuilder.h b/src/Storages/MergeTree/MergeTreeDataPartBuilder.h index 0f54ff0a631..bce881a1970 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartBuilder.h +++ b/src/Storages/MergeTree/MergeTreeDataPartBuilder.h @@ -21,8 +21,8 @@ using VolumePtr = std::shared_ptr; class MergeTreeDataPartBuilder { public: - MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_); - MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_); + MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_, const ReadSettings & read_settings_); + MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_, const ReadSettings & read_settings_); std::shared_ptr build(); @@ -42,7 +42,8 @@ public: static PartStorageAndMarkType getPartStorageAndMarkType( const VolumePtr & volume_, const String & root_path_, - const String & part_dir_); + const String & part_dir_, + const ReadSettings & read_settings); private: Self & withPartFormatFromVolume(); @@ -52,7 +53,8 @@ private: MergeTreeDataPartStorageType storage_type_, const VolumePtr & volume_, const String & root_path_, - const String & part_dir_); + const String & part_dir_, + const ReadSettings & read_settings); const MergeTreeData & data; const String name; @@ -64,6 +66,8 @@ private: std::optional part_type; MutableDataPartStoragePtr part_storage; const IMergeTreeDataPart * parent_part = nullptr; + + const ReadSettings read_settings; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp index fd46b3b9540..14c2da82de1 100644 --- 
a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp @@ -136,6 +136,32 @@ void MergeTreeDataPartCompact::loadIndexGranularity() loadIndexGranularityImpl(index_granularity, index_granularity_info, columns.size(), getDataPartStorage()); } +void MergeTreeDataPartCompact::loadMarksToCache(const Names & column_names, MarkCache * mark_cache) const +{ + if (column_names.empty() || !mark_cache) + return; + + auto context = storage.getContext(); + auto read_settings = context->getReadSettings(); + auto * load_marks_threadpool = read_settings.load_marks_asynchronously ? &context->getLoadMarksThreadpool() : nullptr; + auto info_for_read = std::make_shared(shared_from_this(), std::make_shared()); + + LOG_TEST(getLogger("MergeTreeDataPartCompact"), "Loading marks into mark cache for columns {} of part {}", toString(column_names), name); + + MergeTreeMarksLoader loader( + info_for_read, + mark_cache, + index_granularity_info.getMarksFilePath(DATA_FILE_NAME), + index_granularity.getMarksCount(), + index_granularity_info, + /*save_marks_in_cache=*/ true, + read_settings, + load_marks_threadpool, + columns.size()); + + loader.loadMarks(); +} + bool MergeTreeDataPartCompact::hasColumnFiles(const NameAndTypePair & column) const { if (!getColumnPosition(column.getNameInStorage())) @@ -230,7 +256,14 @@ bool MergeTreeDataPartCompact::isStoredOnRemoteDiskWithZeroCopySupport() const MergeTreeDataPartCompact::~MergeTreeDataPartCompact() { - removeIfNeeded(); + try + { + removeIfNeeded(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } } } diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.h b/src/Storages/MergeTree/MergeTreeDataPartCompact.h index 9512485c54e..8e279571578 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.h @@ -54,6 +54,8 @@ public: std::optional getFileNameForColumn(const NameAndTypePair & /* column */) const override { return DATA_FILE_NAME; } + void loadMarksToCache(const Names & column_names, MarkCache * mark_cache) const override; + ~MergeTreeDataPartCompact() override; protected: diff --git a/src/Storages/MergeTree/MergeTreeDataPartType.h b/src/Storages/MergeTree/MergeTreeDataPartType.h index 8177809d41e..a59ccc2fab1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartType.h +++ b/src/Storages/MergeTree/MergeTreeDataPartType.h @@ -45,6 +45,7 @@ public: enum Value { Full, + Packed, Unknown, }; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index 9bbf0ad9739..c515d645253 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -182,6 +182,47 @@ void MergeTreeDataPartWide::loadIndexGranularity() loadIndexGranularityImpl(index_granularity, index_granularity_info, getDataPartStorage(), *any_column_filename); } +void MergeTreeDataPartWide::loadMarksToCache(const Names & column_names, MarkCache * mark_cache) const +{ + if (column_names.empty() || !mark_cache) + return; + + std::vector> loaders; + + auto context = storage.getContext(); + auto read_settings = context->getReadSettings(); + auto * load_marks_threadpool = read_settings.load_marks_asynchronously ? 
&context->getLoadMarksThreadpool() : nullptr; + auto info_for_read = std::make_shared(shared_from_this(), std::make_shared()); + + LOG_TEST(getLogger("MergeTreeDataPartWide"), "Loading marks into mark cache for columns {} of part {}", toString(column_names), name); + + for (const auto & column_name : column_names) + { + auto serialization = getSerialization(column_name); + serialization->enumerateStreams([&](const auto & subpath) + { + auto stream_name = getStreamNameForColumn(column_name, subpath, checksums); + if (!stream_name) + return; + + loaders.emplace_back(std::make_unique( + info_for_read, + mark_cache, + index_granularity_info.getMarksFilePath(*stream_name), + index_granularity.getMarksCount(), + index_granularity_info, + /*save_marks_in_cache=*/ true, + read_settings, + load_marks_threadpool, + /*num_columns_in_mark=*/ 1)); + + loaders.back()->startAsyncLoad(); + }); + } + + for (auto & loader : loaders) + loader->loadMarks(); +} bool MergeTreeDataPartWide::isStoredOnRemoteDisk() const { @@ -200,7 +241,14 @@ bool MergeTreeDataPartWide::isStoredOnRemoteDiskWithZeroCopySupport() const MergeTreeDataPartWide::~MergeTreeDataPartWide() { - removeIfNeeded(); + try + { + removeIfNeeded(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } } void MergeTreeDataPartWide::doCheckConsistency(bool require_part_metadata) const diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.h b/src/Storages/MergeTree/MergeTreeDataPartWide.h index 42893f47573..022a5fb746c 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.h @@ -51,6 +51,8 @@ public: std::optional getColumnModificationTime(const String & column_name) const override; + void loadMarksToCache(const Names & column_names, MarkCache * mark_cache) const override; + protected: static void loadIndexGranularityImpl( MergeTreeIndexGranularity & index_granularity_, MergeTreeIndexGranularityInfo & index_granularity_info_, diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index a859172023f..c8d11ced683 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -1,5 +1,6 @@ #include #include +#include "Formats/MarkInCompressedFile.h" namespace DB { @@ -54,26 +55,15 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( marks_source_hashing = std::make_unique(*marks_compressor); } + if (settings.save_marks_in_cache) + { + cached_marks[MergeTreeDataPartCompact::DATA_FILE_NAME] = std::make_unique(); + } + for (const auto & column : columns_list) { auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, nullptr, compression); - } -} - -void MergeTreeDataPartWriterCompact::initDynamicStreamsIfNeeded(const Block & block) -{ - if (is_dynamic_streams_initialized) - return; - - is_dynamic_streams_initialized = true; - for (const auto & column : columns_list) - { - if (column.type->hasDynamicSubcolumns()) - { - auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, block.getByName(column.name).column, compression); - } + MergeTreeDataPartWriterCompact::addStreams(column, nullptr, compression); } } @@ -175,20 +165,25 @@ void writeColumnSingleGranule( void MergeTreeDataPartWriterCompact::write(const Block & block, const IColumn::Permutation * permutation) { - /// On first block of data initialize streams for dynamic subcolumns. 
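[Editor's note] The wide-part loadMarksToCache above builds one marks loader per substream, starts them all asynchronously and only then waits for each one. A toy model of that start-everything-then-join shape, using std::async instead of the real MergeTreeMarksLoader; loadMarksForStream and the stream names are made up:

#include <future>
#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for one marks loader: pretend to read a ".mrk" file for a stream.
size_t loadMarksForStream(const std::string & stream_name)
{
    return stream_name.size();  // pretend: number of marks loaded
}

int main()
{
    // In the real code these come from enumerating substreams of the requested columns.
    std::vector<std::string> stream_names = {"id", "vec", "vec.size0"};

    // Start all loads first (analogous to startAsyncLoad), then wait for each (loadMarks).
    std::vector<std::future<size_t>> loaders;
    loaders.reserve(stream_names.size());
    for (const auto & name : stream_names)
        loaders.emplace_back(std::async(std::launch::async, loadMarksForStream, name));

    size_t total = 0;
    for (auto & loader : loaders)
        total += loader.get();

    std::cout << "loaded " << total << " marks into the cache\n";
}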
- initDynamicStreamsIfNeeded(block); + Block result_block = block; + + /// During serialization columns with dynamic subcolumns (like JSON/Dynamic) must have the same dynamic structure. + /// But it may happen that they don't (for example during ALTER MODIFY COLUMN from some type to JSON/Dynamic). + /// In this case we use dynamic structure of the column from the first written block and adjust columns from + /// the next blocks so they match this dynamic structure. + initOrAdjustDynamicStructureIfNeeded(result_block); /// Fill index granularity for this block /// if it's unknown (in case of insert data or horizontal merge, /// but not in case of vertical merge) if (compute_granularity) { - size_t index_granularity_for_block = computeIndexGranularity(block); + size_t index_granularity_for_block = computeIndexGranularity(result_block); assert(index_granularity_for_block >= 1); - fillIndexGranularity(index_granularity_for_block, block.rows()); + fillIndexGranularity(index_granularity_for_block, result_block.rows()); } - Block result_block = permuteBlockIfNeeded(block, permutation); + result_block = permuteBlockIfNeeded(result_block, permutation); if (!header) header = result_block.cloneEmpty(); @@ -255,9 +250,12 @@ void MergeTreeDataPartWriterCompact::writeDataBlock(const Block & block, const G return &result_stream->hashing_buf; }; + MarkInCompressedFile mark{plain_hashing.count(), static_cast(0)}; + writeBinaryLittleEndian(mark.offset_in_compressed_file, marks_out); + writeBinaryLittleEndian(mark.offset_in_decompressed_block, marks_out); - writeBinaryLittleEndian(plain_hashing.count(), marks_out); - writeBinaryLittleEndian(static_cast(0), marks_out); + if (!cached_marks.empty()) + cached_marks.begin()->second->push_back(mark); writeColumnSingleGranule( block.getByName(name_and_type->name), getSerialization(name_and_type->name), @@ -296,11 +294,17 @@ void MergeTreeDataPartWriterCompact::fillDataChecksums(MergeTreeDataPartChecksum if (with_final_mark && data_written) { + MarkInCompressedFile mark{plain_hashing.count(), 0}; + for (size_t i = 0; i < columns_list.size(); ++i) { - writeBinaryLittleEndian(plain_hashing.count(), marks_out); - writeBinaryLittleEndian(static_cast(0), marks_out); + writeBinaryLittleEndian(mark.offset_in_compressed_file, marks_out); + writeBinaryLittleEndian(mark.offset_in_decompressed_block, marks_out); + + if (!cached_marks.empty()) + cached_marks.begin()->second->push_back(mark); } + writeBinaryLittleEndian(static_cast(0), marks_out); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h index b440a37222d..b3e2e78491d 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h @@ -32,6 +32,8 @@ public: void fillChecksums(MergeTreeDataPartChecksums & checksums, NameSet & checksums_to_remove) override; void finish(bool sync) override; + size_t getNumberOfOpenStreams() const override { return 1; } + private: /// Finish serialization of the data. Flush rows in buffer to disk, compute checksums. 
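[Editor's note] The compact-writer change above now builds a MarkInCompressedFile value once, writes its two offsets to the on-disk marks stream and, when mark caching is enabled, also appends the same value to an in-memory vector. A simplified standalone sketch of that dual write; Mark, writeU64 and the byte buffer are stand-ins, and the little-endian handling is reduced to a memcpy:

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Simplified stand-in for MarkInCompressedFile.
struct Mark
{
    uint64_t offset_in_compressed_file;
    uint64_t offset_in_decompressed_block;
};

// Append a 64-bit value to a byte buffer (stands in for writeBinaryLittleEndian to the marks stream).
void writeU64(std::vector<char> & out, uint64_t value)
{
    char bytes[sizeof(value)];
    std::memcpy(bytes, &value, sizeof(value));  // assumes a little-endian host for brevity
    out.insert(out.end(), bytes, bytes + sizeof(value));
}

int main()
{
    std::vector<char> marks_file;   // what ends up in the on-disk marks file
    std::vector<Mark> cached_marks; // what is kept in memory for the mark cache

    Mark mark{4096, 0};
    writeU64(marks_file, mark.offset_in_compressed_file);
    writeU64(marks_file, mark.offset_in_decompressed_block);
    cached_marks.push_back(mark);   // the same mark is now available without re-reading the file

    std::cout << marks_file.size() << " bytes on disk, " << cached_marks.size() << " mark(s) cached\n";
}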
void fillDataChecksums(MergeTreeDataPartChecksums & checksums); @@ -48,9 +50,7 @@ private: void addToChecksums(MergeTreeDataPartChecksums & checksums); - void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc); - - void initDynamicStreamsIfNeeded(const Block & block); + void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc) override; Block header; @@ -104,8 +104,6 @@ private: /// then finally to 'marks_file'. std::unique_ptr marks_compressor; std::unique_ptr marks_source_hashing; - - bool is_dynamic_streams_initialized = false; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 89db8174636..c483d47fed7 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -3,26 +3,16 @@ #include #include #include -#include #include - namespace ProfileEvents { - extern const Event MergeTreeDataWriterSkipIndicesCalculationMicroseconds; - extern const Event MergeTreeDataWriterStatisticsCalculationMicroseconds; -} - -namespace CurrentMetrics -{ - extern const Metric CompressionThread; - extern const Metric CompressionThreadActive; - extern const Metric CompressionThreadScheduled; +extern const Event MergeTreeDataWriterSkipIndicesCalculationMicroseconds; +extern const Event MergeTreeDataWriterStatisticsCalculationMicroseconds; } namespace DB { - namespace MergeTreeSetting { extern const MergeTreeSettingsUInt64 index_granularity; @@ -35,53 +25,57 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -void MergeTreeDataPartWriterOnDisk::Stream::preFinalize() +template +void MergeTreeDataPartWriterOnDisk::Stream::preFinalize() { /// Here the main goal is to do preFinalize calls for plain_file and marks_file /// Before that all hashing and compression buffers have to be finalized /// Otherwise some data might stuck in the buffers above plain_file and marks_file /// Also the order is important - compressed_hashing->finalize(); - compressor->finalize(); - plain_hashing->finalize(); + compressed_hashing.finalize(); + compressor.finalize(); + plain_hashing.finalize(); - if (marks_hashing) + if constexpr (!only_plain_file) { if (compress_marks) { - marks_compressed_hashing->finalize(); - marks_compressor->finalize(); + marks_compressed_hashing.finalize(); + marks_compressor.finalize(); } - marks_hashing->finalize(); + marks_hashing.finalize(); } plain_file->preFinalize(); - if (marks_file) + if constexpr (!only_plain_file) marks_file->preFinalize(); is_prefinalized = true; } -void MergeTreeDataPartWriterOnDisk::Stream::finalize() +template +void MergeTreeDataPartWriterOnDisk::Stream::finalize() { if (!is_prefinalized) preFinalize(); plain_file->finalize(); - if (marks_file) + if constexpr (!only_plain_file) marks_file->finalize(); } -void MergeTreeDataPartWriterOnDisk::Stream::sync() const +template +void MergeTreeDataPartWriterOnDisk::Stream::sync() const { plain_file->sync(); - if (marks_file) + if constexpr (!only_plain_file) marks_file->sync(); } -MergeTreeDataPartWriterOnDisk::Stream::Stream( +template<> +MergeTreeDataPartWriterOnDisk::Stream::Stream( const String & escaped_column_name_, const MutableDataPartStoragePtr & data_part_storage, const String & data_path_, @@ -96,45 +90,20 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( escaped_column_name(escaped_column_name_), data_file_extension{data_file_extension_}, 
marks_file_extension{marks_file_extension_}, + plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), + plain_hashing(*plain_file), + compressor(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size), + compressed_hashing(compressor), + marks_file(data_part_storage->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings)), + marks_hashing(*marks_file), + marks_compressor(marks_hashing, marks_compression_codec_, marks_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size), + marks_compressed_hashing(marks_compressor), compress_marks(MarkType(marks_file_extension).compressed) { - plain_file = data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings); - plain_hashing.emplace(*plain_file); - - if (query_write_settings.max_compression_threads > 1) - { - compression_thread_pool.emplace( - CurrentMetrics::CompressionThread, CurrentMetrics::CompressionThreadActive, CurrentMetrics::CompressionThreadScheduled, - query_write_settings.max_compression_threads); - - compressor = std::make_unique( - *plain_hashing, - compression_codec_, - max_compress_block_size_, - query_write_settings.max_compression_threads, - *compression_thread_pool); - - is_compressor_parallel = true; - } - else - { - compressor = std::make_unique( - *plain_hashing, - compression_codec_, - max_compress_block_size_, - query_write_settings.use_adaptive_write_buffer, - query_write_settings.adaptive_write_buffer_initial_size); - } - - compressed_hashing.emplace(*compressor); - - marks_file = data_part_storage->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings); - marks_hashing.emplace(*marks_file); - marks_compressor.emplace(*marks_hashing, marks_compression_codec_, marks_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size); - marks_compressed_hashing.emplace(*marks_compressor); } -MergeTreeDataPartWriterOnDisk::Stream::Stream( +template<> +MergeTreeDataPartWriterOnDisk::Stream::Stream( const String & escaped_column_name_, const MutableDataPartStoragePtr & data_part_storage, const String & data_path_, @@ -146,33 +115,34 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( data_file_extension{data_file_extension_}, plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), plain_hashing(*plain_file), - compressor(std::make_unique(*plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size)), - compressed_hashing(*compressor), + compressor(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size), + compressed_hashing(compressor), compress_marks(false) { } -void MergeTreeDataPartWriterOnDisk::Stream::addToChecksums(MergeTreeData::DataPart::Checksums & checksums) +template +void MergeTreeDataPartWriterOnDisk::Stream::addToChecksums(MergeTreeData::DataPart::Checksums & checksums) { String name = escaped_column_name; checksums.files[name + data_file_extension].is_compressed = true; - checksums.files[name + data_file_extension].uncompressed_size = 
compressed_hashing->count(); - checksums.files[name + data_file_extension].uncompressed_hash = compressed_hashing->getHash(); - checksums.files[name + data_file_extension].file_size = plain_hashing->count(); - checksums.files[name + data_file_extension].file_hash = plain_hashing->getHash(); + checksums.files[name + data_file_extension].uncompressed_size = compressed_hashing.count(); + checksums.files[name + data_file_extension].uncompressed_hash = compressed_hashing.getHash(); + checksums.files[name + data_file_extension].file_size = plain_hashing.count(); + checksums.files[name + data_file_extension].file_hash = plain_hashing.getHash(); - if (marks_hashing) + if constexpr (!only_plain_file) { if (compress_marks) { checksums.files[name + marks_file_extension].is_compressed = true; - checksums.files[name + marks_file_extension].uncompressed_size = marks_compressed_hashing->count(); - checksums.files[name + marks_file_extension].uncompressed_hash = marks_compressed_hashing->getHash(); + checksums.files[name + marks_file_extension].uncompressed_size = marks_compressed_hashing.count(); + checksums.files[name + marks_file_extension].uncompressed_hash = marks_compressed_hashing.getHash(); } - checksums.files[name + marks_file_extension].file_size = marks_hashing->count(); - checksums.files[name + marks_file_extension].file_hash = marks_hashing->getHash(); + checksums.files[name + marks_file_extension].file_size = marks_hashing.count(); + checksums.files[name + marks_file_extension].file_hash = marks_hashing.getHash(); } } @@ -209,8 +179,8 @@ MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk( throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't take information about index granularity from blocks, when non empty index_granularity array specified"); - if (!getDataPartStorage().exists()) - getDataPartStorage().createDirectories(); + /// We don't need to check if it exists or not, createDirectories doesn't throw + getDataPartStorage().createDirectories(); if (settings.rewrite_primary_key) initPrimaryIndex(); @@ -306,12 +276,12 @@ void MergeTreeDataPartWriterOnDisk::initStatistics() for (const auto & stat_ptr : stats) { String stats_name = stat_ptr->getFileName(); - stats_streams.emplace_back(std::make_unique( - stats_name, - data_part_storage, - stats_name, STATS_FILE_SUFFIX, - default_codec, settings.max_compress_block_size, - settings.query_write_settings)); + stats_streams.emplace_back(std::make_unique>( + stats_name, + data_part_storage, + stats_name, STATS_FILE_SUFFIX, + default_codec, settings.max_compress_block_size, + settings.query_write_settings)); } } @@ -328,14 +298,14 @@ void MergeTreeDataPartWriterOnDisk::initSkipIndices() { String stream_name = skip_index->getFileName(); skip_indices_streams.emplace_back( - std::make_unique( - stream_name, - data_part_storage, - stream_name, skip_index->getSerializedFileExtension(), - stream_name, marks_file_extension, - default_codec, settings.max_compress_block_size, - marks_compression_codec, settings.marks_compress_block_size, - settings.query_write_settings)); + std::make_unique>( + stream_name, + data_part_storage, + stream_name, skip_index->getSerializedFileExtension(), + stream_name, marks_file_extension, + default_codec, settings.max_compress_block_size, + marks_compression_codec, settings.marks_compress_block_size, + settings.query_write_settings)); GinIndexStorePtr store = nullptr; if (typeid_cast(&*skip_index) != nullptr) @@ -411,7 +381,7 @@ void MergeTreeDataPartWriterOnDisk::calculateAndSerializeSkipIndices(const Block { const 
auto index_helper = skip_indices[i]; auto & stream = *skip_indices_streams[i]; - WriteBuffer & marks_out = stream.compress_marks ? *stream.marks_compressed_hashing : *stream.marks_hashing; + WriteBuffer & marks_out = stream.compress_marks ? stream.marks_compressed_hashing : stream.marks_hashing; GinIndexStorePtr store; if (typeid_cast(&*index_helper) != nullptr) @@ -427,7 +397,7 @@ void MergeTreeDataPartWriterOnDisk::calculateAndSerializeSkipIndices(const Block { if (skip_index_accumulated_marks[i] == index_helper->index.granularity) { - skip_indices_aggregators[i]->getGranuleAndReset()->serializeBinary(*stream.compressed_hashing); + skip_indices_aggregators[i]->getGranuleAndReset()->serializeBinary(stream.compressed_hashing); skip_index_accumulated_marks[i] = 0; } @@ -435,11 +405,11 @@ void MergeTreeDataPartWriterOnDisk::calculateAndSerializeSkipIndices(const Block { skip_indices_aggregators[i] = index_helper->createIndexAggregatorForPart(store, settings); - if (stream.compressed_hashing->offset() >= settings.min_compress_block_size) - stream.compressed_hashing->next(); + if (stream.compressed_hashing.offset() >= settings.min_compress_block_size) + stream.compressed_hashing.next(); - writeBinaryLittleEndian(stream.plain_hashing->count(), marks_out); - writeBinaryLittleEndian(stream.compressed_hashing->offset(), marks_out); + writeBinaryLittleEndian(stream.plain_hashing.count(), marks_out); + writeBinaryLittleEndian(stream.compressed_hashing.offset(), marks_out); /// Actually this numbers is redundant, but we have to store them /// to be compatible with the normal .mrk2 file format @@ -519,7 +489,7 @@ void MergeTreeDataPartWriterOnDisk::fillSkipIndicesChecksums(MergeTreeData::Data { auto & stream = *skip_indices_streams[i]; if (!skip_indices_aggregators[i]->empty()) - skip_indices_aggregators[i]->getGranuleAndReset()->serializeBinary(*stream.compressed_hashing); + skip_indices_aggregators[i]->getGranuleAndReset()->serializeBinary(stream.compressed_hashing); /// Register additional files written only by the full-text index. Required because otherwise DROP TABLE complains about unknown /// files. Note that the provided actual checksums are bogus. The problem is that at this point the file writes happened already and @@ -559,7 +529,7 @@ void MergeTreeDataPartWriterOnDisk::fillStatisticsChecksums(MergeTreeData::DataP for (size_t i = 0; i < stats.size(); i++) { auto & stream = *stats_streams[i]; - stats[i]->serialize(*stream.compressed_hashing); + stats[i]->serialize(stream.compressed_hashing); stream.preFinalize(); stream.addToChecksums(checksums); } @@ -594,4 +564,46 @@ Names MergeTreeDataPartWriterOnDisk::getSkipIndicesColumns() const return Names(skip_indexes_column_names_set.begin(), skip_indexes_column_names_set.end()); } +void MergeTreeDataPartWriterOnDisk::initOrAdjustDynamicStructureIfNeeded(Block & block) +{ + if (!is_dynamic_streams_initialized) + { + for (const auto & column : columns_list) + { + if (column.type->hasDynamicSubcolumns()) + { + /// Create all streams for dynamic subcolumns using dynamic structure from block. 
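[Editor's note] Further down in initOrAdjustDynamicStructureIfNeeded, the else-branch rebuilds any column whose dynamic structure differs from the remembered sample: it creates an empty column, takes the sample's dynamic structure and re-inserts the block's data. The following is a deliberately crude analogy rather than the real mechanism (which uses takeDynamicStructureFromSourceColumns and insertRangeFrom); a map of subcolumn name to values stands in for a column with dynamic subcolumns:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Very rough stand-in for a column with dynamic subcolumns (e.g. JSON):
// the set of keys is the "dynamic structure", the vectors are the data.
using DynamicColumn = std::map<std::string, std::vector<int>>;

// Rebuild `incoming` so its key set matches `sample`, keeping its data.
// Missing subcolumns are filled with defaults; in the real implementation,
// extra subcolumns are folded into a shared representation instead of being dropped.
DynamicColumn adjustToSampleStructure(const DynamicColumn & sample, const DynamicColumn & incoming, size_t rows)
{
    DynamicColumn adjusted;
    for (const auto & [key, values] : sample)
    {
        auto it = incoming.find(key);
        adjusted[key] = (it != incoming.end()) ? it->second : std::vector<int>(rows, 0);
    }
    return adjusted;
}

int main()
{
    DynamicColumn sample   = {{"a", {}}, {"b", {}}};          // structure remembered from the first block
    DynamicColumn incoming = {{"a", {1, 2}}, {"c", {3, 4}}};  // later block with a different structure

    auto adjusted = adjustToSampleStructure(sample, incoming, 2);
    for (const auto & [key, values] : adjusted)
        std::cout << key << ": " << values.size() << " row(s)\n";  // keys a and b, matching the sample
}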
+ auto compression = getCodecDescOrDefault(column.name, default_codec); + addStreams(column, block.getByName(column.name).column, compression); + } + } + is_dynamic_streams_initialized = true; + block_sample = block.cloneEmpty(); + } + else + { + size_t size = block.columns(); + for (size_t i = 0; i != size; ++i) + { + auto & column = block.getByPosition(i); + const auto & sample_column = block_sample.getByPosition(i); + /// Check if the dynamic structure of this column is different from the sample column. + if (column.type->hasDynamicSubcolumns() && !column.column->dynamicStructureEquals(*sample_column.column)) + { + /// We need to change the dynamic structure of the column so it matches the sample column. + /// To do it, we create empty column of this type, take dynamic structure from sample column + /// and insert data into it. Resulting column will have required dynamic structure and the content + /// of the column in current block. + auto new_column = sample_column.type->createColumn(); + new_column->takeDynamicStructureFromSourceColumns({sample_column.column}); + new_column->insertRangeFrom(*column.column, 0, column.column->size()); + column.column = std::move(new_column); + } + } + } +} + +template struct MergeTreeDataPartWriterOnDisk::Stream; +template struct MergeTreeDataPartWriterOnDisk::Stream; + } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index cb46785ccbd..49d654c15e1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB { @@ -27,7 +28,7 @@ struct Granule /// this granule can be continuation of the previous one. bool mark_on_start; /// if true: When this granule will be written to disk all rows for corresponding mark will - /// be written. It doesn't mean that rows_to_write == index_granularity.getMarkRows(mark_number), + /// be wrtten. It doesn't mean that rows_to_write == index_granularity.getMarkRows(mark_number), /// We may have a lot of small blocks between two marks and this may be the last one. bool is_complete; }; @@ -44,6 +45,7 @@ public: /// Helper class, which holds chain of buffers to write data file with marks. /// It is used to write: one column, skip index or all columns (in compact format). + template struct Stream { Stream( @@ -74,32 +76,30 @@ public: /// compressed_hashing -> compressor -> plain_hashing -> plain_file std::unique_ptr plain_file; - std::optional plain_hashing; - /// This could be either CompressedWriteBuffer or ParallelCompressedWriteBuffer - bool is_compressor_parallel = false; - std::unique_ptr compressor; - std::optional compressed_hashing; + HashingWriteBuffer plain_hashing; + CompressedWriteBuffer compressor; + HashingWriteBuffer compressed_hashing; /// marks_compressed_hashing -> marks_compressor -> marks_hashing -> marks_file std::unique_ptr marks_file; - std::optional marks_hashing; - std::optional marks_compressor; - std::optional marks_compressed_hashing; + std::conditional_t marks_hashing; + std::conditional_t marks_compressor; + std::conditional_t marks_compressed_hashing; bool compress_marks; bool is_prefinalized = false; - /// Thread pool for parallel compression. 
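[Editor's note] Stream has just become a template on only_plain_file: the marks chain is declared through std::conditional_t and every use is guarded by if constexpr, so the statistics streams no longer carry unused marks members behind optionals. A minimal, self-contained illustration of that technique; the file names and members here are invented and are not the real Stream:

#include <fstream>
#include <iostream>
#include <string>
#include <type_traits>

// When the marks chain is not needed, its member collapses to a dummy type and
// every access is compiled out with `if constexpr`.
template <bool with_marks>
struct Stream
{
    std::ofstream plain_file;
    std::conditional_t<with_marks, std::ofstream, char> marks_file;  // char = placeholder when unused

    explicit Stream(const std::string & name) : plain_file(name + ".bin")
    {
        if constexpr (with_marks)
            marks_file.open(name + ".mrk");
    }

    void finalize()
    {
        plain_file.flush();
        if constexpr (with_marks)
            marks_file.flush();
    }
};

int main()
{
    Stream<true> full("column");   // data + marks, like the column and skip-index streams
    Stream<false> plain("stats");  // data only, like the statistics streams
    full.finalize();
    plain.finalize();
    std::cout << "sizeof(Stream<true>) = " << sizeof(Stream<true>)
              << ", sizeof(Stream<false>) = " << sizeof(Stream<false>) << '\n';
}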
- std::optional compression_thread_pool; - void preFinalize(); + void finalize(); + void sync() const; void addToChecksums(MergeTreeDataPartChecksums & checksums); }; - using StreamPtr = std::unique_ptr; + using StreamPtr = std::unique_ptr>; + using StatisticStreamPtr = std::unique_ptr>; MergeTreeDataPartWriterOnDisk( const String & data_part_name_, @@ -154,10 +154,18 @@ protected: /// Get unique non ordered skip indices column. Names getSkipIndicesColumns() const; + virtual void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc) = 0; + + /// On first block create all required streams for columns with dynamic subcolumns and remember the block sample. + /// On each next block check if dynamic structure of the columns equals to the dynamic structure of the same + /// columns in the sample block. If for some column dynamic structure is different, adjust it so it matches + /// the structure from the sample. + void initOrAdjustDynamicStructureIfNeeded(Block & block); + const MergeTreeIndices skip_indices; const ColumnsStatistics stats; - std::vector stats_streams; + std::vector stats_streams; const String marks_file_extension; const CompressionCodecPtr default_codec; @@ -188,6 +196,10 @@ protected: size_t current_mark = 0; GinIndexStoreFactory::GinIndexStores gin_index_stores; + + bool is_dynamic_streams_initialized = false; + Block block_sample; + private: void initSkipIndices(); void initPrimaryIndex(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 860722ba870..7c9724b1b75 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -6,10 +6,11 @@ #include #include #include +#include +#include #include #include - namespace DB { @@ -106,27 +107,16 @@ MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( indices_to_recalc_, stats_to_recalc_, marks_file_extension_, default_codec_, settings_, index_granularity_) { + if (settings.save_marks_in_cache) + { + auto columns_vec = getColumnsToPrewarmMarks(*storage_settings, columns_list); + columns_to_load_marks = NameSet(columns_vec.begin(), columns_vec.end()); + } + for (const auto & column : columns_list) { auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, nullptr, compression); - } -} - -void MergeTreeDataPartWriterWide::initDynamicStreamsIfNeeded(const DB::Block & block) -{ - if (is_dynamic_streams_initialized) - return; - - is_dynamic_streams_initialized = true; - block_sample = block.cloneEmpty(); - for (const auto & column : columns_list) - { - if (column.type->hasDynamicSubcolumns()) - { - auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, block_sample.getByName(column.name).column, compression); - } + MergeTreeDataPartWriterWide::addStreams(column, nullptr, compression); } } @@ -188,7 +178,7 @@ void MergeTreeDataPartWriterWide::addStreams( query_write_settings.use_adaptive_write_buffer = settings.use_adaptive_write_buffer_for_dynamic_subcolumns && ISerialization::isDynamicSubcolumn(substream_path, substream_path.size()); query_write_settings.adaptive_write_buffer_initial_size = settings.adaptive_write_buffer_initial_size; - column_streams[stream_name] = std::make_unique( + column_streams[stream_name] = std::make_unique>( stream_name, data_part_storage, stream_name, DATA_FILE_EXTENSION, @@ -199,6 +189,9 @@ void 
MergeTreeDataPartWriterWide::addStreams( settings.marks_compress_block_size, query_write_settings); + if (columns_to_load_marks.contains(name_and_type.name)) + cached_marks.emplace(stream_name, std::make_unique()); + full_name_to_stream_name.emplace(full_stream_name, stream_name); stream_name_to_full_name.emplace(stream_name, full_stream_name); }; @@ -231,7 +224,7 @@ ISerialization::OutputStreamGetter MergeTreeDataPartWriterWide::createStreamGett if (is_offsets && offset_columns.contains(stream_name)) return nullptr; - return &column_streams.at(stream_name)->compressed_hashing.value(); + return &column_streams.at(stream_name)->compressed_hashing; }; } @@ -267,15 +260,20 @@ void MergeTreeDataPartWriterWide::shiftCurrentMark(const Granules & granules_wri void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Permutation * permutation) { - /// On first block of data initialize streams for dynamic subcolumns. - initDynamicStreamsIfNeeded(block); + Block block_to_write = block; + + /// During serialization columns with dynamic subcolumns (like JSON/Dynamic) must have the same dynamic structure. + /// But it may happen that they don't (for example during ALTER MODIFY COLUMN from some type to JSON/Dynamic). + /// In this case we use dynamic structure of the column from the first written block and adjust columns from + /// the next blocks so they match this dynamic structure. + initOrAdjustDynamicStructureIfNeeded(block_to_write); /// Fill index granularity for this block /// if it's unknown (in case of insert data or horizontal merge, /// but not in case of vertical part of vertical merge) if (compute_granularity) { - size_t index_granularity_for_block = computeIndexGranularity(block); + size_t index_granularity_for_block = computeIndexGranularity(block_to_write); if (rows_written_in_last_mark > 0) { size_t rows_left_in_last_mark = index_granularity.getMarkRows(getCurrentMark()) - rows_written_in_last_mark; @@ -293,11 +291,9 @@ void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Perm } } - fillIndexGranularity(index_granularity_for_block, block.rows()); + fillIndexGranularity(index_granularity_for_block, block_to_write.rows()); } - Block block_to_write = block; - auto granules_to_write = getGranulesToWrite(index_granularity, block_to_write.rows(), getCurrentMark(), rows_written_in_last_mark); auto offset_columns = written_offset_columns ? *written_offset_columns : WrittenOffsetColumns{}; @@ -363,12 +359,16 @@ void MergeTreeDataPartWriterWide::writeSingleMark( void MergeTreeDataPartWriterWide::flushMarkToFile(const StreamNameAndMark & stream_with_mark, size_t rows_in_mark) { auto & stream = *column_streams[stream_with_mark.stream_name]; - WriteBuffer & marks_out = stream.compress_marks ? *stream.marks_compressed_hashing : *stream.marks_hashing; + WriteBuffer & marks_out = stream.compress_marks ? 
stream.marks_compressed_hashing : stream.marks_hashing; writeBinaryLittleEndian(stream_with_mark.mark.offset_in_compressed_file, marks_out); writeBinaryLittleEndian(stream_with_mark.mark.offset_in_decompressed_block, marks_out); + if (settings.can_use_adaptive_granularity) writeBinaryLittleEndian(rows_in_mark, marks_out); + + if (auto it = cached_marks.find(stream_with_mark.stream_name); it != cached_marks.end()) + it->second->push_back(stream_with_mark.mark); } StreamsWithMarks MergeTreeDataPartWriterWide::getCurrentMarksForColumn( @@ -400,22 +400,15 @@ StreamsWithMarks MergeTreeDataPartWriterWide::getCurrentMarksForColumn( auto & stream = *column_streams[stream_name]; /// There could already be enough data to compress into the new block. - auto push_mark = [&] - { - StreamNameAndMark stream_with_mark; - stream_with_mark.stream_name = stream_name; - stream_with_mark.mark.offset_in_compressed_file = stream.plain_hashing->count(); - stream_with_mark.mark.offset_in_decompressed_block = stream.compressed_hashing->offset(); - result.push_back(stream_with_mark); - }; + if (stream.compressed_hashing.offset() >= min_compress_block_size) + stream.compressed_hashing.next(); - if (stream.compressed_hashing->offset() >= min_compress_block_size) - { + StreamNameAndMark stream_with_mark; + stream_with_mark.stream_name = stream_name; + stream_with_mark.mark.offset_in_compressed_file = stream.plain_hashing.count(); + stream_with_mark.mark.offset_in_decompressed_block = stream.compressed_hashing.offset(); - stream.compressed_hashing->next(); - } - - push_mark(); + result.push_back(stream_with_mark); }, name_and_type.type, column_sample); return result; @@ -446,7 +439,7 @@ void MergeTreeDataPartWriterWide::writeSingleGranule( if (is_offsets && offset_columns.contains(stream_name)) return; - column_streams.at(stream_name)->compressed_hashing->nextIfAtEnd(); + column_streams.at(stream_name)->compressed_hashing.nextIfAtEnd(); }, name_and_type.type, column.getPtr()); } @@ -750,7 +743,6 @@ void MergeTreeDataPartWriterWide::fillChecksums(MergeTreeDataPartChecksums & che fillPrimaryIndexChecksums(checksums); fillSkipIndicesChecksums(checksums); - fillStatisticsChecksums(checksums); } @@ -764,7 +756,6 @@ void MergeTreeDataPartWriterWide::finish(bool sync) finishPrimaryIndexSerialization(sync); finishSkipIndicesSerialization(sync); - finishStatisticsSerialization(sync); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h index ab86ed27c7e..19304b28c6c 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h @@ -43,6 +43,8 @@ public: void finish(bool sync) final; + size_t getNumberOfOpenStreams() const override { return column_streams.size(); } + private: /// Finish serialization of data: write final mark if required and compute checksums /// Also validate written data in debug mode @@ -91,9 +93,7 @@ private: void addStreams( const NameAndTypePair & name_and_type, const ColumnPtr & column, - const ASTPtr & effective_codec_desc); - - void initDynamicStreamsIfNeeded(const Block & block); + const ASTPtr & effective_codec_desc) override; /// Method for self check (used in debug-build only). Checks that written /// data and corresponding marks are consistent. Otherwise throws logical @@ -136,13 +136,12 @@ private: using MarksForColumns = std::unordered_map; MarksForColumns last_non_written_marks; + /// Set of columns to put marks in cache during write. 
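+    /// The set is filled in the constructor from getColumnsToPrewarmMarks() when settings.save_marks_in_cache is enabled;
+    /// marks written for these columns are also collected in cached_marks so they can later be added to the mark cache (see addMarksToCache()).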
+ NameSet columns_to_load_marks; + /// How many rows we have already written in the current mark. /// More than zero when incoming blocks are smaller then their granularity. size_t rows_written_in_last_mark = 0; - - Block block_sample; - - bool is_dynamic_streams_initialized = false; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 13918ae8e91..1b3c58000e7 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -71,10 +71,7 @@ namespace Setting extern const SettingsString force_data_skipping_indices; extern const SettingsBool force_index_by_date; extern const SettingsSeconds lock_acquire_timeout; - extern const SettingsUInt64 max_parser_backtracks; - extern const SettingsUInt64 max_parser_depth; extern const SettingsInt64 max_partitions_to_read; - extern const SettingsUInt64 max_query_size; extern const SettingsUInt64 max_threads_for_indexes; extern const SettingsNonZeroUInt64 max_parallel_replicas; extern const SettingsUInt64 merge_tree_coarse_index_granularity; @@ -640,20 +637,11 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd if (use_skip_indexes && settings[Setting::force_data_skipping_indices].changed) { - const auto & indices = settings[Setting::force_data_skipping_indices].toString(); - - Strings forced_indices; - { - Tokens tokens(indices.data(), indices.data() + indices.size(), settings[Setting::max_query_size]); - IParser::Pos pos( - tokens, static_cast(settings[Setting::max_parser_depth]), static_cast(settings[Setting::max_parser_backtracks])); - Expected expected; - if (!parseIdentifiersOrStringLiterals(pos, expected, forced_indices)) - throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse force_data_skipping_indices ('{}')", indices); - } + const auto & indices_str = settings[Setting::force_data_skipping_indices].toString(); + auto forced_indices = parseIdentifiersOrStringLiterals(indices_str, settings); if (forced_indices.empty()) - throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "No indices parsed from force_data_skipping_indices ('{}')", indices); + throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "No indices parsed from force_data_skipping_indices ('{}')", indices_str); std::unordered_set useful_indices_names; for (const auto & useful_index : skip_indexes.useful_indices) @@ -1022,11 +1010,7 @@ size_t MergeTreeDataSelectExecutor::roundRowsOrBytesToMarks( /// Same as roundRowsOrBytesToMarks() but do not return more then max_marks size_t MergeTreeDataSelectExecutor::minMarksForConcurrentRead( - size_t rows_setting, - size_t bytes_setting, - size_t rows_granularity, - size_t bytes_granularity, - size_t max_marks) + size_t rows_setting, size_t bytes_setting, size_t rows_granularity, size_t bytes_granularity, size_t min_marks, size_t max_marks) { size_t marks = 1; @@ -1035,18 +1019,17 @@ size_t MergeTreeDataSelectExecutor::minMarksForConcurrentRead( else if (rows_setting) marks = (rows_setting + rows_granularity - 1) / rows_granularity; - if (bytes_granularity == 0) - return marks; - - /// Overflow - if (bytes_setting + bytes_granularity <= bytes_setting) /// overflow - return max_marks; - if (bytes_setting) - return std::max(marks, (bytes_setting + bytes_granularity - 1) / bytes_granularity); - return marks; + if (bytes_granularity) + { + /// Overflow + if (bytes_setting + bytes_granularity <= bytes_setting) /// overflow + marks = max_marks; + else if (bytes_setting) + marks = 
std::max(marks, (bytes_setting + bytes_granularity - 1) / bytes_granularity); + } + return std::max(marks, min_marks); } - /// Calculates a set of mark ranges, that could possibly contain keys, required by condition. /// In other words, it removes subranges from whole range, that definitely could not contain required keys. /// If @exact_ranges is not null, fill it with ranges containing marks of fully matched records. diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index 70536b7aa54..d16d9243c14 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -153,11 +153,7 @@ public: /// The same as roundRowsOrBytesToMarks, but return no more than max_marks. static size_t minMarksForConcurrentRead( - size_t rows_setting, - size_t bytes_setting, - size_t rows_granularity, - size_t bytes_granularity, - size_t max_marks); + size_t rows_setting, size_t bytes_setting, size_t rows_granularity, size_t bytes_granularity, size_t min_marks, size_t max_marks); /// If possible, construct optional key condition from predicates containing _part_offset column. static void buildKeyConditionFromPartOffset( diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 67fef759ed4..6d19f45e2c4 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -73,6 +73,7 @@ namespace MergeTreeSetting extern const MergeTreeSettingsFloat min_free_disk_ratio_to_perform_insert; extern const MergeTreeSettingsBool optimize_row_order; extern const MergeTreeSettingsFloat ratio_of_defaults_for_sparse_serialization; + extern const MergeTreeSettingsBool prewarm_mark_cache; } namespace ErrorCodes @@ -609,7 +610,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( } } - auto new_data_part = data.getDataPartBuilder(part_name, data_part_volume, part_dir) + auto new_data_part = data.getDataPartBuilder(part_name, data_part_volume, part_dir, getReadSettings()) .withPartFormat(data.choosePartFormat(expected_size, block.rows())) .withPartInfo(new_part_info) .build(); @@ -684,6 +685,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( /// This effectively chooses minimal compression method: /// either default lz4 or compression method with zero thresholds on absolute and relative part size. auto compression_codec = data.getContext()->chooseCompressionCodec(0, 0); + bool save_marks_in_cache = (*data_settings)[MergeTreeSetting::prewarm_mark_cache] && data.getContext()->getMarkCache(); auto out = std::make_unique( new_data_part, @@ -693,8 +695,9 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( statistics, compression_codec, context->getCurrentTransaction() ? context->getCurrentTransaction()->tid : Tx::PrehistoricTID, - false, - false, + /*reset_columns=*/ false, + save_marks_in_cache, + /*blocks_are_granules_size=*/ false, context->getWriteSettings()); out->writeWithPermutation(block, perm_ptr); @@ -829,6 +832,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( /// This effectively chooses minimal compression method: /// either default lz4 or compression method with zero thresholds on absolute and relative part size. 
auto compression_codec = data.getContext()->chooseCompressionCodec(0, 0); + bool save_marks_in_cache = (*data.getSettings())[MergeTreeSetting::prewarm_mark_cache] && data.getContext()->getMarkCache(); auto out = std::make_unique( new_data_part, @@ -839,7 +843,10 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( ColumnsStatistics{}, compression_codec, Tx::PrehistoricTID, - false, false, data.getContext()->getWriteSettings()); + /*reset_columns=*/ false, + save_marks_in_cache, + /*blocks_are_granules_size=*/ false, + data.getContext()->getWriteSettings()); out->writeWithPermutation(block, perm_ptr); auto finalizer = out->finalizePartAsync(new_data_part, false); diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.cpp b/src/Storages/MergeTree/MergeTreeIOSettings.cpp index 6705d75af41..bacfbbd5720 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeIOSettings.cpp @@ -26,7 +26,6 @@ namespace MergeTreeSetting extern const MergeTreeSettingsString primary_key_compression_codec; extern const MergeTreeSettingsBool use_adaptive_write_buffer_for_dynamic_subcolumns; extern const MergeTreeSettingsBool use_compact_variant_discriminators_serialization; - extern const MergeTreeSettingsUInt64 max_compression_threads; } MergeTreeWriterSettings::MergeTreeWriterSettings( @@ -35,6 +34,7 @@ MergeTreeWriterSettings::MergeTreeWriterSettings( const MergeTreeSettingsPtr & storage_settings, bool can_use_adaptive_granularity_, bool rewrite_primary_key_, + bool save_marks_in_cache_, bool blocks_are_granules_size_) : min_compress_block_size( (*storage_settings)[MergeTreeSetting::min_compress_block_size] ? (*storage_settings)[MergeTreeSetting::min_compress_block_size] : global_settings[Setting::min_compress_block_size]) @@ -47,6 +47,7 @@ MergeTreeWriterSettings::MergeTreeWriterSettings( , primary_key_compress_block_size((*storage_settings)[MergeTreeSetting::primary_key_compress_block_size]) , can_use_adaptive_granularity(can_use_adaptive_granularity_) , rewrite_primary_key(rewrite_primary_key_) + , save_marks_in_cache(save_marks_in_cache_) , blocks_are_granules_size(blocks_are_granules_size_) , query_write_settings(query_write_settings_) , low_cardinality_max_dictionary_size(global_settings[Setting::low_cardinality_max_dictionary_size]) @@ -55,7 +56,6 @@ MergeTreeWriterSettings::MergeTreeWriterSettings( , use_adaptive_write_buffer_for_dynamic_subcolumns((*storage_settings)[MergeTreeSetting::use_adaptive_write_buffer_for_dynamic_subcolumns]) , adaptive_write_buffer_initial_size((*storage_settings)[MergeTreeSetting::adaptive_write_buffer_initial_size]) { - query_write_settings.max_compression_threads = (*storage_settings)[MergeTreeSetting::max_compression_threads]; } } diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h index fcc72815d8f..4d1d2533729 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.h +++ b/src/Storages/MergeTree/MergeTreeIOSettings.h @@ -2,6 +2,7 @@ #include #include #include +#include #include @@ -60,7 +61,8 @@ struct MergeTreeWriterSettings const MergeTreeSettingsPtr & storage_settings, bool can_use_adaptive_granularity_, bool rewrite_primary_key_, - bool blocks_are_granules_size_ = false); + bool save_marks_in_cache_, + bool blocks_are_granules_size_); size_t min_compress_block_size; size_t max_compress_block_size; @@ -74,6 +76,7 @@ struct MergeTreeWriterSettings bool can_use_adaptive_granularity; bool rewrite_primary_key; + bool save_marks_in_cache; bool 
blocks_are_granules_size; WriteSettings query_write_settings; diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp index 467d2567df1..d69a00643f0 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp @@ -96,29 +96,13 @@ size_t MergeTreeIndexGranularity::countMarksForRows(size_t from_mark, size_t num return to_mark - from_mark; } -size_t MergeTreeIndexGranularity::countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows, size_t min_marks_to_read) const +size_t MergeTreeIndexGranularity::countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const { size_t rows_before_mark = getMarkStartingRow(from_mark); size_t last_row_pos = rows_before_mark + offset_in_rows + number_of_rows; auto it = std::upper_bound(marks_rows_partial_sums.begin(), marks_rows_partial_sums.end(), last_row_pos); size_t to_mark = it - marks_rows_partial_sums.begin(); - /// This is a heuristic to respect min_marks_to_read which is ignored by MergeTreeReadPool in case of remote disk. - /// See comment in IMergeTreeSelectAlgorithm. - if (min_marks_to_read) - { - // check overflow - size_t min_marks_to_read_2 = 0; - bool overflow = common::mulOverflow(min_marks_to_read, 2, min_marks_to_read_2); - - size_t to_mark_overwrite = 0; - if (!overflow) - overflow = common::addOverflow(from_mark, min_marks_to_read_2, to_mark_overwrite); - - if (!overflow && to_mark_overwrite < to_mark) - to_mark = to_mark_overwrite; - } - return getRowsCountInRange(from_mark, std::max(1UL, to_mark)) - offset_in_rows; } diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.h b/src/Storages/MergeTree/MergeTreeIndexGranularity.h index 78a1423ad7e..f66e721ec1e 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.h @@ -37,7 +37,7 @@ public: /// |-----|---------------------------|----|----| /// ^------------------------^-----------^ //// from_mark offset_in_rows number_of_rows - size_t countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows, size_t min_marks_to_read) const; + size_t countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const; /// Total marks size_t getMarksCount() const; diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp index 2af7abc17f9..9211ab51ad5 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp @@ -108,6 +108,14 @@ std::optional MergeTreeIndexGranularityInfo::getMarksTypeFromFilesyste return {}; } +MergeTreeIndexGranularityInfo::MergeTreeIndexGranularityInfo( + MarkType mark_type_, size_t index_granularity_, size_t index_granularity_bytes_) + : mark_type(mark_type_) + , fixed_index_granularity(index_granularity_) + , index_granularity_bytes(index_granularity_bytes_) +{ +} + MergeTreeIndexGranularityInfo::MergeTreeIndexGranularityInfo(const MergeTreeData & storage, MergeTreeDataPartType type_) : MergeTreeIndexGranularityInfo(storage, {storage.canUseAdaptiveGranularity(), (*storage.getSettings())[MergeTreeSetting::compress_marks], type_.getValue()}) { diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h index 87445c99ade..b302d6b1a4b 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h 
+++ b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h @@ -49,6 +49,7 @@ public: MergeTreeIndexGranularityInfo(const MergeTreeData & storage, MarkType mark_type_); MergeTreeIndexGranularityInfo(MergeTreeDataPartType type_, bool is_adaptive_, size_t index_granularity_, size_t index_granularity_bytes_); + MergeTreeIndexGranularityInfo(MarkType mark_type_, size_t index_granularity_, size_t index_granularity_bytes_); void changeGranularityIfRequired(const IDataPartStorage & data_part_storage); diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index c269a0a23ae..f95b840e223 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -41,10 +42,16 @@ namespace ErrorCodes { extern const int INCORRECT_DATA; extern const int INCORRECT_NUMBER_OF_COLUMNS; extern const int INCORRECT_QUERY; + extern const int INVALID_SETTING_VALUE; extern const int LOGICAL_ERROR; extern const int NOT_IMPLEMENTED; } +namespace Setting +{ + extern const SettingsUInt64 hnsw_candidate_list_size_for_search; +} + namespace { @@ -104,7 +111,7 @@ USearchIndexWithSerialization::USearchIndexWithSerialization( { USearchIndex::metric_t metric(dimensions, metric_kind, scalar_kind); - unum::usearch::index_dense_config_t config(usearch_hnsw_params.m, usearch_hnsw_params.ef_construction, usearch_hnsw_params.ef_search); + unum::usearch::index_dense_config_t config(usearch_hnsw_params.connectivity, usearch_hnsw_params.expansion_add, default_expansion_search); config.enable_key_lookups = false; /// we don't do row-to-vector lookups auto result = USearchIndex::make(metric, config); @@ -338,10 +345,11 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ throw Exception(ErrorCodes::INCORRECT_DATA, "Index granularity is too big: more than {} rows per index granule.", std::numeric_limits::max()); if (index_sample_block.columns() > 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected block with single column"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected that index is built over a single column"); - const String & index_column_name = index_sample_block.getByPosition(0).name; - const ColumnPtr & index_column = block.getByName(index_column_name).column; + const auto & index_column_name = index_sample_block.getByPosition(0).name; + + const auto & index_column = block.getByName(index_column_name).column; ColumnPtr column_cut = index_column->cut(*pos, rows_read); const auto * column_array = typeid_cast(column_cut.get()); @@ -375,8 +383,7 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ if (index->size() + rows > std::numeric_limits::max()) throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index would exceed 4 billion entries"); - DataTypePtr data_type = block.getDataTypes()[0]; - const auto * data_type_array = typeid_cast(data_type.get()); + const auto * data_type_array = typeid_cast(block.getByName(index_column_name).type.get()); if (!data_type_array) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); const TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); @@ -399,7 +406,11 @@ MergeTreeIndexConditionVectorSimilarity::MergeTreeIndexConditionVectorSimilarity ContextPtr context) : vector_similarity_condition(query, context) , metric_kind(metric_kind_) + 
, expansion_search(context->getSettingsRef()[Setting::hnsw_candidate_list_size_for_search]) { + if (expansion_search == 0) + throw Exception(ErrorCodes::INVALID_SETTING_VALUE, "Setting 'hnsw_candidate_list_size_for_search' must not be 0"); + } bool MergeTreeIndexConditionVectorSimilarity::mayBeTrueOnGranule(MergeTreeIndexGranulePtr) const @@ -430,13 +441,17 @@ std::vector MergeTreeIndexConditionVectorSimilarity::calculateApproximat const USearchIndexWithSerializationPtr index = granule->index; if (vector_similarity_condition.getDimensions() != index->dimensions()) - throw Exception(ErrorCodes::INCORRECT_QUERY, "The dimension of the space in the request ({}) " - "does not match the dimension in the index ({})", + throw Exception(ErrorCodes::INCORRECT_QUERY, "The dimension of the space in the request ({}) does not match the dimension in the index ({})", vector_similarity_condition.getDimensions(), index->dimensions()); const std::vector reference_vector = vector_similarity_condition.getReferenceVector(); - auto search_result = index->search(reference_vector.data(), limit); + /// We want to run the search with the user-provided value for setting hnsw_candidate_list_size_for_search (aka. expansion_search). + /// The way to do this in USearch is to call index_dense_gt::change_expansion_search. Unfortunately, this introduces a need to + /// synchronize index access, see https://github.com/unum-cloud/usearch/issues/500. As a workaround, we extended USearch' search method + /// to accept a custom expansion_add setting. The config value is only used on the fly, i.e. not persisted in the index. + + auto search_result = index->search(reference_vector.data(), limit, USearchIndex::any_thread(), false, expansion_search); if (!search_result) throw Exception(ErrorCodes::INCORRECT_DATA, "Could not search in vector similarity index. 
Error: {}", String(search_result.error.release())); @@ -501,13 +516,12 @@ MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index) UsearchHnswParams usearch_hnsw_params; /// Optional parameters: - const bool has_six_args = (index.arguments.size() == 6); - if (has_six_args) + const bool has_five_args = (index.arguments.size() == 5); + if (has_five_args) { scalar_kind = quantizationToScalarKind.at(index.arguments[2].safeGet()); - usearch_hnsw_params = {.m = index.arguments[3].safeGet(), - .ef_construction = index.arguments[4].safeGet(), - .ef_search = index.arguments[5].safeGet()}; + usearch_hnsw_params = {.connectivity = index.arguments[3].safeGet(), + .expansion_add = index.arguments[4].safeGet()}; } return std::make_shared(index, metric_kind, scalar_kind, usearch_hnsw_params); @@ -516,25 +530,23 @@ MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index) void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* attach */) { const bool has_two_args = (index.arguments.size() == 2); - const bool has_six_args = (index.arguments.size() == 6); + const bool has_five_args = (index.arguments.size() == 5); /// Check number and type of arguments - if (!has_two_args && !has_six_args) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index must have two or six arguments"); + if (!has_two_args && !has_five_args) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index must have two or five arguments"); if (index.arguments[0].getType() != Field::Types::String) throw Exception(ErrorCodes::INCORRECT_QUERY, "First argument of vector similarity index (method) must be of type String"); if (index.arguments[1].getType() != Field::Types::String) throw Exception(ErrorCodes::INCORRECT_QUERY, "Second argument of vector similarity index (metric) must be of type String"); - if (has_six_args) + if (has_five_args) { if (index.arguments[2].getType() != Field::Types::String) throw Exception(ErrorCodes::INCORRECT_QUERY, "Third argument of vector similarity index (quantization) must be of type String"); if (index.arguments[3].getType() != Field::Types::UInt64) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Fourth argument of vector similarity index (M) must be of type UInt64"); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Fourth argument of vector similarity index (hnsw_max_connections_per_layer) must be of type UInt64"); if (index.arguments[4].getType() != Field::Types::UInt64) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Fifth argument of vector similarity index (ef_construction) must be of type UInt64"); - if (index.arguments[5].getType() != Field::Types::UInt64) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Sixth argument of vector similarity index (ef_search) must be of type UInt64"); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Fifth argument of vector similarity index (hnsw_candidate_list_size_for_construction) must be of type UInt64"); } /// Check that passed arguments are supported @@ -542,18 +554,17 @@ void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* atta throw Exception(ErrorCodes::INCORRECT_DATA, "First argument (method) of vector similarity index is not supported. Supported methods are: {}", joinByComma(methods)); if (!distanceFunctionToMetricKind.contains(index.arguments[1].safeGet())) throw Exception(ErrorCodes::INCORRECT_DATA, "Second argument (distance function) of vector similarity index is not supported. 
Supported distance function are: {}", joinByComma(distanceFunctionToMetricKind)); - if (has_six_args) + if (has_five_args) { if (!quantizationToScalarKind.contains(index.arguments[2].safeGet())) throw Exception(ErrorCodes::INCORRECT_DATA, "Third argument (quantization) of vector similarity index is not supported. Supported quantizations are: {}", joinByComma(quantizationToScalarKind)); /// Call Usearch's own parameter validation method for HNSW-specific parameters - UInt64 m = index.arguments[3].safeGet(); - UInt64 ef_construction = index.arguments[4].safeGet(); - UInt64 ef_search = index.arguments[5].safeGet(); - - unum::usearch::index_dense_config_t config(m, ef_construction, ef_search); + UInt64 connectivity = index.arguments[3].safeGet(); + UInt64 expansion_add = index.arguments[4].safeGet(); + UInt64 expansion_search = default_expansion_search; + unum::usearch::index_dense_config_t config(connectivity, expansion_add, expansion_search); if (auto error = config.validate(); error) throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid parameters passed to vector similarity index. Error: {}", String(error.release())); } diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h index b77473e7c2b..9a81e168393 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h @@ -11,11 +11,18 @@ namespace DB { +/// Defaults for HNSW parameters. Instead of using the default parameters provided by USearch (default_connectivity(), +/// default_expansion_add(), default_expansion_search()), we experimentally came up with our own default parameters. They provide better +/// trade-offs with regards to index construction time, search precision and queries-per-second (speed). +static constexpr size_t default_connectivity = 32; +static constexpr size_t default_expansion_add = 128; +static constexpr size_t default_expansion_search = 256; + +/// Parameters for HNSW index construction. 
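+/// Note that the search-time parameter (expansion_search, aka. ef_search) is no longer an index argument:
+/// it is taken from the query-level setting 'hnsw_candidate_list_size_for_search' at search time.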
struct UsearchHnswParams { - size_t m = unum::usearch::default_connectivity(); - size_t ef_construction = unum::usearch::default_expansion_add(); - size_t ef_search = unum::usearch::default_expansion_search(); + size_t connectivity = default_connectivity; + size_t expansion_add = default_expansion_add; }; using USearchIndex = unum::usearch::index_dense_t; @@ -142,6 +149,7 @@ public: private: const VectorSimilarityCondition vector_similarity_condition; const unum::usearch::metric_kind_t metric_kind; + const size_t expansion_search; }; diff --git a/src/Storages/MergeTree/MergeTreeMarksLoader.cpp b/src/Storages/MergeTree/MergeTreeMarksLoader.cpp index 168134a329f..a271af578cc 100644 --- a/src/Storages/MergeTree/MergeTreeMarksLoader.cpp +++ b/src/Storages/MergeTree/MergeTreeMarksLoader.cpp @@ -3,10 +3,12 @@ #include #include #include +#include #include #include #include #include +#include #include @@ -21,6 +23,11 @@ namespace ProfileEvents namespace DB { +namespace MergeTreeSetting +{ + extern const MergeTreeSettingsString columns_to_prewarm_mark_cache; +} + namespace ErrorCodes { extern const int CANNOT_READ_ALL_DATA; @@ -211,6 +218,7 @@ MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksSync() if (mark_cache) { auto key = MarkCache::hash(fs::path(data_part_storage->getFullPath()) / mrk_path); + if (save_marks_in_cache) { auto callback = [this] { return loadMarksImpl(); }; @@ -249,4 +257,25 @@ std::future MergeTreeMarksLoader::loadMarksAsync() "LoadMarksThread"); } +void addMarksToCache(const IMergeTreeDataPart & part, const PlainMarksByName & cached_marks, MarkCache * mark_cache) +{ + MemoryTrackerBlockerInThread temporarily_disable_memory_tracker; + + for (const auto & [stream_name, marks] : cached_marks) + { + auto mark_path = part.index_granularity_info.getMarksFilePath(stream_name); + auto key = MarkCache::hash(fs::path(part.getDataPartStorage().getFullPath()) / mark_path); + mark_cache->set(key, std::make_shared(*marks)); + } +} + +Names getColumnsToPrewarmMarks(const MergeTreeSettings & settings, const NamesAndTypesList & columns_list) +{ + auto columns_str = settings[MergeTreeSetting::columns_to_prewarm_mark_cache].toString(); + if (columns_str.empty()) + return columns_list.getNames(); + + return parseIdentifiersOrStringLiterals(columns_str, Context::getGlobalContextInstance()->getSettingsRef()); +} + } diff --git a/src/Storages/MergeTree/MergeTreeMarksLoader.h b/src/Storages/MergeTree/MergeTreeMarksLoader.h index 2aa4474e1c5..e031700d6a7 100644 --- a/src/Storages/MergeTree/MergeTreeMarksLoader.h +++ b/src/Storages/MergeTree/MergeTreeMarksLoader.h @@ -1,9 +1,8 @@ #pragma once #include -#include -#include #include +#include namespace DB @@ -11,6 +10,7 @@ namespace DB struct MergeTreeIndexGranularityInfo; using MarksPtr = MarkCache::MappedPtr; +struct ReadSettings; class Threadpool; /// Class that helps to get marks by indexes. @@ -77,4 +77,13 @@ private: using MergeTreeMarksLoaderPtr = std::shared_ptr; +class IMergeTreeDataPart; +struct MergeTreeSettings; + +/// Adds computed marks for part to the marks cache. +void addMarksToCache(const IMergeTreeDataPart & part, const PlainMarksByName & cached_marks, MarkCache * mark_cache); + +/// Returns the list of columns suitable for prewarming of mark cache according to settings. 
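+/// The list is taken from the 'columns_to_prewarm_mark_cache' setting; if the setting is empty, all columns from 'columns_list' are returned.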
+Names getColumnsToPrewarmMarks(const MergeTreeSettings & settings, const NamesAndTypesList & columns_list); + } diff --git a/src/Storages/MergeTree/MergeTreeMutationStatus.cpp b/src/Storages/MergeTree/MergeTreeMutationStatus.cpp index 6553054774e..e0214d6a79d 100644 --- a/src/Storages/MergeTree/MergeTreeMutationStatus.cpp +++ b/src/Storages/MergeTree/MergeTreeMutationStatus.cpp @@ -26,11 +26,11 @@ void checkMutationStatus(std::optional & status, const throw Exception( ErrorCodes::UNFINISHED, "Exception happened during execution of mutation{} '{}' with part '{}' reason: '{}'. This error maybe retryable or not. " - "In case of unretryable error, mutation can be killed with KILL MUTATION query", + "In case of unretryable error, mutation can be killed with KILL MUTATION query \n\n{}\n", mutation_ids.size() > 1 ? "s" : "", boost::algorithm::join(mutation_ids, ", "), status->latest_failed_part, - status->latest_fail_reason); + status->latest_fail_reason, StackTrace().toString()); } } diff --git a/src/Storages/MergeTree/MergeTreePartInfo.h b/src/Storages/MergeTree/MergeTreePartInfo.h index f128722b03b..28b043fcf20 100644 --- a/src/Storages/MergeTree/MergeTreePartInfo.h +++ b/src/Storages/MergeTree/MergeTreePartInfo.h @@ -46,6 +46,13 @@ struct MergeTreePartInfo < std::forward_as_tuple(rhs.partition_id, rhs.min_block, rhs.max_block, rhs.level, rhs.mutation); } + bool operator>(const MergeTreePartInfo & rhs) const + { + return std::forward_as_tuple(partition_id, min_block, max_block, level, mutation) + > std::forward_as_tuple(rhs.partition_id, rhs.min_block, rhs.max_block, rhs.level, rhs.mutation); + } + + bool operator==(const MergeTreePartInfo & rhs) const { return !(*this != rhs); diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index 48a4a37f444..e9c9f2b4b06 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -280,7 +280,7 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me cloned_part_storage = part->makeCloneOnDisk(disk, MergeTreeData::MOVING_DIR_NAME, read_settings, write_settings, cancellation_hook); } - MergeTreeDataPartBuilder builder(*data, part->name, cloned_part_storage); + MergeTreeDataPartBuilder builder(*data, part->name, cloned_part_storage, getReadSettings()); cloned_part.part = std::move(builder).withPartFormatFromDisk().build(); LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part.part->getDataPartStorage().getFullPath()); diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp index a99172c4acd..4e5389f2869 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp @@ -1,6 +1,6 @@ +#include #include #include -#include #include #include #include @@ -8,13 +8,13 @@ #include #include #include -#include #include +#include #include #include -#include #include -#include +#include +#include namespace ProfileEvents @@ -102,6 +102,7 @@ MergeTreePrefetchedReadPool::MergeTreePrefetchedReadPool( const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_) : MergeTreeReadPoolBase( std::move(parts_), @@ -113,9 +114,12 @@ MergeTreePrefetchedReadPool::MergeTreePrefetchedReadPool( reader_settings_, column_names_, settings_, + params_, context_) , 
prefetch_threadpool(getContext()->getPrefetchThreadpool()) - , log(getLogger("MergeTreePrefetchedReadPool(" + (parts_ranges.empty() ? "" : parts_ranges.front().data_part->storage.getStorageID().getNameForLogs()) + ")")) + , log(getLogger( + "MergeTreePrefetchedReadPool(" + + (parts_ranges.empty() ? "" : parts_ranges.front().data_part->storage.getStorageID().getNameForLogs()) + ")")) { /// Tasks creation might also create a lost of readers - check they do not /// do any time consuming operations in ctor. @@ -304,25 +308,11 @@ MergeTreeReadTaskPtr MergeTreePrefetchedReadPool::stealTask(size_t thread, Merge MergeTreeReadTaskPtr MergeTreePrefetchedReadPool::createTask(ThreadTask & task, MergeTreeReadTask * previous_task) { if (task.isValidReadersFuture()) - { - auto size_predictor = task.read_info->shared_size_predictor - ? std::make_unique(*task.read_info->shared_size_predictor) - : nullptr; - - return std::make_unique(task.read_info, task.readers_future->get(), task.ranges, std::move(size_predictor)); - } + return MergeTreeReadPoolBase::createTask(task.read_info, task.readers_future->get(), task.ranges); return MergeTreeReadPoolBase::createTask(task.read_info, task.ranges, previous_task); } -size_t getApproximateSizeOfGranule(const IMergeTreeDataPart & part, const Names & columns_to_read) -{ - ColumnSize columns_size{}; - for (const auto & col_name : columns_to_read) - columns_size.add(part.getColumnSize(col_name)); - return columns_size.data_compressed / part.getMarksCount(); -} - void MergeTreePrefetchedReadPool::fillPerPartStatistics() { per_part_statistics.clear(); @@ -338,11 +328,7 @@ void MergeTreePrefetchedReadPool::fillPerPartStatistics() for (const auto & range : parts_ranges[i].ranges) part_stat.sum_marks += range.end - range.begin; - const auto & columns = settings[Setting::merge_tree_determine_task_size_by_prewhere_columns] && prewhere_info - ? prewhere_info->prewhere_actions.getRequiredColumnsNames() - : column_names; - - part_stat.approx_size_of_mark = getApproximateSizeOfGranule(*read_info.data_part, columns); + part_stat.approx_size_of_mark = read_info.approx_size_of_mark; auto update_stat_for_column = [&](const auto & column_name) { diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.h b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.h index 1a709250937..b94d4ea113a 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.h +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.h @@ -27,6 +27,7 @@ public: const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_); String getName() const override { return "PrefetchedReadPool"; } diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h index 7acc8cd88b4..13ce14e02ec 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -35,7 +35,7 @@ struct PrewhereExprStep bool remove_filter_column = false; bool need_filter = false; - /// Some PREWHERE steps should be executed without conversions. + /// Some PREWHERE steps should be executed without conversions (e.g. early mutation steps) /// A step without alter conversion cannot be executed after step with alter conversions. 
bool perform_alter_conversions = false; }; diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index 1e4922757f4..d266ad55824 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -45,6 +45,7 @@ MergeTreeReadPool::MergeTreeReadPool( const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_) : MergeTreeReadPoolBase( std::move(parts_), @@ -56,6 +57,7 @@ MergeTreeReadPool::MergeTreeReadPool( reader_settings_, column_names_, settings_, + params_, context_) , backoff_settings{context_->getSettingsRef()} , backoff_state{pool_settings.threads} diff --git a/src/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h index c51dca315f9..a0425f0951c 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.h +++ b/src/Storages/MergeTree/MergeTreeReadPool.h @@ -34,6 +34,7 @@ public: const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_); ~MergeTreeReadPool() override = default; diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 6ce1726398a..15a87f463b4 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -10,6 +10,7 @@ namespace Setting { extern const SettingsBool merge_tree_determine_task_size_by_prewhere_columns; extern const SettingsUInt64 merge_tree_min_bytes_per_task_for_remote_reading; + extern const SettingsUInt64 merge_tree_min_read_task_size; } namespace ErrorCodes @@ -27,6 +28,7 @@ MergeTreeReadPoolBase::MergeTreeReadPoolBase( const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & pool_settings_, + const MergeTreeReadTask::BlockSizeParams & block_size_params_, const ContextPtr & context_) : WithContext(context_) , parts_ranges(std::move(parts_)) @@ -38,6 +40,7 @@ MergeTreeReadPoolBase::MergeTreeReadPoolBase( , reader_settings(reader_settings_) , column_names(column_names_) , pool_settings(pool_settings_) + , block_size_params(block_size_params_) , owned_mark_cache(context_->getGlobalContext()->getMarkCache()) , owned_uncompressed_cache(pool_settings_.use_uncompressed_cache ? context_->getGlobalContext()->getUncompressedCache() : nullptr) , header(storage_snapshot->getSampleBlockForColumns(column_names)) @@ -46,7 +49,7 @@ MergeTreeReadPoolBase::MergeTreeReadPoolBase( fillPerPartInfos(context_->getSettingsRef()); } -static size_t getApproxSizeOfPart(const IMergeTreeDataPart & part, const Names & columns_to_read) +static size_t getSizeOfColumns(const IMergeTreeDataPart & part, const Names & columns_to_read) { ColumnSize columns_size{}; for (const auto & col_name : columns_to_read) @@ -55,44 +58,67 @@ static size_t getApproxSizeOfPart(const IMergeTreeDataPart & part, const Names & return columns_size.data_compressed ? columns_size.data_compressed : part.getBytesOnDisk(); } -static size_t calculateMinMarksPerTask( +/// Columns from different prewhere steps are read independently, so it makes sense to use the heaviest set of columns among them as an estimation. 
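+/// The caller is expected to pass a non-empty 'prewhere_steps_columns' (see the check in calculateMinMarksPerTask),
+/// since std::ranges::max_element over an empty range would return the end iterator.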
+static Names +getHeaviestSetOfColumnsAmongPrewhereSteps(const IMergeTreeDataPart & part, const std::vector & prewhere_steps_columns) +{ + const auto it = std::ranges::max_element( + prewhere_steps_columns, + [&](const auto & lhs, const auto & rhs) + { return getSizeOfColumns(part, lhs.getNames()) < getSizeOfColumns(part, rhs.getNames()); }); + return it->getNames(); +} + +static std::pair // (min_marks_per_task, avg_mark_bytes) +calculateMinMarksPerTask( const RangesInDataPart & part, const Names & columns_to_read, - PrewhereInfoPtr prewhere_info, + const std::vector & prewhere_steps_columns, const MergeTreeReadPoolBase::PoolSettings & pool_settings, const Settings & settings) { - size_t min_marks_per_task = pool_settings.min_marks_for_concurrent_read; - const size_t part_marks_count = part.getMarksCount(); - if (part_marks_count && part.data_part->isStoredOnRemoteDisk()) + size_t min_marks_per_task + = std::max(settings[Setting::merge_tree_min_read_task_size], pool_settings.min_marks_for_concurrent_read); + size_t avg_mark_bytes = 0; + /// It is important to obtain marks count from the part itself instead of calling `part.getMarksCount()`, + /// because `part` will report number of marks selected from this part by the query. + const size_t part_marks_count = part.data_part->getMarksCount(); + if (part_marks_count) { - /// We assume that most of the time prewhere does it's job good meaning that lion's share of the rows is filtered out. - /// Which means in turn that for most of the rows we will read only the columns from prewhere clause. - /// So it makes sense to use only them for the estimation. - const auto & columns = settings[Setting::merge_tree_determine_task_size_by_prewhere_columns] && prewhere_info - ? prewhere_info->prewhere_actions.getRequiredColumnsNames() - : columns_to_read; - const size_t part_compressed_bytes = getApproxSizeOfPart(*part.data_part, columns); - - const auto avg_mark_bytes = std::max(part_compressed_bytes / part_marks_count, 1); - const auto min_bytes_per_task = settings[Setting::merge_tree_min_bytes_per_task_for_remote_reading]; - /// We're taking min here because number of tasks shouldn't be too low - it will make task stealing impossible. - /// We also create at least two tasks per thread to have something to steal from a slow thread. - const auto heuristic_min_marks - = std::min(pool_settings.sum_marks / pool_settings.threads / 2, min_bytes_per_task / avg_mark_bytes); - if (heuristic_min_marks > min_marks_per_task) + if (part.data_part->isStoredOnRemoteDisk()) { - LOG_TEST( - &Poco::Logger::get("MergeTreeReadPoolBase"), - "Increasing min_marks_per_task from {} to {} based on columns size heuristic", - min_marks_per_task, - heuristic_min_marks); - min_marks_per_task = heuristic_min_marks; + /// We assume that most of the time prewhere does it's job good meaning that lion's share of the rows is filtered out. + /// Which means in turn that for most of the rows we will read only the columns from prewhere clause. + /// So it makes sense to use only them for the estimation. + const auto & columns = settings[Setting::merge_tree_determine_task_size_by_prewhere_columns] && !prewhere_steps_columns.empty() + ? 
getHeaviestSetOfColumnsAmongPrewhereSteps(*part.data_part, prewhere_steps_columns) + : columns_to_read; + const size_t part_compressed_bytes = getSizeOfColumns(*part.data_part, columns); + + avg_mark_bytes = std::max(part_compressed_bytes / part_marks_count, 1); + const auto min_bytes_per_task = settings[Setting::merge_tree_min_bytes_per_task_for_remote_reading]; + /// We're taking min here because number of tasks shouldn't be too low - it will make task stealing impossible. + /// We also create at least two tasks per thread to have something to steal from a slow thread. + const auto heuristic_min_marks + = std::min(pool_settings.sum_marks / pool_settings.threads / 2, min_bytes_per_task / avg_mark_bytes); + if (heuristic_min_marks > min_marks_per_task) + { + LOG_TEST( + &Poco::Logger::get("MergeTreeReadPoolBase"), + "Increasing min_marks_per_task from {} to {} based on columns size heuristic", + min_marks_per_task, + heuristic_min_marks); + min_marks_per_task = heuristic_min_marks; + } + } + else + { + avg_mark_bytes = std::max(getSizeOfColumns(*part.data_part, columns_to_read) / part_marks_count, 1); } } LOG_TEST(&Poco::Logger::get("MergeTreeReadPoolBase"), "Will use min_marks_per_task={}", min_marks_per_task); - return min_marks_per_task; + return {min_marks_per_task, avg_mark_bytes}; } void MergeTreeReadPoolBase::fillPerPartInfos(const Settings & settings) @@ -159,8 +185,8 @@ void MergeTreeReadPoolBase::fillPerPartInfos(const Settings & settings) } is_part_on_remote_disk.push_back(part_with_ranges.data_part->isStoredOnRemoteDisk()); - read_task_info.min_marks_per_task - = calculateMinMarksPerTask(part_with_ranges, column_names, prewhere_info, pool_settings, settings); + std::tie(read_task_info.min_marks_per_task, read_task_info.approx_size_of_mark) + = calculateMinMarksPerTask(part_with_ranges, column_names, read_task_info.task_columns.pre_columns, pool_settings, settings); per_part_infos.push_back(std::make_shared(std::move(read_task_info))); } } @@ -182,15 +208,20 @@ std::vector MergeTreeReadPoolBase::getPerPartSumMarks() const return per_part_sum_marks; } -MergeTreeReadTaskPtr MergeTreeReadPoolBase::createTask( - MergeTreeReadTaskInfoPtr read_info, - MarkRanges ranges, - MergeTreeReadTask * previous_task) const +MergeTreeReadTaskPtr +MergeTreeReadPoolBase::createTask(MergeTreeReadTaskInfoPtr read_info, MergeTreeReadTask::Readers task_readers, MarkRanges ranges) const { auto task_size_predictor = read_info->shared_size_predictor ? 
std::make_unique(*read_info->shared_size_predictor) : nullptr; /// make a copy + return std::make_unique( + read_info, std::move(task_readers), std::move(ranges), block_size_params, std::move(task_size_predictor)); +} + +MergeTreeReadTaskPtr +MergeTreeReadPoolBase::createTask(MergeTreeReadTaskInfoPtr read_info, MarkRanges ranges, MergeTreeReadTask * previous_task) const +{ auto get_part_name = [](const auto & task_info) -> String { const auto & data_part = task_info.data_part; @@ -229,11 +260,7 @@ MergeTreeReadTaskPtr MergeTreeReadPoolBase::createTask( task_readers = previous_task->releaseReaders(); } - return std::make_unique( - read_info, - std::move(task_readers), - std::move(ranges), - std::move(task_size_predictor)); + return createTask(read_info, std::move(task_readers), std::move(ranges)); } MergeTreeReadTask::Extras MergeTreeReadPoolBase::getExtras() const diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.h b/src/Storages/MergeTree/MergeTreeReadPoolBase.h index 7f9106d476e..19b26156433 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.h +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.h @@ -33,6 +33,7 @@ public: const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_); Block getHeader() const override { return header; } @@ -48,6 +49,7 @@ protected: const MergeTreeReaderSettings reader_settings; const Names column_names; const PoolSettings pool_settings; + const MergeTreeReadTask::BlockSizeParams block_size_params; const MarkCachePtr owned_mark_cache; const UncompressedCachePtr owned_uncompressed_cache; const Block header; @@ -55,6 +57,8 @@ protected: void fillPerPartInfos(const Settings & settings); std::vector getPerPartSumMarks() const; + MergeTreeReadTaskPtr createTask(MergeTreeReadTaskInfoPtr read_info, MergeTreeReadTask::Readers task_readers, MarkRanges ranges) const; + MergeTreeReadTaskPtr createTask( MergeTreeReadTaskInfoPtr read_info, MarkRanges ranges, diff --git a/src/Storages/MergeTree/MergeTreeReadPoolInOrder.cpp b/src/Storages/MergeTree/MergeTreeReadPoolInOrder.cpp index 60f127acdae..c4244ecd982 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolInOrder.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolInOrder.cpp @@ -20,6 +20,7 @@ MergeTreeReadPoolInOrder::MergeTreeReadPoolInOrder( const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_) : MergeTreeReadPoolBase( std::move(parts_), @@ -31,6 +32,7 @@ MergeTreeReadPoolInOrder::MergeTreeReadPoolInOrder( reader_settings_, column_names_, settings_, + params_, context_) , has_limit_below_one_block(has_limit_below_one_block_) , read_type(read_type_) diff --git a/src/Storages/MergeTree/MergeTreeReadPoolInOrder.h b/src/Storages/MergeTree/MergeTreeReadPoolInOrder.h index a3668acb170..41f3ab1061c 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolInOrder.h +++ b/src/Storages/MergeTree/MergeTreeReadPoolInOrder.h @@ -19,6 +19,7 @@ public: const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_); String getName() const override { return "ReadPoolInOrder"; } diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp index 
075c0b1042b..8f06fc312c2 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp @@ -112,6 +112,7 @@ MergeTreeReadPoolParallelReplicas::MergeTreeReadPoolParallelReplicas( const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_) : MergeTreeReadPoolBase( std::move(parts_), @@ -123,6 +124,7 @@ MergeTreeReadPoolParallelReplicas::MergeTreeReadPoolParallelReplicas( reader_settings_, column_names_, settings_, + params_, context_) , extension(std::move(extension_)) , coordination_mode(CoordinationMode::Default) diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.h b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.h index b9f2e133c4a..63816340eb1 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.h +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.h @@ -19,6 +19,7 @@ public: const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_); ~MergeTreeReadPoolParallelReplicas() override = default; diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp index 8ff2a4f31ee..f13da426c45 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp @@ -26,6 +26,7 @@ MergeTreeReadPoolParallelReplicasInOrder::MergeTreeReadPoolParallelReplicasInOrd const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_) : MergeTreeReadPoolBase( std::move(parts_), @@ -37,6 +38,7 @@ MergeTreeReadPoolParallelReplicasInOrder::MergeTreeReadPoolParallelReplicasInOrd reader_settings_, column_names_, settings_, + params_, context_) , extension(std::move(extension_)) , mode(mode_) diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.h b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.h index 98a4d95768a..a05dc54b529 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.h +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.h @@ -20,6 +20,7 @@ public: const MergeTreeReaderSettings & reader_settings_, const Names & column_names_, const PoolSettings & settings_, + const MergeTreeReadTask::BlockSizeParams & params_, const ContextPtr & context_); String getName() const override { return "ReadPoolParallelReplicasInOrder"; } diff --git a/src/Storages/MergeTree/MergeTreeReadTask.cpp b/src/Storages/MergeTree/MergeTreeReadTask.cpp index dd057dc9984..72fddb93a6d 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.cpp +++ b/src/Storages/MergeTree/MergeTreeReadTask.cpp @@ -26,10 +26,12 @@ MergeTreeReadTask::MergeTreeReadTask( MergeTreeReadTaskInfoPtr info_, Readers readers_, MarkRanges mark_ranges_, + const BlockSizeParams & block_size_params_, MergeTreeBlockSizePredictorPtr size_predictor_) : info(std::move(info_)) , readers(std::move(readers_)) , mark_ranges(std::move(mark_ranges_)) + , block_size_params(block_size_params_) , size_predictor(std::move(size_predictor_)) { } @@ -112,30 +114,31 @@ void 
MergeTreeReadTask::initializeRangeReaders(const PrewhereExprInfo & prewhere range_readers = createRangeReaders(readers, prewhere_actions); } -UInt64 MergeTreeReadTask::estimateNumRows(const BlockSizeParams & params) const +UInt64 MergeTreeReadTask::estimateNumRows() const { if (!size_predictor) { - if (params.preferred_block_size_bytes) + if (block_size_params.preferred_block_size_bytes) throw Exception(ErrorCodes::LOGICAL_ERROR, "Size predictor is not set, it might lead to a performance degradation"); - return static_cast(params.max_block_size_rows); + return static_cast(block_size_params.max_block_size_rows); } /// Calculates number of rows will be read using preferred_block_size_bytes. /// Can't be less than avg_index_granularity. - size_t rows_to_read = size_predictor->estimateNumRows(params.preferred_block_size_bytes); + size_t rows_to_read = size_predictor->estimateNumRows(block_size_params.preferred_block_size_bytes); if (!rows_to_read) return rows_to_read; auto total_row_in_current_granule = range_readers.main.numRowsInCurrentGranule(); rows_to_read = std::max(total_row_in_current_granule, rows_to_read); - if (params.preferred_max_column_in_block_size_bytes) + if (block_size_params.preferred_max_column_in_block_size_bytes) { /// Calculates number of rows will be read using preferred_max_column_in_block_size_bytes. - auto rows_to_read_for_max_size_column = size_predictor->estimateNumRowsForMaxSizeColumn(params.preferred_max_column_in_block_size_bytes); + auto rows_to_read_for_max_size_column + = size_predictor->estimateNumRowsForMaxSizeColumn(block_size_params.preferred_max_column_in_block_size_bytes); - double filtration_ratio = std::max(params.min_filtration_ratio, 1.0 - size_predictor->filtered_rows_ratio); + double filtration_ratio = std::max(block_size_params.min_filtration_ratio, 1.0 - size_predictor->filtered_rows_ratio); auto rows_to_read_for_max_size_column_with_filtration = static_cast(rows_to_read_for_max_size_column / filtration_ratio); @@ -148,16 +151,16 @@ UInt64 MergeTreeReadTask::estimateNumRows(const BlockSizeParams & params) const return rows_to_read; const auto & index_granularity = info->data_part->index_granularity; - return index_granularity.countRowsForRows(range_readers.main.currentMark(), rows_to_read, range_readers.main.numReadRowsInCurrentGranule(), params.min_marks_to_read); + return index_granularity.countRowsForRows(range_readers.main.currentMark(), rows_to_read, range_readers.main.numReadRowsInCurrentGranule()); } -MergeTreeReadTask::BlockAndProgress MergeTreeReadTask::read(const BlockSizeParams & params) +MergeTreeReadTask::BlockAndProgress MergeTreeReadTask::read() { if (size_predictor) size_predictor->startBlock(); - UInt64 recommended_rows = estimateNumRows(params); - UInt64 rows_to_read = std::max(static_cast(1), std::min(params.max_block_size_rows, recommended_rows)); + UInt64 recommended_rows = estimateNumRows(); + UInt64 rows_to_read = std::max(static_cast(1), std::min(block_size_params.max_block_size_rows, recommended_rows)); auto read_result = range_readers.main.read(rows_to_read, mark_ranges); diff --git a/src/Storages/MergeTree/MergeTreeReadTask.h b/src/Storages/MergeTree/MergeTreeReadTask.h index 748babb5b4c..2853cc39c51 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.h +++ b/src/Storages/MergeTree/MergeTreeReadTask.h @@ -70,6 +70,7 @@ struct MergeTreeReadTaskInfo VirtualFields const_virtual_fields; /// The amount of data to read per task based on size of the queried columns. 
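The hunks above make two related changes: BlockSizeParams stops being a per-call argument of read()/estimateNumRows() and becomes state the task receives once in its constructor, and the estimate itself combines a byte budget for the whole block, a byte budget for the largest column (scaled by the expected filtration, e.g. from PREWHERE), and the remaining rows of the current granule. The self-contained sketch below restates that heuristic outside of ClickHouse; ToyBlockSizeParams, ToyTask and the fixed per-row sizes are illustrative assumptions, not the real MergeTreeReadTask API.

#include <algorithm>
#include <cstdint>
#include <iostream>

// Illustrative stand-in for MergeTreeReadTask::BlockSizeParams (names are assumptions).
struct ToyBlockSizeParams
{
    uint64_t max_block_size_rows = 65536;
    uint64_t preferred_block_size_bytes = 1'000'000;
    uint64_t preferred_max_column_in_block_size_bytes = 0;
    double min_filtration_ratio = 0.00001;
};

// A toy "task" that, like the patched MergeTreeReadTask, receives the params once
// in its constructor and consults the stored copy on every read.
class ToyTask
{
public:
    ToyTask(ToyBlockSizeParams params_, size_t avg_row_bytes_, size_t max_column_row_bytes_, double filtered_rows_ratio_)
        : params(params_), avg_row_bytes(avg_row_bytes_), max_column_row_bytes(max_column_row_bytes_), filtered_rows_ratio(filtered_rows_ratio_) {}

    uint64_t estimateNumRows(uint64_t rows_in_current_granule) const
    {
        // Bound 1: how many rows fit into the preferred block size in bytes,
        // but never less than what remains of the current granule.
        uint64_t rows_to_read = params.preferred_block_size_bytes / avg_row_bytes;
        rows_to_read = std::max(rows_to_read, rows_in_current_granule);

        // Bound 2: keep the single largest column under its own byte budget,
        // scaled up by the expected share of rows that filtration will drop.
        if (params.preferred_max_column_in_block_size_bytes)
        {
            uint64_t rows_for_max_column = params.preferred_max_column_in_block_size_bytes / max_column_row_bytes;
            double filtration_ratio = std::max(params.min_filtration_ratio, 1.0 - filtered_rows_ratio);
            auto with_filtration = static_cast<uint64_t>(rows_for_max_column / filtration_ratio);
            rows_to_read = std::min(rows_to_read, std::max(rows_in_current_granule, with_filtration));
        }

        // Final clamp to the row limit, as read() does with max_block_size_rows.
        return std::clamp<uint64_t>(rows_to_read, 1, params.max_block_size_rows);
    }

private:
    const ToyBlockSizeParams params;   // stored once, no longer passed to every call
    const size_t avg_row_bytes;
    const size_t max_column_row_bytes;
    const double filtered_rows_ratio;  // fraction of rows the predictor saw filtered out
};

int main()
{
    ToyTask task(ToyBlockSizeParams{}, /*avg_row_bytes=*/100, /*max_column_row_bytes=*/80, /*filtered_rows_ratio=*/0.9);
    std::cout << task.estimateNumRows(/*rows_in_current_granule=*/8192) << '\n';
}

Holding the params in the task is what lets the rest of the patch drop the BlockSizeParams argument from IMergeTreeSelectAlgorithm::readFromTask() and from MergeTreeSelectProcessor.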
size_t min_marks_per_task = 0; + size_t approx_size_of_mark = 0; }; using MergeTreeReadTaskInfoPtr = std::shared_ptr; @@ -110,7 +111,6 @@ public: UInt64 max_block_size_rows = DEFAULT_BLOCK_SIZE; UInt64 preferred_block_size_bytes = 1000000; UInt64 preferred_max_column_in_block_size_bytes = 0; - UInt64 min_marks_to_read = 0; double min_filtration_ratio = 0.00001; }; @@ -127,12 +127,12 @@ public: MergeTreeReadTaskInfoPtr info_, Readers readers_, MarkRanges mark_ranges_, - + const BlockSizeParams & block_size_params_, MergeTreeBlockSizePredictorPtr size_predictor_); void initializeRangeReaders(const PrewhereExprInfo & prewhere_actions); - BlockAndProgress read(const BlockSizeParams & params); + BlockAndProgress read(); bool isFinished() const { return mark_ranges.empty() && range_readers.main.isCurrentRangeFinished(); } const MergeTreeReadTaskInfo & getInfo() const { return *info; } @@ -145,7 +145,7 @@ public: static RangeReaders createRangeReaders(const Readers & readers, const PrewhereExprInfo & prewhere_actions); private: - UInt64 estimateNumRows(const BlockSizeParams & params) const; + UInt64 estimateNumRows() const; /// Shared information required for reading. MergeTreeReadTaskInfoPtr info; @@ -160,6 +160,8 @@ private: /// Ranges to read from data_part MarkRanges mark_ranges; + BlockSizeParams block_size_params; + /// Used to satistfy preferred_block_size_bytes limitation MergeTreeBlockSizePredictorPtr size_predictor; }; diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp index 898bf5a2933..77231d8d392 100644 --- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -262,7 +262,7 @@ MergeTreeReaderWide::FileStreams::iterator MergeTreeReaderWide::addStream(const /*num_columns_in_mark=*/ 1); auto stream_settings = settings; - stream_settings.is_low_cardinality_dictionary = substream_path.size() > 1 && substream_path[substream_path.size() - 2].type == ISerialization::Substream::Type::DictionaryKeys; + stream_settings.is_low_cardinality_dictionary = ISerialization::isLowCardinalityDictionarySubcolumn(substream_path); auto create_stream = [&]() { diff --git a/src/Storages/MergeTree/MergeTreeSelectAlgorithms.cpp b/src/Storages/MergeTree/MergeTreeSelectAlgorithms.cpp index bf97d269dc6..213eab52ad8 100644 --- a/src/Storages/MergeTree/MergeTreeSelectAlgorithms.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectAlgorithms.cpp @@ -30,7 +30,8 @@ MergeTreeReadTaskPtr MergeTreeInReverseOrderSelectAlgorithm::getNewTask(IMergeTr return pool.getTask(part_idx, previous_task); } -MergeTreeReadTask::BlockAndProgress MergeTreeInReverseOrderSelectAlgorithm::readFromTask(MergeTreeReadTask & task, const BlockSizeParams & params) +MergeTreeReadTask::BlockAndProgress +MergeTreeInReverseOrderSelectAlgorithm::readFromTask(MergeTreeReadTask & task) { MergeTreeReadTask::BlockAndProgress res; @@ -42,7 +43,7 @@ MergeTreeReadTask::BlockAndProgress MergeTreeInReverseOrderSelectAlgorithm::read } while (!task.isFinished()) - chunks.push_back(task.read(params)); + chunks.push_back(task.read()); if (chunks.empty()) return {}; diff --git a/src/Storages/MergeTree/MergeTreeSelectAlgorithms.h b/src/Storages/MergeTree/MergeTreeSelectAlgorithms.h index afc8032bb99..eeaefb0dc4f 100644 --- a/src/Storages/MergeTree/MergeTreeSelectAlgorithms.h +++ b/src/Storages/MergeTree/MergeTreeSelectAlgorithms.h @@ -21,7 +21,7 @@ public: virtual bool needNewTask(const MergeTreeReadTask & task) const = 0; virtual MergeTreeReadTaskPtr 
getNewTask(IMergeTreeReadPool & pool, MergeTreeReadTask * previous_task) = 0; - virtual BlockAndProgress readFromTask(MergeTreeReadTask & task, const BlockSizeParams & params) = 0; + virtual BlockAndProgress readFromTask(MergeTreeReadTask & task) = 0; }; using MergeTreeSelectAlgorithmPtr = std::unique_ptr; @@ -35,7 +35,7 @@ public: bool needNewTask(const MergeTreeReadTask & task) const override { return task.isFinished(); } MergeTreeReadTaskPtr getNewTask(IMergeTreeReadPool & pool, MergeTreeReadTask * previous_task) override { return pool.getTask(thread_idx, previous_task); } - BlockAndProgress readFromTask(MergeTreeReadTask & task, const BlockSizeParams & params) override { return task.read(params); } + BlockAndProgress readFromTask(MergeTreeReadTask & task) override { return task.read(); } private: const size_t thread_idx; @@ -50,7 +50,7 @@ public: bool needNewTask(const MergeTreeReadTask & task) const override { return task.isFinished(); } MergeTreeReadTaskPtr getNewTask(IMergeTreeReadPool & pool, MergeTreeReadTask * previous_task) override; - MergeTreeReadTask::BlockAndProgress readFromTask(MergeTreeReadTask & task, const BlockSizeParams & params) override { return task.read(params); } + MergeTreeReadTask::BlockAndProgress readFromTask(MergeTreeReadTask & task) override { return task.read(); } private: const size_t part_idx; @@ -65,7 +65,7 @@ public: bool needNewTask(const MergeTreeReadTask & task) const override { return chunks.empty() && task.isFinished(); } MergeTreeReadTaskPtr getNewTask(IMergeTreeReadPool & pool, MergeTreeReadTask * previous_task) override; - BlockAndProgress readFromTask(MergeTreeReadTask & task, const BlockSizeParams & params) override; + BlockAndProgress readFromTask(MergeTreeReadTask & task) override; private: const size_t part_idx; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 76bcf41d6d8..5efd33ce09a 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -86,7 +86,6 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( MergeTreeSelectAlgorithmPtr algorithm_, const PrewhereInfoPtr & prewhere_info_, const ExpressionActionsSettings & actions_settings_, - const MergeTreeReadTask::BlockSizeParams & block_size_params_, const MergeTreeReaderSettings & reader_settings_) : pool(std::move(pool_)) , algorithm(std::move(algorithm_)) @@ -94,7 +93,6 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( , actions_settings(actions_settings_) , prewhere_actions(getPrewhereActions(prewhere_info, actions_settings, reader_settings_.enable_multiple_prewhere_read_steps)) , reader_settings(reader_settings_) - , block_size_params(block_size_params_) , result_header(transformHeader(pool->getHeader(), prewhere_info)) { if (reader_settings.apply_deleted_mask) @@ -190,7 +188,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); - auto res = algorithm->readFromTask(*task, block_size_params); + auto res = algorithm->readFromTask(*task); if (res.row_count) { diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 8a9e3580a9f..33069a78e33 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -57,7 +57,6 @@ public: MergeTreeSelectAlgorithmPtr algorithm_, const PrewhereInfoPtr & prewhere_info_, const ExpressionActionsSettings & 
actions_settings_, - const MergeTreeReadTask::BlockSizeParams & block_size_params_, const MergeTreeReaderSettings & reader_settings_); String getName() const; diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 4e7d0c0a721..33910d1048d 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -29,218 +30,220 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } +// clang-format off + /** These settings represent fine tunes for internal details of MergeTree storages * and should not be changed by the user without a reason. */ - -#define MERGE_TREE_SETTINGS(M, ALIAS) \ - M(UInt64, min_compress_block_size, 0, "When granule is written, compress the data in buffer if the size of pending uncompressed data is larger or equal than the specified threshold. If this setting is not set, the corresponding global setting is used.", 0) \ - M(UInt64, max_compress_block_size, 0, "Compress the pending uncompressed data in buffer if its size is larger or equal than the specified threshold. Block of data will be compressed even if the current granule is not finished. If this setting is not set, the corresponding global setting is used.", 0) \ - M(UInt64, index_granularity, 8192, "How many rows correspond to one primary key value.", 0) \ - M(UInt64, max_digestion_size_per_segment, 256_MiB, "Max number of bytes to digest per segment to build GIN index.", 0) \ +#define MERGE_TREE_SETTINGS(DECLARE, ALIAS) \ + DECLARE(UInt64, min_compress_block_size, 0, "When granule is written, compress the data in buffer if the size of pending uncompressed data is larger or equal than the specified threshold. If this setting is not set, the corresponding global setting is used.", 0) \ + DECLARE(UInt64, max_compress_block_size, 0, "Compress the pending uncompressed data in buffer if its size is larger or equal than the specified threshold. Block of data will be compressed even if the current granule is not finished. If this setting is not set, the corresponding global setting is used.", 0) \ + DECLARE(UInt64, index_granularity, 8192, "How many rows correspond to one primary key value.", 0) \ + DECLARE(UInt64, max_digestion_size_per_segment, 256_MiB, "Max number of bytes to digest per segment to build GIN index.", 0) \ \ /** Data storing format settings. */ \ - M(UInt64, min_bytes_for_wide_part, 10485760, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \ - M(UInt64, min_rows_for_wide_part, 0, "Minimal number of rows to create part in wide format instead of compact", 0) \ - M(Float, ratio_of_defaults_for_sparse_serialization, 0.9375f, "Minimal ratio of number of default values to number of all values in column to store it in sparse serializations. 
If >= 1, columns will be always written in full serialization.", 0) \ - M(Bool, replace_long_file_name_to_hash, true, "If the file name for column is too long (more than 'max_file_name_length' bytes) replace it to SipHash128", 0) \ - M(UInt64, max_file_name_length, 127, "The maximal length of the file name to keep it as is without hashing", 0) \ - M(UInt64, min_bytes_for_full_part_storage, 0, "Only available in ClickHouse Cloud", 0) \ - M(UInt64, min_rows_for_full_part_storage, 0, "Only available in ClickHouse Cloud", 0) \ - M(UInt64, compact_parts_max_bytes_to_buffer, 128 * 1024 * 1024, "Only available in ClickHouse Cloud", 0) \ - M(UInt64, compact_parts_max_granules_to_buffer, 128, "Only available in ClickHouse Cloud", 0) \ - M(UInt64, compact_parts_merge_max_bytes_to_prefetch_part, 16 * 1024 * 1024, "Only available in ClickHouse Cloud", 0) \ - M(Bool, load_existing_rows_count_for_old_parts, false, "Whether to load existing_rows_count for existing parts. If false, existing_rows_count will be equal to rows_count for existing parts.", 0) \ - M(Bool, use_compact_variant_discriminators_serialization, true, "Use compact version of Variant discriminators serialization.", 0) \ - \ - /** Merge and insert settings */ \ - M(UInt64, max_compression_threads, 1, "Maximum number of threads for writing compressed data. This is an expert-level setting, do not change it.", 0) \ + DECLARE(UInt64, min_bytes_for_wide_part, 10485760, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \ + DECLARE(UInt64, min_rows_for_wide_part, 0, "Minimal number of rows to create part in wide format instead of compact", 0) \ + DECLARE(Float, ratio_of_defaults_for_sparse_serialization, 0.9375f, "Minimal ratio of number of default values to number of all values in column to store it in sparse serializations. If >= 1, columns will be always written in full serialization.", 0) \ + DECLARE(Bool, replace_long_file_name_to_hash, true, "If the file name for column is too long (more than 'max_file_name_length' bytes) replace it to SipHash128", 0) \ + DECLARE(UInt64, max_file_name_length, 127, "The maximal length of the file name to keep it as is without hashing", 0) \ + DECLARE(UInt64, min_bytes_for_full_part_storage, 0, "Only available in ClickHouse Cloud", 0) \ + DECLARE(UInt64, min_rows_for_full_part_storage, 0, "Only available in ClickHouse Cloud", 0) \ + DECLARE(UInt64, compact_parts_max_bytes_to_buffer, 128 * 1024 * 1024, "Only available in ClickHouse Cloud", 0) \ + DECLARE(UInt64, compact_parts_max_granules_to_buffer, 128, "Only available in ClickHouse Cloud", 0) \ + DECLARE(UInt64, compact_parts_merge_max_bytes_to_prefetch_part, 16 * 1024 * 1024, "Only available in ClickHouse Cloud", 0) \ + DECLARE(Bool, load_existing_rows_count_for_old_parts, false, "Whether to load existing_rows_count for existing parts. If false, existing_rows_count will be equal to rows_count for existing parts.", 0) \ + DECLARE(Bool, use_compact_variant_discriminators_serialization, true, "Use compact version of Variant discriminators serialization.", 0) \ \ /** Merge selector settings. */ \ - M(UInt64, merge_selector_blurry_base_scale_factor, 0, "Controls when the logic kicks in relatively to the number of parts in partition. The bigger the factor the more belated reaction will be.", 0) \ - M(UInt64, merge_selector_window_size, 1000, "How many parts to look at once.", 0) \ + DECLARE(UInt64, merge_selector_blurry_base_scale_factor, 0, "Controls when the logic kicks in relatively to the number of parts in partition. 
The bigger the factor the more belated reaction will be.", 0) \ + DECLARE(UInt64, merge_selector_window_size, 1000, "How many parts to look at once.", 0) \ \ /** Merge settings. */ \ - M(UInt64, merge_max_block_size, 8192, "How many rows in blocks should be formed for merge operations. By default has the same value as `index_granularity`.", 0) \ - M(UInt64, merge_max_block_size_bytes, 10 * 1024 * 1024, "How many bytes in blocks should be formed for merge operations. By default has the same value as `index_granularity_bytes`.", 0) \ - M(UInt64, max_bytes_to_merge_at_max_space_in_pool, 150ULL * 1024 * 1024 * 1024, "Maximum in total size of parts to merge, when there are maximum free threads in background pool (or entries in replication queue).", 0) \ - M(UInt64, max_bytes_to_merge_at_min_space_in_pool, 1024 * 1024, "Maximum in total size of parts to merge, when there are minimum free threads in background pool (or entries in replication queue).", 0) \ - M(UInt64, max_replicated_merges_in_queue, 1000, "How many tasks of merging and mutating parts are allowed simultaneously in ReplicatedMergeTree queue.", 0) \ - M(UInt64, max_replicated_mutations_in_queue, 8, "How many tasks of mutating parts are allowed simultaneously in ReplicatedMergeTree queue.", 0) \ - M(UInt64, max_replicated_merges_with_ttl_in_queue, 1, "How many tasks of merging parts with TTL are allowed simultaneously in ReplicatedMergeTree queue.", 0) \ - M(UInt64, number_of_free_entries_in_pool_to_lower_max_size_of_merge, 8, "When there is less than specified number of free entries in pool (or replicated queue), start to lower maximum size of merge to process (or to put in queue). This is to allow small merges to process - not filling the pool with long running merges.", 0) \ - M(UInt64, number_of_free_entries_in_pool_to_execute_mutation, 20, "When there is less than specified number of free entries in pool, do not execute part mutations. This is to leave free threads for regular merges and avoid \"Too many parts\"", 0) \ - M(UInt64, max_number_of_mutations_for_replica, 0, "Limit the number of part mutations per replica to the specified amount. Zero means no limit on the number of mutations per replica (the execution can still be constrained by other settings).", 0) \ - M(UInt64, max_number_of_merges_with_ttl_in_pool, 2, "When there is more than specified number of merges with TTL entries in pool, do not assign new merge with TTL. This is to leave free threads for regular merges and avoid \"Too many parts\"", 0) \ - M(Seconds, old_parts_lifetime, 8 * 60, "How many seconds to keep obsolete parts.", 0) \ - M(Seconds, temporary_directories_lifetime, 86400, "How many seconds to keep tmp_-directories. You should not lower this value because merges and mutations may not be able to work with low value of this setting.", 0) \ - M(Seconds, lock_acquire_timeout_for_background_operations, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, "For background operations like merges, mutations etc. How many seconds before failing to acquire table locks.", 0) \ - M(UInt64, min_rows_to_fsync_after_merge, 0, "Minimal number of rows to do fsync for part after merge (0 - disabled)", 0) \ - M(UInt64, min_compressed_bytes_to_fsync_after_merge, 0, "Minimal number of compressed bytes to do fsync for part after merge (0 - disabled)", 0) \ - M(UInt64, min_compressed_bytes_to_fsync_after_fetch, 0, "Minimal number of compressed bytes to do fsync for part after fetch (0 - disabled)", 0) \ - M(Bool, fsync_after_insert, false, "Do fsync for every inserted part. 
Significantly decreases performance of inserts, not recommended to use with wide parts.", 0) \ - M(Bool, fsync_part_directory, false, "Do fsync for part directory after all part operations (writes, renames, etc.).", 0) \ - M(UInt64, non_replicated_deduplication_window, 0, "How many last blocks of hashes should be kept on disk (0 - disabled).", 0) \ - M(UInt64, max_parts_to_merge_at_once, 100, "Max amount of parts which can be merged at once (0 - disabled). Doesn't affect OPTIMIZE FINAL query.", 0) \ - M(UInt64, merge_selecting_sleep_ms, 5000, "Minimum time to wait before trying to select parts to merge again after no parts were selected. A lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters", 0) \ - M(UInt64, max_merge_selecting_sleep_ms, 60000, "Maximum time to wait before trying to select parts to merge again after no parts were selected. A lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters", 0) \ - M(Float, merge_selecting_sleep_slowdown_factor, 1.2f, "The sleep time for merge selecting task is multiplied by this factor when there's nothing to merge and divided when a merge was assigned", 0) \ - M(UInt64, merge_tree_clear_old_temporary_directories_interval_seconds, 60, "The period of executing the clear old temporary directories operation in background.", 0) \ - M(UInt64, merge_tree_clear_old_parts_interval_seconds, 1, "The period of executing the clear old parts operation in background.", 0) \ - M(UInt64, min_age_to_force_merge_seconds, 0, "If all parts in a certain range are older than this value, range will be always eligible for merging. Set to 0 to disable.", 0) \ - M(Bool, min_age_to_force_merge_on_partition_only, false, "Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on subset.", false) \ - M(UInt64, number_of_free_entries_in_pool_to_execute_optimize_entire_partition, 25, "When there is less than specified number of free entries in pool, do not try to execute optimize entire partition with a merge (this merge is created when set min_age_to_force_merge_seconds > 0 and min_age_to_force_merge_on_partition_only = true). This is to leave free threads for regular merges and avoid \"Too many parts\"", 0) \ - M(Bool, remove_rolled_back_parts_immediately, 1, "Setting for an incomplete experimental feature.", 0) \ - M(UInt64, replicated_max_mutations_in_one_entry, 10000, "Max number of mutation commands that can be merged together and executed in one MUTATE_PART entry (0 means unlimited)", 0) \ - M(UInt64, number_of_mutations_to_delay, 500, "If table has at least that many unfinished mutations, artificially slow down mutations of table. Disabled if set to 0", 0) \ - M(UInt64, number_of_mutations_to_throw, 1000, "If table has at least that many unfinished mutations, throw 'Too many mutations' exception. 
Disabled if set to 0", 0) \ - M(UInt64, min_delay_to_mutate_ms, 10, "Min delay of mutating MergeTree table in milliseconds, if there are a lot of unfinished mutations", 0) \ - M(UInt64, max_delay_to_mutate_ms, 1000, "Max delay of mutating MergeTree table in milliseconds, if there are a lot of unfinished mutations", 0) \ - M(Bool, exclude_deleted_rows_for_part_size_in_merge, false, "Use an estimated source part size (excluding lightweight deleted rows) when selecting parts to merge", 0) \ - M(String, merge_workload, "", "Name of workload to be used to access resources for merges", 0) \ - M(String, mutation_workload, "", "Name of workload to be used to access resources for mutations", 0) \ - M(Milliseconds, background_task_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge or mutation. Can be exceeded if one step takes longer time", 0) \ - M(MergeSelectorAlgorithm, merge_selector_algorithm, MergeSelectorAlgorithm::SIMPLE, "The algorithm to select parts for merges assignment", 0) \ + DECLARE(UInt64, merge_max_block_size, 8192, "How many rows in blocks should be formed for merge operations. By default has the same value as `index_granularity`.", 0) \ + DECLARE(UInt64, merge_max_block_size_bytes, 10 * 1024 * 1024, "How many bytes in blocks should be formed for merge operations. By default has the same value as `index_granularity_bytes`.", 0) \ + DECLARE(UInt64, max_bytes_to_merge_at_max_space_in_pool, 150ULL * 1024 * 1024 * 1024, "Maximum in total size of parts to merge, when there are maximum free threads in background pool (or entries in replication queue).", 0) \ + DECLARE(UInt64, max_bytes_to_merge_at_min_space_in_pool, 1024 * 1024, "Maximum in total size of parts to merge, when there are minimum free threads in background pool (or entries in replication queue).", 0) \ + DECLARE(UInt64, max_replicated_merges_in_queue, 1000, "How many tasks of merging and mutating parts are allowed simultaneously in ReplicatedMergeTree queue.", 0) \ + DECLARE(UInt64, max_replicated_mutations_in_queue, 8, "How many tasks of mutating parts are allowed simultaneously in ReplicatedMergeTree queue.", 0) \ + DECLARE(UInt64, max_replicated_merges_with_ttl_in_queue, 1, "How many tasks of merging parts with TTL are allowed simultaneously in ReplicatedMergeTree queue.", 0) \ + DECLARE(UInt64, number_of_free_entries_in_pool_to_lower_max_size_of_merge, 8, "When there is less than specified number of free entries in pool (or replicated queue), start to lower maximum size of merge to process (or to put in queue). This is to allow small merges to process - not filling the pool with long running merges.", 0) \ + DECLARE(UInt64, number_of_free_entries_in_pool_to_execute_mutation, 20, "When there is less than specified number of free entries in pool, do not execute part mutations. This is to leave free threads for regular merges and avoid \"Too many parts\"", 0) \ + DECLARE(UInt64, max_number_of_mutations_for_replica, 0, "Limit the number of part mutations per replica to the specified amount. Zero means no limit on the number of mutations per replica (the execution can still be constrained by other settings).", 0) \ + DECLARE(UInt64, max_number_of_merges_with_ttl_in_pool, 2, "When there is more than specified number of merges with TTL entries in pool, do not assign new merge with TTL. 
This is to leave free threads for regular merges and avoid \"Too many parts\"", 0) \ + DECLARE(Seconds, old_parts_lifetime, 8 * 60, "How many seconds to keep obsolete parts.", 0) \ + DECLARE(Seconds, temporary_directories_lifetime, 86400, "How many seconds to keep tmp_-directories. You should not lower this value because merges and mutations may not be able to work with low value of this setting.", 0) \ + DECLARE(Seconds, lock_acquire_timeout_for_background_operations, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, "For background operations like merges, mutations etc. How many seconds before failing to acquire table locks.", 0) \ + DECLARE(UInt64, min_rows_to_fsync_after_merge, 0, "Minimal number of rows to do fsync for part after merge (0 - disabled)", 0) \ + DECLARE(UInt64, min_compressed_bytes_to_fsync_after_merge, 0, "Minimal number of compressed bytes to do fsync for part after merge (0 - disabled)", 0) \ + DECLARE(UInt64, min_compressed_bytes_to_fsync_after_fetch, 0, "Minimal number of compressed bytes to do fsync for part after fetch (0 - disabled)", 0) \ + DECLARE(Bool, fsync_after_insert, false, "Do fsync for every inserted part. Significantly decreases performance of inserts, not recommended to use with wide parts.", 0) \ + DECLARE(Bool, fsync_part_directory, false, "Do fsync for part directory after all part operations (writes, renames, etc.).", 0) \ + DECLARE(UInt64, non_replicated_deduplication_window, 0, "How many last blocks of hashes should be kept on disk (0 - disabled).", 0) \ + DECLARE(UInt64, max_parts_to_merge_at_once, 100, "Max amount of parts which can be merged at once (0 - disabled). Doesn't affect OPTIMIZE FINAL query.", 0) \ + DECLARE(UInt64, merge_selecting_sleep_ms, 5000, "Minimum time to wait before trying to select parts to merge again after no parts were selected. A lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters", 0) \ + DECLARE(UInt64, max_merge_selecting_sleep_ms, 60000, "Maximum time to wait before trying to select parts to merge again after no parts were selected. A lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters", 0) \ + DECLARE(Float, merge_selecting_sleep_slowdown_factor, 1.2f, "The sleep time for merge selecting task is multiplied by this factor when there's nothing to merge and divided when a merge was assigned", 0) \ + DECLARE(UInt64, merge_tree_clear_old_temporary_directories_interval_seconds, 60, "The period of executing the clear old temporary directories operation in background.", 0) \ + DECLARE(UInt64, merge_tree_clear_old_parts_interval_seconds, 1, "The period of executing the clear old parts operation in background.", 0) \ + DECLARE(UInt64, min_age_to_force_merge_seconds, 0, "If all parts in a certain range are older than this value, range will be always eligible for merging. Set to 0 to disable.", 0) \ + DECLARE(Bool, min_age_to_force_merge_on_partition_only, false, "Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on subset.", false) \ + DECLARE(UInt64, number_of_free_entries_in_pool_to_execute_optimize_entire_partition, 25, "When there is less than specified number of free entries in pool, do not try to execute optimize entire partition with a merge (this merge is created when set min_age_to_force_merge_seconds > 0 and min_age_to_force_merge_on_partition_only = true). 
This is to leave free threads for regular merges and avoid \"Too many parts\"", 0) \ + DECLARE(Bool, remove_rolled_back_parts_immediately, 1, "Setting for an incomplete experimental feature.", EXPERIMENTAL) \ + DECLARE(UInt64, replicated_max_mutations_in_one_entry, 10000, "Max number of mutation commands that can be merged together and executed in one MUTATE_PART entry (0 means unlimited)", 0) \ + DECLARE(UInt64, number_of_mutations_to_delay, 500, "If table has at least that many unfinished mutations, artificially slow down mutations of table. Disabled if set to 0", 0) \ + DECLARE(UInt64, number_of_mutations_to_throw, 1000, "If table has at least that many unfinished mutations, throw 'Too many mutations' exception. Disabled if set to 0", 0) \ + DECLARE(UInt64, min_delay_to_mutate_ms, 10, "Min delay of mutating MergeTree table in milliseconds, if there are a lot of unfinished mutations", 0) \ + DECLARE(UInt64, max_delay_to_mutate_ms, 1000, "Max delay of mutating MergeTree table in milliseconds, if there are a lot of unfinished mutations", 0) \ + DECLARE(Bool, exclude_deleted_rows_for_part_size_in_merge, false, "Use an estimated source part size (excluding lightweight deleted rows) when selecting parts to merge", 0) \ + DECLARE(String, merge_workload, "", "Name of workload to be used to access resources for merges", 0) \ + DECLARE(String, mutation_workload, "", "Name of workload to be used to access resources for mutations", 0) \ + DECLARE(Milliseconds, background_task_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge or mutation. Can be exceeded if one step takes longer time", 0) \ + DECLARE(MergeSelectorAlgorithm, merge_selector_algorithm, MergeSelectorAlgorithm::SIMPLE, "The algorithm to select parts for merges assignment", EXPERIMENTAL) \ + DECLARE(Bool, merge_selector_enable_heuristic_to_remove_small_parts_at_right, true, "Enable heuristic for selecting parts for merge which removes parts from right side of range, if their size is less than specified ratio (0.01) of sum_size. Works for Simple and StochasticSimple merge selectors", 0) \ + DECLARE(Float, merge_selector_base, 5.0, "Affects write amplification of assigned merges (expert level setting, don't change if you don't understand what it is doing). Works for Simple and StochasticSimple merge selectors", 0) \ \ /** Inserts settings. */ \ - M(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. Disabled if set to 0", 0) \ - M(UInt64, inactive_parts_to_delay_insert, 0, "If table contains at least that many inactive parts in single partition, artificially slow down insert into table.", 0) \ - M(UInt64, parts_to_throw_insert, 3000, "If more than this number active parts in single partition, throw 'Too many parts ...' exception.", 0) \ - M(UInt64, inactive_parts_to_throw_insert, 0, "If more than this number inactive parts in single partition, throw 'Too many inactive parts ...' exception.", 0) \ - M(UInt64, max_avg_part_size_for_too_many_parts, 1ULL * 1024 * 1024 * 1024, "The 'too many parts' check according to 'parts_to_delay_insert' and 'parts_to_throw_insert' will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed or rejected. This allows to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. 
This does not affect the thresholds on inactive parts or total parts.", 0) \ - M(UInt64, max_delay_to_insert, 1, "Max delay of inserting data into MergeTree table in seconds, if there are a lot of unmerged parts in single partition.", 0) \ - M(UInt64, min_delay_to_insert_ms, 10, "Min delay of inserting data into MergeTree table in milliseconds, if there are a lot of unmerged parts in single partition.", 0) \ - M(UInt64, max_parts_in_total, 100000, "If more than this number active parts in all partitions in total, throw 'Too many parts ...' exception.", 0) \ - M(Bool, async_insert, false, "If true, data from INSERT query is stored in queue and later flushed to table in background.", 0) \ - M(Bool, add_implicit_sign_column_constraint_for_collapsing_engine, false, "If true, add implicit constraint for sign column for CollapsingMergeTree engine.", 0) \ - M(Milliseconds, sleep_before_commit_local_part_in_replicated_table_ms, 0, "For testing. Do not change it.", 0) \ - M(Bool, optimize_row_order, false, "Allow reshuffling of rows during part inserts and merges to improve the compressibility of the new part", 0) \ - M(Bool, use_adaptive_write_buffer_for_dynamic_subcolumns, true, "Allow to use adaptive writer buffers during writing dynamic subcolumns to reduce memory usage", 0) \ - M(UInt64, adaptive_write_buffer_initial_size, 16 * 1024, "Initial size of an adaptive write buffer", 0) \ - M(UInt64, min_free_disk_bytes_to_perform_insert, 0, "Minimum free disk space bytes to perform an insert.", 0) \ - M(Float, min_free_disk_ratio_to_perform_insert, 0.0, "Minimum free disk space ratio to perform an insert.", 0) \ + DECLARE(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. Disabled if set to 0", 0) \ + DECLARE(UInt64, inactive_parts_to_delay_insert, 0, "If table contains at least that many inactive parts in single partition, artificially slow down insert into table.", 0) \ + DECLARE(UInt64, parts_to_throw_insert, 3000, "If more than this number active parts in single partition, throw 'Too many parts ...' exception.", 0) \ + DECLARE(UInt64, inactive_parts_to_throw_insert, 0, "If more than this number inactive parts in single partition, throw 'Too many inactive parts ...' exception.", 0) \ + DECLARE(UInt64, max_avg_part_size_for_too_many_parts, 1ULL * 1024 * 1024 * 1024, "The 'too many parts' check according to 'parts_to_delay_insert' and 'parts_to_throw_insert' will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed or rejected. This allows to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.", 0) \ + DECLARE(UInt64, max_delay_to_insert, 1, "Max delay of inserting data into MergeTree table in seconds, if there are a lot of unmerged parts in single partition.", 0) \ + DECLARE(UInt64, min_delay_to_insert_ms, 10, "Min delay of inserting data into MergeTree table in milliseconds, if there are a lot of unmerged parts in single partition.", 0) \ + DECLARE(UInt64, max_parts_in_total, 100000, "If more than this number active parts in all partitions in total, throw 'Too many parts ...' 
exception.", 0) \ + DECLARE(Bool, async_insert, false, "If true, data from INSERT query is stored in queue and later flushed to table in background.", 0) \ + DECLARE(Bool, add_implicit_sign_column_constraint_for_collapsing_engine, false, "If true, add implicit constraint for sign column for CollapsingMergeTree engine.", 0) \ + DECLARE(Milliseconds, sleep_before_commit_local_part_in_replicated_table_ms, 0, "For testing. Do not change it.", 0) \ + DECLARE(Bool, optimize_row_order, false, "Allow reshuffling of rows during part inserts and merges to improve the compressibility of the new part", 0) \ + DECLARE(Bool, use_adaptive_write_buffer_for_dynamic_subcolumns, true, "Allow to use adaptive writer buffers during writing dynamic subcolumns to reduce memory usage", 0) \ + DECLARE(UInt64, adaptive_write_buffer_initial_size, 16 * 1024, "Initial size of an adaptive write buffer", 0) \ + DECLARE(UInt64, min_free_disk_bytes_to_perform_insert, 0, "Minimum free disk space bytes to perform an insert.", 0) \ + DECLARE(Float, min_free_disk_ratio_to_perform_insert, 0.0, "Minimum free disk space ratio to perform an insert.", 0) \ \ /* Part removal settings. */ \ - M(UInt64, simultaneous_parts_removal_limit, 0, "Maximum number of parts to remove during one CleanupThread iteration (0 means unlimited).", 0) \ + DECLARE(UInt64, simultaneous_parts_removal_limit, 0, "Maximum number of parts to remove during one CleanupThread iteration (0 means unlimited).", 0) \ \ /** Replication settings. */ \ - M(UInt64, replicated_deduplication_window, 1000, "How many last blocks of hashes should be kept in ZooKeeper (old blocks will be deleted).", 0) \ - M(UInt64, replicated_deduplication_window_seconds, 7 * 24 * 60 * 60 /* one week */, "Similar to \"replicated_deduplication_window\", but determines old blocks by their lifetime. Hash of an inserted block will be deleted (and the block will not be deduplicated after) if it outside of one \"window\". You can set very big replicated_deduplication_window to avoid duplicating INSERTs during that period of time.", 0) \ - M(UInt64, replicated_deduplication_window_for_async_inserts, 10000, "How many last hash values of async_insert blocks should be kept in ZooKeeper (old blocks will be deleted).", 0) \ - M(UInt64, replicated_deduplication_window_seconds_for_async_inserts, 7 * 24 * 60 * 60 /* one week */, "Similar to \"replicated_deduplication_window_for_async_inserts\", but determines old blocks by their lifetime. Hash of an inserted block will be deleted (and the block will not be deduplicated after) if it outside of one \"window\". You can set very big replicated_deduplication_window to avoid duplicating INSERTs during that period of time.", 0) \ - M(Milliseconds, async_block_ids_cache_update_wait_ms, 100, "How long each insert iteration will wait for async_block_ids_cache update", 0) \ - M(Bool, use_async_block_ids_cache, true, "Use in-memory cache to filter duplicated async inserts based on block ids", 0) \ - M(UInt64, max_replicated_logs_to_keep, 1000, "How many records may be in log, if there is inactive replica. Inactive replica becomes lost when when this number exceed.", 0) \ - M(UInt64, min_replicated_logs_to_keep, 10, "Keep about this number of last records in ZooKeeper log, even if they are obsolete. 
It doesn't affect work of tables: used only to diagnose ZooKeeper log before cleaning.", 0) \ - M(Seconds, prefer_fetch_merged_part_time_threshold, 3600, "If time passed after replication log entry creation exceeds this threshold and sum size of parts is greater than \"prefer_fetch_merged_part_size_threshold\", prefer fetching merged part from replica instead of doing merge locally. To speed up very long merges.", 0) \ - M(UInt64, prefer_fetch_merged_part_size_threshold, 10ULL * 1024 * 1024 * 1024, "If sum size of parts exceeds this threshold and time passed after replication log entry creation is greater than \"prefer_fetch_merged_part_time_threshold\", prefer fetching merged part from replica instead of doing merge locally. To speed up very long merges.", 0) \ - M(Seconds, execute_merges_on_single_replica_time_threshold, 0, "When greater than zero only a single replica starts the merge immediately, others wait up to that amount of time to download the result instead of doing merges locally. If the chosen replica doesn't finish the merge during that amount of time, fallback to standard behavior happens.", 0) \ - M(Seconds, remote_fs_execute_merges_on_single_replica_time_threshold, 3 * 60 * 60, "When greater than zero only a single replica starts the merge immediately if merged part on shared storage and 'allow_remote_fs_zero_copy_replication' is enabled.", 0) \ - M(Seconds, try_fetch_recompressed_part_timeout, 7200, "Recompression works slow in most cases, so we don't start merge with recompression until this timeout and trying to fetch recompressed part from replica which assigned this merge with recompression.", 0) \ - M(Bool, always_fetch_merged_part, false, "If true, replica never merge parts and always download merged parts from other replicas.", 0) \ - M(UInt64, max_suspicious_broken_parts, 100, "Max broken parts, if more - deny automatic deletion.", 0) \ - M(UInt64, max_suspicious_broken_parts_bytes, 1ULL * 1024 * 1024 * 1024, "Max size of all broken parts, if more - deny automatic deletion.", 0) \ - M(UInt64, max_files_to_modify_in_alter_columns, 75, "Not apply ALTER if number of files for modification(deletion, addition) more than this.", 0) \ - M(UInt64, max_files_to_remove_in_alter_columns, 50, "Not apply ALTER, if number of files for deletion more than this.", 0) \ - M(Float, replicated_max_ratio_of_wrong_parts, 0.5, "If ratio of wrong parts to total number of parts is less than this - allow to start.", 0) \ - M(Bool, replicated_can_become_leader, true, "If true, Replicated tables replicas on this node will try to acquire leadership.", 0) \ - M(Seconds, zookeeper_session_expiration_check_period, 60, "ZooKeeper session expiration check period, in seconds.", 0) \ - M(Seconds, initialization_retry_period, 60, "Retry period for table initialization, in seconds.", 0) \ - M(Bool, detach_old_local_parts_when_cloning_replica, true, "Do not remove old local parts when repairing lost replica.", 0) \ - M(Bool, detach_not_byte_identical_parts, false, "Do not remove non byte-idential parts for ReplicatedMergeTree, instead detach them (maybe useful for further analysis).", 0) \ - M(UInt64, max_replicated_fetches_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited.", 0) \ - M(UInt64, max_replicated_sends_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. 
Zero means unlimited.", 0) \ - M(Milliseconds, wait_for_unique_parts_send_before_shutdown_ms, 0, "Before shutdown table will wait for required amount time for unique parts (exist only on current replica) to be fetched by other replicas (0 means disabled).", 0) \ - M(Float, fault_probability_before_part_commit, 0, "For testing. Do not change it.", 0) \ - M(Float, fault_probability_after_part_commit, 0, "For testing. Do not change it.", 0) \ - M(Bool, shared_merge_tree_disable_merges_and_mutations_assignment, false, "Only available in ClickHouse Cloud", 0) \ - M(Float, shared_merge_tree_partitions_hint_ratio_to_reload_merge_pred_for_mutations, 0.5, "Only available in ClickHouse Cloud", 0) \ - M(UInt64, shared_merge_tree_parts_load_batch_size, 32, "Only available in ClickHouse Cloud", 0) \ + DECLARE(UInt64, replicated_deduplication_window, 1000, "How many last blocks of hashes should be kept in ZooKeeper (old blocks will be deleted).", 0) \ + DECLARE(UInt64, replicated_deduplication_window_seconds, 7 * 24 * 60 * 60 /* one week */, "Similar to \"replicated_deduplication_window\", but determines old blocks by their lifetime. Hash of an inserted block will be deleted (and the block will not be deduplicated after) if it outside of one \"window\". You can set very big replicated_deduplication_window to avoid duplicating INSERTs during that period of time.", 0) \ + DECLARE(UInt64, replicated_deduplication_window_for_async_inserts, 10000, "How many last hash values of async_insert blocks should be kept in ZooKeeper (old blocks will be deleted).", 0) \ + DECLARE(UInt64, replicated_deduplication_window_seconds_for_async_inserts, 7 * 24 * 60 * 60 /* one week */, "Similar to \"replicated_deduplication_window_for_async_inserts\", but determines old blocks by their lifetime. Hash of an inserted block will be deleted (and the block will not be deduplicated after) if it outside of one \"window\". You can set very big replicated_deduplication_window to avoid duplicating INSERTs during that period of time.", 0) \ + DECLARE(Milliseconds, async_block_ids_cache_update_wait_ms, 100, "How long each insert iteration will wait for async_block_ids_cache update", 0) \ + DECLARE(Bool, use_async_block_ids_cache, true, "Use in-memory cache to filter duplicated async inserts based on block ids", 0) \ + DECLARE(UInt64, max_replicated_logs_to_keep, 1000, "How many records may be in log, if there is inactive replica. Inactive replica becomes lost when when this number exceed.", 0) \ + DECLARE(UInt64, min_replicated_logs_to_keep, 10, "Keep about this number of last records in ZooKeeper log, even if they are obsolete. It doesn't affect work of tables: used only to diagnose ZooKeeper log before cleaning.", 0) \ + DECLARE(Seconds, prefer_fetch_merged_part_time_threshold, 3600, "If time passed after replication log entry creation exceeds this threshold and sum size of parts is greater than \"prefer_fetch_merged_part_size_threshold\", prefer fetching merged part from replica instead of doing merge locally. To speed up very long merges.", 0) \ + DECLARE(UInt64, prefer_fetch_merged_part_size_threshold, 10ULL * 1024 * 1024 * 1024, "If sum size of parts exceeds this threshold and time passed after replication log entry creation is greater than \"prefer_fetch_merged_part_time_threshold\", prefer fetching merged part from replica instead of doing merge locally. 
To speed up very long merges.", 0) \ + DECLARE(Seconds, execute_merges_on_single_replica_time_threshold, 0, "When greater than zero only a single replica starts the merge immediately, others wait up to that amount of time to download the result instead of doing merges locally. If the chosen replica doesn't finish the merge during that amount of time, fallback to standard behavior happens.", 0) \ + DECLARE(Seconds, remote_fs_execute_merges_on_single_replica_time_threshold, 3 * 60 * 60, "When greater than zero only a single replica starts the merge immediately if merged part on shared storage and 'allow_remote_fs_zero_copy_replication' is enabled.", 0) \ + DECLARE(Seconds, try_fetch_recompressed_part_timeout, 7200, "Recompression works slow in most cases, so we don't start merge with recompression until this timeout and trying to fetch recompressed part from replica which assigned this merge with recompression.", 0) \ + DECLARE(Bool, always_fetch_merged_part, false, "If true, replica never merge parts and always download merged parts from other replicas.", 0) \ + DECLARE(UInt64, max_suspicious_broken_parts, 100, "Max broken parts, if more - deny automatic deletion.", 0) \ + DECLARE(UInt64, max_suspicious_broken_parts_bytes, 1ULL * 1024 * 1024 * 1024, "Max size of all broken parts, if more - deny automatic deletion.", 0) \ + DECLARE(UInt64, max_files_to_modify_in_alter_columns, 75, "Not apply ALTER if number of files for modification(deletion, addition) more than this.", 0) \ + DECLARE(UInt64, max_files_to_remove_in_alter_columns, 50, "Not apply ALTER, if number of files for deletion more than this.", 0) \ + DECLARE(Float, replicated_max_ratio_of_wrong_parts, 0.5, "If ratio of wrong parts to total number of parts is less than this - allow to start.", 0) \ + DECLARE(Bool, replicated_can_become_leader, true, "If true, Replicated tables replicas on this node will try to acquire leadership.", 0) \ + DECLARE(Seconds, zookeeper_session_expiration_check_period, 60, "ZooKeeper session expiration check period, in seconds.", 0) \ + DECLARE(Seconds, initialization_retry_period, 60, "Retry period for table initialization, in seconds.", 0) \ + DECLARE(Bool, detach_old_local_parts_when_cloning_replica, true, "Do not remove old local parts when repairing lost replica.", 0) \ + DECLARE(Bool, detach_not_byte_identical_parts, false, "Do not remove non byte-idential parts for ReplicatedMergeTree, instead detach them (maybe useful for further analysis).", 0) \ + DECLARE(UInt64, max_replicated_fetches_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited.", 0) \ + DECLARE(UInt64, max_replicated_sends_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited.", 0) \ + DECLARE(Milliseconds, wait_for_unique_parts_send_before_shutdown_ms, 0, "Before shutdown table will wait for required amount time for unique parts (exist only on current replica) to be fetched by other replicas (0 means disabled).", 0) \ + DECLARE(Float, fault_probability_before_part_commit, 0, "For testing. Do not change it.", 0) \ + DECLARE(Float, fault_probability_after_part_commit, 0, "For testing. 
Do not change it.", 0) \ + DECLARE(Bool, shared_merge_tree_disable_merges_and_mutations_assignment, false, "Only available in ClickHouse Cloud", 0) \ + DECLARE(Float, shared_merge_tree_partitions_hint_ratio_to_reload_merge_pred_for_mutations, 0.5, "Only available in ClickHouse Cloud", 0) \ + DECLARE(UInt64, shared_merge_tree_parts_load_batch_size, 32, "Only available in ClickHouse Cloud", 0) \ \ /** Check delay of replicas settings. */ \ - M(UInt64, min_relative_delay_to_measure, 120, "Calculate relative replica delay only if absolute delay is not less that this value.", 0) \ - M(UInt64, cleanup_delay_period, 30, "Minimum period to clean old queue logs, blocks hashes and parts.", 0) \ - M(UInt64, max_cleanup_delay_period, 300, "Maximum period to clean old queue logs, blocks hashes and parts.", 0) \ - M(UInt64, cleanup_delay_period_random_add, 10, "Add uniformly distributed value from 0 to x seconds to cleanup_delay_period to avoid thundering herd effect and subsequent DoS of ZooKeeper in case of very large number of tables.", 0) \ - M(UInt64, cleanup_thread_preferred_points_per_iteration, 150, "Preferred batch size for background cleanup (points are abstract but 1 point is approximately equivalent to 1 inserted block).", 0) \ - M(UInt64, cleanup_threads, 128, "Only available in ClickHouse Cloud", 0) \ - M(UInt64, kill_delay_period, 30, "Only available in ClickHouse Cloud", 0) \ - M(UInt64, kill_delay_period_random_add, 10, "Only available in ClickHouse Cloud", 0) \ - M(UInt64, kill_threads, 128, "Only available in ClickHouse Cloud", 0) \ - M(UInt64, min_relative_delay_to_close, 300, "Minimal delay from other replicas to close, stop serving requests and not return Ok during status check.", 0) \ - M(UInt64, min_absolute_delay_to_close, 0, "Minimal absolute delay to close, stop serving requests and not return Ok during status check.", 0) \ - M(UInt64, enable_vertical_merge_algorithm, 1, "Enable usage of Vertical merge algorithm.", 0) \ - M(UInt64, vertical_merge_algorithm_min_rows_to_activate, 16 * 8192, "Minimal (approximate) sum of rows in merging parts to activate Vertical merge algorithm.", 0) \ - M(UInt64, vertical_merge_algorithm_min_bytes_to_activate, 0, "Minimal (approximate) uncompressed size in bytes in merging parts to activate Vertical merge algorithm.", 0) \ - M(UInt64, vertical_merge_algorithm_min_columns_to_activate, 11, "Minimal amount of non-PK columns to activate Vertical merge algorithm.", 0) \ - M(Bool, vertical_merge_remote_filesystem_prefetch, true, "If true prefetching of data from remote filesystem is used for the next column during merge", 0) \ - M(UInt64, max_postpone_time_for_failed_mutations_ms, 5ULL * 60 * 1000, "The maximum postpone time for failed mutations.", 0) \ + DECLARE(UInt64, min_relative_delay_to_measure, 120, "Calculate relative replica delay only if absolute delay is not less that this value.", 0) \ + DECLARE(UInt64, cleanup_delay_period, 30, "Minimum period to clean old queue logs, blocks hashes and parts.", 0) \ + DECLARE(UInt64, max_cleanup_delay_period, 300, "Maximum period to clean old queue logs, blocks hashes and parts.", 0) \ + DECLARE(UInt64, cleanup_delay_period_random_add, 10, "Add uniformly distributed value from 0 to x seconds to cleanup_delay_period to avoid thundering herd effect and subsequent DoS of ZooKeeper in case of very large number of tables.", 0) \ + DECLARE(UInt64, cleanup_thread_preferred_points_per_iteration, 150, "Preferred batch size for background cleanup (points are abstract but 1 point is approximately equivalent to 1 
inserted block).", 0) \ + DECLARE(UInt64, cleanup_threads, 128, "Only available in ClickHouse Cloud", 0) \ + DECLARE(UInt64, kill_delay_period, 30, "Only available in ClickHouse Cloud", 0) \ + DECLARE(UInt64, kill_delay_period_random_add, 10, "Only available in ClickHouse Cloud", 0) \ + DECLARE(UInt64, kill_threads, 128, "Only available in ClickHouse Cloud", 0) \ + DECLARE(UInt64, min_relative_delay_to_close, 300, "Minimal delay from other replicas to close, stop serving requests and not return Ok during status check.", 0) \ + DECLARE(UInt64, min_absolute_delay_to_close, 0, "Minimal absolute delay to close, stop serving requests and not return Ok during status check.", 0) \ + DECLARE(UInt64, enable_vertical_merge_algorithm, 1, "Enable usage of Vertical merge algorithm.", 0) \ + DECLARE(UInt64, vertical_merge_algorithm_min_rows_to_activate, 16 * 8192, "Minimal (approximate) sum of rows in merging parts to activate Vertical merge algorithm.", 0) \ + DECLARE(UInt64, vertical_merge_algorithm_min_bytes_to_activate, 0, "Minimal (approximate) uncompressed size in bytes in merging parts to activate Vertical merge algorithm.", 0) \ + DECLARE(UInt64, vertical_merge_algorithm_min_columns_to_activate, 11, "Minimal amount of non-PK columns to activate Vertical merge algorithm.", 0) \ + DECLARE(Bool, vertical_merge_remote_filesystem_prefetch, true, "If true prefetching of data from remote filesystem is used for the next column during merge", 0) \ + DECLARE(UInt64, max_postpone_time_for_failed_mutations_ms, 5ULL * 60 * 1000, "The maximum postpone time for failed mutations.", 0) \ \ /** Compatibility settings */ \ - M(Bool, allow_suspicious_indices, false, "Reject primary/secondary indexes and sorting keys with identical expressions", 0) \ - M(Bool, compatibility_allow_sampling_expression_not_in_primary_key, false, "Allow to create a table with sampling expression not in primary key. This is needed only to temporarily allow to run the server with wrong tables for backward compatibility.", 0) \ - M(Bool, use_minimalistic_checksums_in_zookeeper, true, "Use small format (dozens bytes) for part checksums in ZooKeeper instead of ordinary ones (dozens KB). Before enabling check that all replicas support new format.", 0) \ - M(Bool, use_minimalistic_part_header_in_zookeeper, true, "Store part header (checksums and columns) in a compact format and a single part znode instead of separate znodes (/columns and /checksums). This can dramatically reduce snapshot size in ZooKeeper. Before enabling check that all replicas support new format.", 0) \ - M(UInt64, finished_mutations_to_keep, 100, "How many records about mutations that are done to keep. 
If zero, then keep all of them.", 0) \ - M(UInt64, min_merge_bytes_to_use_direct_io, 10ULL * 1024 * 1024 * 1024, "Minimal amount of bytes to enable O_DIRECT in merge (0 - disabled).", 0) \ - M(UInt64, index_granularity_bytes, 10 * 1024 * 1024, "Approximate amount of bytes in single granule (0 - disabled).", 0) \ - M(UInt64, min_index_granularity_bytes, 1024, "Minimum amount of bytes in single granule.", 1024) \ - M(Int64, merge_with_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with delete TTL can be repeated.", 0) \ - M(Int64, merge_with_recompression_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with recompression TTL can be repeated.", 0) \ - M(Bool, ttl_only_drop_parts, false, "Only drop altogether the expired parts and not partially prune them.", 0) \ - M(Bool, materialize_ttl_recalculate_only, false, "Only recalculate ttl info when MATERIALIZE TTL", 0) \ - M(Bool, enable_mixed_granularity_parts, true, "Enable parts with adaptive and non adaptive granularity", 0) \ - M(UInt64, concurrent_part_removal_threshold, 100, "Activate concurrent part removal (see 'max_part_removal_threads') only if the number of inactive data parts is at least this.", 0) \ - M(UInt64, zero_copy_concurrent_part_removal_max_split_times, 5, "Max recursion depth for splitting independent Outdated parts ranges into smaller subranges (highly not recommended to change)", 0) \ - M(Float, zero_copy_concurrent_part_removal_max_postpone_ratio, static_cast(0.05), "Max percentage of top level parts to postpone removal in order to get smaller independent ranges (highly not recommended to change)", 0) \ - M(String, storage_policy, "default", "Name of storage disk policy", 0) \ - M(String, disk, "", "Name of storage disk. Can be specified instead of storage policy.", 0) \ - M(Bool, allow_nullable_key, false, "Allow Nullable types as primary keys.", 0) \ - M(Bool, remove_empty_parts, true, "Remove empty parts after they were pruned by TTL, mutation, or collapsing merge algorithm.", 0) \ - M(Bool, assign_part_uuids, false, "Generate UUIDs for parts. Before enabling check that all replicas support new format.", 0) \ - M(Int64, max_partitions_to_read, -1, "Limit the max number of partitions that can be accessed in one query. <= 0 means unlimited. This setting is the default that can be overridden by the query-level setting with the same name.", 0) \ - M(UInt64, max_concurrent_queries, 0, "Max number of concurrently executed queries related to the MergeTree table (0 - disabled). Queries will still be limited by other max_concurrent_queries settings.", 0) \ - M(UInt64, min_marks_to_honor_max_concurrent_queries, 0, "Minimal number of marks to honor the MergeTree-level's max_concurrent_queries (0 - disabled). Queries will still be limited by other max_concurrent_queries settings.", 0) \ - M(UInt64, min_bytes_to_rebalance_partition_over_jbod, 0, "Minimal amount of bytes to enable part rebalance over JBOD array (0 - disabled).", 0) \ - M(Bool, check_sample_column_is_correct, true, "Check columns or columns by hash for sampling are unsigned integer.", 0) \ - M(Bool, allow_vertical_merges_from_compact_to_wide_parts, true, "Allows vertical merges from compact to wide parts. 
This settings must have the same value on all replicas", 0) \ - M(Bool, enable_the_endpoint_id_with_zookeeper_name_prefix, false, "Enable the endpoint id with zookeeper name prefix for the replicated merge tree table", 0) \ - M(UInt64, zero_copy_merge_mutation_min_parts_size_sleep_before_lock, 1ULL * 1024 * 1024 * 1024, "If zero copy replication is enabled sleep random amount of time before trying to lock depending on parts size for merge or mutation", 0) \ - M(Bool, allow_floating_point_partition_key, false, "Allow floating point as partition key", 0) \ - M(UInt64, sleep_before_loading_outdated_parts_ms, 0, "For testing. Do not change it.", 0) \ - M(Bool, always_use_copy_instead_of_hardlinks, false, "Always copy data instead of hardlinking during mutations/replaces/detaches and so on.", 0) \ - M(Bool, disable_freeze_partition_for_zero_copy_replication, true, "Disable FREEZE PARTITION query for zero copy replication.", 0) \ - M(Bool, disable_detach_partition_for_zero_copy_replication, true, "Disable DETACH PARTITION query for zero copy replication.", 0) \ - M(Bool, disable_fetch_partition_for_zero_copy_replication, true, "Disable FETCH PARTITION query for zero copy replication.", 0) \ - M(Bool, enable_block_number_column, false, "Enable persisting column _block_number for each row.", 0) ALIAS(allow_experimental_block_number_column) \ - M(Bool, enable_block_offset_column, false, "Enable persisting column _block_offset for each row.", 0) \ + DECLARE(Bool, allow_suspicious_indices, false, "Reject primary/secondary indexes and sorting keys with identical expressions", 0) \ + DECLARE(Bool, compatibility_allow_sampling_expression_not_in_primary_key, false, "Allow to create a table with sampling expression not in primary key. This is needed only to temporarily allow to run the server with wrong tables for backward compatibility.", 0) \ + DECLARE(Bool, use_minimalistic_checksums_in_zookeeper, true, "Use small format (dozens bytes) for part checksums in ZooKeeper instead of ordinary ones (dozens KB). Before enabling check that all replicas support new format.", 0) \ + DECLARE(Bool, use_minimalistic_part_header_in_zookeeper, true, "Store part header (checksums and columns) in a compact format and a single part znode instead of separate znodes (/columns and /checksums). This can dramatically reduce snapshot size in ZooKeeper. Before enabling check that all replicas support new format.", 0) \ + DECLARE(UInt64, finished_mutations_to_keep, 100, "How many records about mutations that are done to keep. 
If zero, then keep all of them.", 0) \ + DECLARE(UInt64, min_merge_bytes_to_use_direct_io, 10ULL * 1024 * 1024 * 1024, "Minimal amount of bytes to enable O_DIRECT in merge (0 - disabled).", 0) \ + DECLARE(UInt64, index_granularity_bytes, 10 * 1024 * 1024, "Approximate amount of bytes in single granule (0 - disabled).", 0) \ + DECLARE(UInt64, min_index_granularity_bytes, 1024, "Minimum amount of bytes in single granule.", 1024) \ + DECLARE(Int64, merge_with_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with delete TTL can be repeated.", 0) \ + DECLARE(Int64, merge_with_recompression_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with recompression TTL can be repeated.", 0) \ + DECLARE(Bool, ttl_only_drop_parts, false, "Only drop altogether the expired parts and not partially prune them.", 0) \ + DECLARE(Bool, materialize_ttl_recalculate_only, false, "Only recalculate ttl info when MATERIALIZE TTL", 0) \ + DECLARE(Bool, enable_mixed_granularity_parts, true, "Enable parts with adaptive and non adaptive granularity", 0) \ + DECLARE(UInt64, concurrent_part_removal_threshold, 100, "Activate concurrent part removal (see 'max_part_removal_threads') only if the number of inactive data parts is at least this.", 0) \ + DECLARE(UInt64, zero_copy_concurrent_part_removal_max_split_times, 5, "Max recursion depth for splitting independent Outdated parts ranges into smaller subranges (highly not recommended to change)", 0) \ + DECLARE(Float, zero_copy_concurrent_part_removal_max_postpone_ratio, static_cast(0.05), "Max percentage of top level parts to postpone removal in order to get smaller independent ranges (highly not recommended to change)", 0) \ + DECLARE(String, storage_policy, "default", "Name of storage disk policy", 0) \ + DECLARE(String, disk, "", "Name of storage disk. Can be specified instead of storage policy.", 0) \ + DECLARE(Bool, allow_nullable_key, false, "Allow Nullable types as primary keys.", 0) \ + DECLARE(Bool, remove_empty_parts, true, "Remove empty parts after they were pruned by TTL, mutation, or collapsing merge algorithm.", 0) \ + DECLARE(Bool, assign_part_uuids, false, "Generate UUIDs for parts. Before enabling check that all replicas support new format.", 0) \ + DECLARE(Int64, max_partitions_to_read, -1, "Limit the max number of partitions that can be accessed in one query. <= 0 means unlimited. This setting is the default that can be overridden by the query-level setting with the same name.", 0) \ + DECLARE(UInt64, max_concurrent_queries, 0, "Max number of concurrently executed queries related to the MergeTree table (0 - disabled). Queries will still be limited by other max_concurrent_queries settings.", 0) \ + DECLARE(UInt64, min_marks_to_honor_max_concurrent_queries, 0, "Minimal number of marks to honor the MergeTree-level's max_concurrent_queries (0 - disabled). Queries will still be limited by other max_concurrent_queries settings.", 0) \ + DECLARE(UInt64, min_bytes_to_rebalance_partition_over_jbod, 0, "Minimal amount of bytes to enable part rebalance over JBOD array (0 - disabled).", 0) \ + DECLARE(Bool, check_sample_column_is_correct, true, "Check columns or columns by hash for sampling are unsigned integer.", 0) \ + DECLARE(Bool, allow_vertical_merges_from_compact_to_wide_parts, true, "Allows vertical merges from compact to wide parts. 
This settings must have the same value on all replicas", 0) \ + DECLARE(Bool, enable_the_endpoint_id_with_zookeeper_name_prefix, false, "Enable the endpoint id with zookeeper name prefix for the replicated merge tree table", 0) \ + DECLARE(UInt64, zero_copy_merge_mutation_min_parts_size_sleep_before_lock, 1ULL * 1024 * 1024 * 1024, "If zero copy replication is enabled sleep random amount of time before trying to lock depending on parts size for merge or mutation", 0) \ + DECLARE(Bool, allow_floating_point_partition_key, false, "Allow floating point as partition key", 0) \ + DECLARE(UInt64, sleep_before_loading_outdated_parts_ms, 0, "For testing. Do not change it.", 0) \ + DECLARE(Bool, always_use_copy_instead_of_hardlinks, false, "Always copy data instead of hardlinking during mutations/replaces/detaches and so on.", 0) \ + DECLARE(Bool, disable_freeze_partition_for_zero_copy_replication, true, "Disable FREEZE PARTITION query for zero copy replication.", 0) \ + DECLARE(Bool, disable_detach_partition_for_zero_copy_replication, true, "Disable DETACH PARTITION query for zero copy replication.", 0) \ + DECLARE(Bool, disable_fetch_partition_for_zero_copy_replication, true, "Disable FETCH PARTITION query for zero copy replication.", 0) \ + DECLARE(Bool, enable_block_number_column, false, "Enable persisting column _block_number for each row.", 0) ALIAS(allow_experimental_block_number_column) \ + DECLARE(Bool, enable_block_offset_column, false, "Enable persisting column _block_offset for each row.", 0) \ \ /** Experimental/work in progress feature. Unsafe for production. */ \ - M(UInt64, part_moves_between_shards_enable, 0, "Experimental/Incomplete feature to move parts between shards. Does not take into account sharding expressions.", 0) \ - M(UInt64, part_moves_between_shards_delay_seconds, 30, "Time to wait before/after moving parts between shards.", 0) \ - M(Bool, allow_remote_fs_zero_copy_replication, false, "Don't use this setting in production, because it is not ready.", 0) \ - M(String, remote_fs_zero_copy_zookeeper_path, "/clickhouse/zero_copy", "ZooKeeper path for zero-copy table-independent info.", 0) \ - M(Bool, remote_fs_zero_copy_path_compatible_mode, false, "Run zero-copy in compatible mode during conversion process.", 0) \ - M(Bool, cache_populated_by_fetch, false, "Only available in ClickHouse Cloud", 0) \ - M(Bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges", 0) \ - M(Bool, allow_experimental_replacing_merge_with_cleanup, false, "Allow experimental CLEANUP merges for ReplacingMergeTree with is_deleted column.", 0) \ + DECLARE(UInt64, part_moves_between_shards_enable, 0, "Experimental/Incomplete feature to move parts between shards. 
Does not take into account sharding expressions.", EXPERIMENTAL) \ + DECLARE(UInt64, part_moves_between_shards_delay_seconds, 30, "Time to wait before/after moving parts between shards.", EXPERIMENTAL) \ + DECLARE(Bool, allow_remote_fs_zero_copy_replication, false, "Don't use this setting in production, because it is not ready.", BETA) \ + DECLARE(String, remote_fs_zero_copy_zookeeper_path, "/clickhouse/zero_copy", "ZooKeeper path for zero-copy table-independent info.", EXPERIMENTAL) \ + DECLARE(Bool, remote_fs_zero_copy_path_compatible_mode, false, "Run zero-copy in compatible mode during conversion process.", EXPERIMENTAL) \ + DECLARE(Bool, cache_populated_by_fetch, false, "Only available in ClickHouse Cloud", EXPERIMENTAL) \ + DECLARE(Bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges", EXPERIMENTAL) \ + DECLARE(Bool, allow_experimental_replacing_merge_with_cleanup, false, "Allow experimental CLEANUP merges for ReplacingMergeTree with is_deleted column.", EXPERIMENTAL) \ \ /** Compress marks and primary key. */ \ - M(Bool, compress_marks, true, "Marks support compression, reduce mark file size and speed up network transmission.", 0) \ - M(Bool, compress_primary_key, true, "Primary key support compression, reduce primary key file size and speed up network transmission.", 0) \ - M(String, marks_compression_codec, "ZSTD(3)", "Compression encoding used by marks, marks are small enough and cached, so the default compression is ZSTD(3).", 0) \ - M(String, primary_key_compression_codec, "ZSTD(3)", "Compression encoding used by primary, primary key is small enough and cached, so the default compression is ZSTD(3).", 0) \ - M(UInt64, marks_compress_block_size, 65536, "Mark compress block size, the actual size of the block to compress.", 0) \ - M(UInt64, primary_key_compress_block_size, 65536, "Primary compress block size, the actual size of the block to compress.", 0) \ - M(Bool, primary_key_lazy_load, true, "Load primary key in memory on first use instead of on table initialization. This can save memory in the presence of a large number of tables.", 0) \ - M(Float, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns, 0.9f, "If the value of a column of the primary key in data part changes at least in this ratio of times, skip loading next columns in memory. This allows to save memory usage by not loading useless columns of the primary key.", 0) \ + DECLARE(Bool, compress_marks, true, "Marks support compression, reduce mark file size and speed up network transmission.", 0) \ + DECLARE(Bool, compress_primary_key, true, "Primary key support compression, reduce primary key file size and speed up network transmission.", 0) \ + DECLARE(String, marks_compression_codec, "ZSTD(3)", "Compression encoding used by marks, marks are small enough and cached, so the default compression is ZSTD(3).", 0) \ + DECLARE(String, primary_key_compression_codec, "ZSTD(3)", "Compression encoding used by primary, primary key is small enough and cached, so the default compression is ZSTD(3).", 0) \ + DECLARE(UInt64, marks_compress_block_size, 65536, "Mark compress block size, the actual size of the block to compress.", 0) \ + DECLARE(UInt64, primary_key_compress_block_size, 65536, "Primary compress block size, the actual size of the block to compress.", 0) \ + DECLARE(Bool, primary_key_lazy_load, true, "Load primary key in memory on first use instead of on table initialization. 
This can save memory in the presence of a large number of tables.", 0) \ + DECLARE(Float, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns, 0.9f, "If the value of a column of the primary key in data part changes at least in this ratio of times, skip loading next columns in memory. This allows to save memory usage by not loading useless columns of the primary key.", 0) \ + DECLARE(Bool, prewarm_mark_cache, false, "If true mark cache will be prewarmed by saving marks to mark cache on inserts, merges, fetches and on startup of server", 0) \ + DECLARE(String, columns_to_prewarm_mark_cache, "", "List of columns to prewarm mark cache for (if enabled). Empty means all columns", 0) \ /** Projection settings. */ \ - M(UInt64, max_projections, 25, "The maximum number of merge tree projections.", 0) \ - M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop projections of this table's relevant parts, or rebuild the projections.", 0) \ - M(DeduplicateMergeProjectionMode, deduplicate_merge_projection_mode, DeduplicateMergeProjectionMode::THROW, "Whether to allow create projection for the table with non-classic MergeTree. Ignore option is purely for compatibility which might result in incorrect answer. Otherwise, if allowed, what is the action when merge, drop or rebuild.", 0) \ + DECLARE(UInt64, max_projections, 25, "The maximum number of merge tree projections.", 0) \ + DECLARE(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop projections of this table's relevant parts, or rebuild the projections.", 0) \ + DECLARE(DeduplicateMergeProjectionMode, deduplicate_merge_projection_mode, DeduplicateMergeProjectionMode::THROW, "Whether to allow create projection for the table with non-classic MergeTree. Ignore option is purely for compatibility which might result in incorrect answer. Otherwise, if allowed, what is the action when merge, drop or rebuild.", 0) \ #define MAKE_OBSOLETE_MERGE_TREE_SETTING(M, TYPE, NAME, DEFAULT) \ - M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", BaseSettingsHelpers::Flags::OBSOLETE) + M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", SettingsTierType::OBSOLETE) #define OBSOLETE_MERGE_TREE_SETTINGS(M, ALIAS) \ /** Obsolete settings that do nothing but left for compatibility reasons. */ \ @@ -278,8 +281,9 @@ namespace ErrorCodes MERGE_TREE_SETTINGS(M, ALIAS) \ OBSOLETE_MERGE_TREE_SETTINGS(M, ALIAS) -DECLARE_SETTINGS_TRAITS(MergeTreeSettingsTraits, LIST_OF_MERGE_TREE_SETTINGS) +// clang-format on +DECLARE_SETTINGS_TRAITS(MergeTreeSettingsTraits, LIST_OF_MERGE_TREE_SETTINGS) /** Settings for the MergeTree family of engines. * Could be loaded from config or from a CREATE TABLE query (SETTINGS clause). 
@@ -333,7 +337,7 @@ void MergeTreeSettingsImpl::loadFromQuery(ASTStorage & storage_def, ContextPtr c else if (name == "storage_policy") found_storage_policy_setting = true; - if (found_disk_setting && found_storage_policy_setting) + if (!is_attach && found_disk_setting && found_storage_policy_setting) { throw Exception( ErrorCodes::BAD_ARGUMENTS, @@ -489,8 +493,7 @@ void MergeTreeColumnSettings::validate(const SettingsChanges & changes) } } -#define INITIALIZE_SETTING_EXTERN(TYPE, NAME, DEFAULT, DESCRIPTION, FLAGS) \ - MergeTreeSettings ## TYPE NAME = & MergeTreeSettings ## Impl :: NAME; +#define INITIALIZE_SETTING_EXTERN(TYPE, NAME, DEFAULT, DESCRIPTION, FLAGS) MergeTreeSettings##TYPE NAME = &MergeTreeSettingsImpl ::NAME; namespace MergeTreeSetting { @@ -514,18 +517,7 @@ MergeTreeSettings::MergeTreeSettings(MergeTreeSettings && settings) noexcept MergeTreeSettings::~MergeTreeSettings() = default; -#define IMPLEMENT_SETTING_SUBSCRIPT_OPERATOR(CLASS_NAME, TYPE) \ - const SettingField##TYPE & MergeTreeSettings::operator[](CLASS_NAME##TYPE t) const \ - { \ - return impl.get()->*t; \ - } \ -SettingField##TYPE & MergeTreeSettings::operator[](CLASS_NAME##TYPE t) \ - { \ - return impl.get()->*t; \ - } - MERGETREE_SETTINGS_SUPPORTED_TYPES(MergeTreeSettings, IMPLEMENT_SETTING_SUBSCRIPT_OPERATOR) -#undef IMPLEMENT_SETTING_SUBSCRIPT_OPERATOR bool MergeTreeSettings::has(std::string_view name) const { @@ -662,7 +654,8 @@ void MergeTreeSettings::dumpToSystemMergeTreeSettingsColumns(MutableColumnsAndCo res_columns[5]->insert(max); res_columns[6]->insert(writability == SettingConstraintWritability::CONST); res_columns[7]->insert(setting.getTypeName()); - res_columns[8]->insert(setting.isObsolete()); + res_columns[8]->insert(setting.getTier() == SettingsTierType::OBSOLETE); + res_columns[9]->insert(setting.getTier()); } } diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index 1e42f16736d..99852309c77 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -94,7 +94,7 @@ void MergeTreeSink::consume(Chunk & chunk) DelayedPartitions partitions; const Settings & settings = context->getSettingsRef(); - size_t streams = 0; + size_t total_streams = 0; bool support_parallel_write = false; auto token_info = chunk.getChunkInfos().get(); @@ -153,16 +153,18 @@ void MergeTreeSink::consume(Chunk & chunk) max_insert_delayed_streams_for_parallel_write = 0; /// In case of too much columns/parts in block, flush explicitly. 
- streams += temp_part.streams.size(); + size_t current_streams = 0; + for (const auto & stream : temp_part.streams) + current_streams += stream.stream->getNumberOfOpenStreams(); - if (streams > max_insert_delayed_streams_for_parallel_write) + if (total_streams + current_streams > max_insert_delayed_streams_for_parallel_write) { finishDelayedChunk(); delayed_chunk = std::make_unique(); delayed_chunk->partitions = std::move(partitions); finishDelayedChunk(); - streams = 0; + total_streams = 0; support_parallel_write = false; partitions = DelayedPartitions{}; } @@ -174,6 +176,8 @@ void MergeTreeSink::consume(Chunk & chunk) .block_dedup_token = block_dedup_token, .part_counters = std::move(part_counters), }); + + total_streams += current_streams; } if (need_to_define_dedup_token) @@ -243,6 +247,15 @@ void MergeTreeSink::finishDelayedChunk() /// Part can be deduplicated, so increment counters and add to part log only if it's really added if (added) { + if (auto * mark_cache = storage.getContext()->getMarkCache().get()) + { + for (const auto & stream : partition.temp_part.streams) + { + auto marks = stream.stream->releaseCachedMarks(); + addMarksToCache(*part, marks, mark_cache); + } + } + auto counters_snapshot = std::make_shared(partition.part_counters.getPartiallyAtomicSnapshot()); PartLog::addNewPart(storage.getContext(), PartLog::PartLogEntry(part, partition.elapsed_ns, counters_snapshot)); StorageMergeTree::incrementInsertedPartsProfileEvent(part->getType()); diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 4ee68580d3f..77c34aae30a 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -25,6 +25,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( CompressionCodecPtr default_codec_, TransactionID tid, bool reset_columns_, + bool save_marks_in_cache, bool blocks_are_granules_size, const WriteSettings & write_settings_, const MergeTreeIndexGranularity & computed_index_granularity) @@ -39,6 +40,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( storage_settings, data_part->index_granularity_info.mark_type.adaptive, /* rewrite_primary_key = */ true, + save_marks_in_cache, blocks_are_granules_size); /// TODO: looks like isStoredOnDisk() is always true for MergeTreeDataPart diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.h b/src/Storages/MergeTree/MergedBlockOutputStream.h index e212fe5bb5a..060778866e0 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.h +++ b/src/Storages/MergeTree/MergedBlockOutputStream.h @@ -24,6 +24,7 @@ public: CompressionCodecPtr default_codec_, TransactionID tid, bool reset_columns_ = false, + bool save_marks_in_cache = false, bool blocks_are_granules_size = false, const WriteSettings & write_settings = {}, const MergeTreeIndexGranularity & computed_index_granularity = {}); diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index 05cd77dcd40..bed539dfe02 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -19,6 +19,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( const MergeTreeIndices & indices_to_recalc, const ColumnsStatistics & stats_to_recalc_, WrittenOffsetColumns * offset_columns_, + bool save_marks_in_cache, const MergeTreeIndexGranularity & index_granularity, const MergeTreeIndexGranularityInfo * index_granularity_info) : 
IMergedBlockOutputStream(data_part->storage.getSettings(), data_part->getDataPartStoragePtr(), metadata_snapshot_, columns_list_, /*reset_columns=*/ true) @@ -30,7 +31,9 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( data_part->storage.getContext()->getWriteSettings(), storage_settings, index_granularity_info ? index_granularity_info->mark_type.adaptive : data_part->storage.canUseAdaptiveGranularity(), - /* rewrite_primary_key = */ false); + /* rewrite_primary_key = */ false, + save_marks_in_cache, + /* blocks_are_granules_size = */ false); writer = createMergeTreeDataPartWriter( data_part->getType(), diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h index e837a62743e..f6bf9e37a58 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h @@ -22,6 +22,7 @@ public: const MergeTreeIndices & indices_to_recalc_, const ColumnsStatistics & stats_to_recalc_, WrittenOffsetColumns * offset_columns_ = nullptr, + bool save_marks_in_cache = false, const MergeTreeIndexGranularity & index_granularity = {}, const MergeTreeIndexGranularityInfo * index_granularity_info_ = nullptr); diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index 54215cd2dba..6716144ce81 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -226,6 +226,10 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare() future_mutated_part, task_context); + storage.writePartLog( + PartLogElement::MUTATE_PART_START, {}, 0, + entry.new_part_name, new_part, future_mutated_part->parts, merge_mutate_entry.get(), {}); + mutate_task = storage.merger_mutator.mutatePartToTemporaryPart( future_mutated_part, metadata_snapshot, commands, merge_mutate_entry.get(), entry.create_time, task_context, NO_TRANSACTION_PTR, reserved_space, table_lock_holder); diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 53aef36404e..fbc20b282ca 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -39,6 +39,10 @@ void MutatePlainMergeTreeTask::prepare() future_part, task_context); + storage.writePartLog( + PartLogElement::MUTATE_PART_START, {}, 0, + future_part->name, new_part, future_part->parts, merge_list_entry.get(), {}); + stopwatch = std::make_unique(); write_part_log = [this] (const ExecutionStatus & execution_status) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 2e7847fc99f..936df7b0275 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -1623,6 +1623,7 @@ private: ctx->compression_codec, ctx->txn ? 
ctx->txn->tid : Tx::PrehistoricTID, /*reset_columns=*/ true, + /*save_marks_in_cache=*/ false, /*blocks_are_granules_size=*/ false, ctx->context->getWriteSettings(), computed_granularity); @@ -1851,6 +1852,7 @@ private: std::vector(ctx->indices_to_recalc.begin(), ctx->indices_to_recalc.end()), ColumnsStatistics(ctx->stats_to_recalc.begin(), ctx->stats_to_recalc.end()), nullptr, + /*save_marks_in_cache=*/ false, ctx->source_part->index_granularity, &ctx->source_part->index_granularity_info ); @@ -2164,6 +2166,7 @@ bool MutateTask::prepare() context_for_reading->setSetting("apply_mutations_on_fly", false); /// Skip using large sets in KeyCondition context_for_reading->setSetting("use_index_for_in_with_subqueries_max_values", 100000); + context_for_reading->setSetting("use_concurrency_control", false); for (const auto & command : *ctx->commands) if (!canSkipMutationCommandForPart(ctx->source_part, command, context_for_reading)) @@ -2286,7 +2289,7 @@ bool MutateTask::prepare() String tmp_part_dir_name = prefix + ctx->future_part->name; ctx->temporary_directory_lock = ctx->data->getTemporaryPartDirectoryHolder(tmp_part_dir_name); - auto builder = ctx->data->getDataPartBuilder(ctx->future_part->name, single_disk_volume, tmp_part_dir_name); + auto builder = ctx->data->getDataPartBuilder(ctx->future_part->name, single_disk_volume, tmp_part_dir_name, getReadSettings()); builder.withPartFormat(ctx->future_part->part_format); builder.withPartInfo(ctx->future_part->part_info); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp index 22b8ccca151..c258048354e 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -20,7 +21,6 @@ namespace ErrorCodes { extern const int SUPPORT_IS_DISABLED; extern const int REPLICA_STATUS_CHANGED; - extern const int LOGICAL_ERROR; } ReplicatedMergeTreeAttachThread::ReplicatedMergeTreeAttachThread(StorageReplicatedMergeTree & storage_) @@ -123,67 +123,6 @@ void ReplicatedMergeTreeAttachThread::checkHasReplicaMetadataInZooKeeper(const z } } -Int32 ReplicatedMergeTreeAttachThread::fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper) -{ - const String & zookeeper_path = storage.zookeeper_path; - const String & replica_path = storage.replica_path; - const bool replica_readonly = storage.is_readonly; - - for (size_t i = 0; i != 2; ++i) - { - String replica_metadata_version_str; - const bool replica_metadata_version_exists = zookeeper->tryGet(replica_path + "/metadata_version", replica_metadata_version_str); - if (!replica_metadata_version_exists) - return -1; - - const Int32 metadata_version = parse(replica_metadata_version_str); - - if (metadata_version != 0 || replica_readonly) - { - /// No need to fix anything - return metadata_version; - } - - Coordination::Stat stat; - zookeeper->get(fs::path(zookeeper_path) / "metadata", &stat); - if (stat.version == 0) - { - /// No need to fix anything - return metadata_version; - } - - ReplicatedMergeTreeQueue & queue = storage.queue; - queue.pullLogsToQueue(zookeeper); - if (queue.getStatus().metadata_alters_in_queue != 0) - { - LOG_DEBUG(log, "No need to update metadata_version as there are ALTER_METADATA entries in the queue"); - return metadata_version; - } - - const Coordination::Requests ops = { - zkutil::makeSetRequest(fs::path(replica_path) / "metadata_version", std::to_string(stat.version), 
0), - zkutil::makeCheckRequest(fs::path(zookeeper_path) / "metadata", stat.version), - }; - Coordination::Responses ops_responses; - const auto code = zookeeper->tryMulti(ops, ops_responses); - if (code == Coordination::Error::ZOK) - { - LOG_DEBUG(log, "Successfully set metadata_version to {}", stat.version); - return stat.version; - } - if (code != Coordination::Error::ZBADVERSION) - { - throw zkutil::KeeperException(code); - } - } - - /// Second attempt is only possible if metadata_version != 0 or metadata.version changed during the first attempt. - /// If metadata_version != 0, on second attempt we will return the new metadata_version. - /// If metadata.version changed, on second attempt we will either get metadata_version != 0 and return the new metadata_version or we will get metadata_alters_in_queue != 0 and return 0. - /// Either way, on second attempt this method should return. - throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to fix replica metadata_version in ZooKeeper after two attempts"); -} - void ReplicatedMergeTreeAttachThread::runImpl() { storage.setZooKeeper(); @@ -227,33 +166,6 @@ void ReplicatedMergeTreeAttachThread::runImpl() /// Just in case it was not removed earlier due to connection loss zookeeper->tryRemove(replica_path + "/flags/force_restore_data"); - const Int32 replica_metadata_version = fixReplicaMetadataVersionIfNeeded(zookeeper); - const bool replica_metadata_version_exists = replica_metadata_version != -1; - if (replica_metadata_version_exists) - { - storage.setInMemoryMetadata(metadata_snapshot->withMetadataVersion(replica_metadata_version)); - } - else - { - /// Table was created before 20.4 and was never altered, - /// let's initialize replica metadata version from global metadata version. - Coordination::Stat table_metadata_version_stat; - zookeeper->get(zookeeper_path + "/metadata", &table_metadata_version_stat); - - Coordination::Requests ops; - ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/metadata", table_metadata_version_stat.version)); - ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/metadata_version", toString(table_metadata_version_stat.version), zkutil::CreateMode::Persistent)); - - Coordination::Responses res; - auto code = zookeeper->tryMulti(ops, res); - - if (code == Coordination::Error::ZBADVERSION) - throw Exception(ErrorCodes::REPLICA_STATUS_CHANGED, "Failed to initialize metadata_version " - "because table was concurrently altered, will retry"); - - zkutil::KeeperMultiException::check(code, ops, res); - } - storage.checkTableStructure(replica_path, metadata_snapshot); storage.checkParts(skip_sanity_checks); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h index bfc97442598..250a5ed34d1 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h @@ -48,8 +48,6 @@ private: void runImpl(); void finalizeInitialization(); - - Int32 fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper); }; } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 6b1581645f8..b1564b58a6c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -615,7 +615,7 @@ std::pair ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::Zo { std::lock_guard lock(pull_logs_to_queue_mutex); - if (reason != LOAD) + if (reason != LOAD && reason != 
FIX_METADATA_VERSION) { /// It's totally ok to load queue on readonly replica (that's what RestartingThread does on initialization). /// It's ok if replica became readonly due to connection loss after we got current zookeeper (in this case zookeeper must be expired). diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 9d3349663e2..6ec8818b0c6 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -334,6 +334,7 @@ public: UPDATE, MERGE_PREDICATE, SYNC, + FIX_METADATA_VERSION, OTHER, }; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 9d3e26cdc8d..93124e634bd 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -29,6 +29,8 @@ namespace MergeTreeSetting namespace ErrorCodes { extern const int REPLICA_IS_ALREADY_ACTIVE; + extern const int REPLICA_STATUS_CHANGED; + extern const int LOGICAL_ERROR; } namespace FailPoints @@ -207,6 +209,36 @@ bool ReplicatedMergeTreeRestartingThread::tryStartup() throw; } + const Int32 replica_metadata_version = fixReplicaMetadataVersionIfNeeded(zookeeper); + const bool replica_metadata_version_exists = replica_metadata_version != -1; + if (replica_metadata_version_exists) + { + storage.setInMemoryMetadata(storage.getInMemoryMetadataPtr()->withMetadataVersion(replica_metadata_version)); + } + else + { + /// Table was created before 20.4 and was never altered, + /// let's initialize replica metadata version from global metadata version. + + const String & zookeeper_path = storage.zookeeper_path, & replica_path = storage.replica_path; + + Coordination::Stat table_metadata_version_stat; + zookeeper->get(zookeeper_path + "/metadata", &table_metadata_version_stat); + + Coordination::Requests ops; + ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/metadata", table_metadata_version_stat.version)); + ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/metadata_version", toString(table_metadata_version_stat.version), zkutil::CreateMode::Persistent)); + + Coordination::Responses res; + auto code = zookeeper->tryMulti(ops, res); + + if (code == Coordination::Error::ZBADVERSION) + throw Exception(ErrorCodes::REPLICA_STATUS_CHANGED, "Failed to initialize metadata_version " + "because table was concurrently altered, will retry"); + + zkutil::KeeperMultiException::check(code, ops, res); + } + storage.queue.removeCurrentPartsFromMutations(); storage.last_queue_update_finish_time.store(time(nullptr)); @@ -424,4 +456,64 @@ void ReplicatedMergeTreeRestartingThread::setNotReadonly() storage.readonly_start_time.store(0, std::memory_order_relaxed); } + +Int32 ReplicatedMergeTreeRestartingThread::fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper) +{ + const String & zookeeper_path = storage.zookeeper_path; + const String & replica_path = storage.replica_path; + + const size_t num_attempts = 2; + for (size_t attempt = 0; attempt != num_attempts; ++attempt) + { + String replica_metadata_version_str; + Coordination::Stat replica_stat; + const bool replica_metadata_version_exists = zookeeper->tryGet(replica_path + "/metadata_version", replica_metadata_version_str, &replica_stat); + if (!replica_metadata_version_exists) + return -1; + + const Int32 metadata_version = parse(replica_metadata_version_str); + if (metadata_version != 0) + 
return metadata_version; + + Coordination::Stat table_stat; + zookeeper->get(fs::path(zookeeper_path) / "metadata", &table_stat); + if (table_stat.version == 0) + return metadata_version; + + ReplicatedMergeTreeQueue & queue = storage.queue; + queue.pullLogsToQueue(zookeeper, {}, ReplicatedMergeTreeQueue::FIX_METADATA_VERSION); + if (queue.getStatus().metadata_alters_in_queue != 0) + { + LOG_INFO(log, "Skipping updating metadata_version as there are ALTER_METADATA entries in the queue"); + return metadata_version; + } + + const Coordination::Requests ops = { + zkutil::makeSetRequest(fs::path(replica_path) / "metadata_version", std::to_string(table_stat.version), replica_stat.version), + zkutil::makeCheckRequest(fs::path(zookeeper_path) / "metadata", table_stat.version), + }; + Coordination::Responses ops_responses; + const Coordination::Error code = zookeeper->tryMulti(ops, ops_responses); + if (code == Coordination::Error::ZOK) + { + LOG_DEBUG(log, "Successfully set metadata_version to {}", table_stat.version); + return table_stat.version; + } + + if (code == Coordination::Error::ZBADVERSION) + { + LOG_WARNING(log, "Cannot fix metadata_version because either metadata.version or metadata_version.version changed, attempts left = {}", num_attempts - attempt - 1); + continue; + } + + throw zkutil::KeeperException(code); + } + + /// Second attempt is only possible if either metadata_version.version or metadata.version changed during the first attempt. + /// If metadata_version changed to non-zero value during the first attempt, on second attempt we will return the new metadata_version. + /// If metadata.version changed during first attempt, on second attempt we will either get metadata_version != 0 and return the new metadata_version or we will get metadata_alters_in_queue != 0 and return 0. + /// So either first or second attempt should return unless metadata_version was rewritten from 0 to 0 during the first attempt which is highly unlikely. + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to fix replica metadata_version in ZooKeeper after two attempts"); +} + } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h index d719505ae5e..6f450dc1d40 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h @@ -6,6 +6,7 @@ #include #include #include +#include namespace DB @@ -68,6 +69,9 @@ private: /// Disable readonly mode for table void setNotReadonly(); + + /// Fix replica metadata_version if needed + Int32 fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper); }; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 95469337f8a..f3ae6e77ac3 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -3,8 +3,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -341,7 +341,7 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk & chunk) using DelayedPartitions = std::vector; DelayedPartitions partitions; - size_t streams = 0; + size_t total_streams = 0; bool support_parallel_write = false; for (auto & current_block : part_blocks) @@ -418,15 +418,18 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk & chunk) max_insert_delayed_streams_for_parallel_write = 0; /// In case of too much columns/parts in block, flush explicitly. 
- streams += temp_part.streams.size(); - if (streams > max_insert_delayed_streams_for_parallel_write) + size_t current_streams = 0; + for (const auto & stream : temp_part.streams) + current_streams += stream.stream->getNumberOfOpenStreams(); + + if (total_streams + current_streams > max_insert_delayed_streams_for_parallel_write) { finishDelayedChunk(zookeeper); delayed_chunk = std::make_unique::DelayedChunk>(replicas_num); delayed_chunk->partitions = std::move(partitions); finishDelayedChunk(zookeeper); - streams = 0; + total_streams = 0; support_parallel_write = false; partitions = DelayedPartitions{}; } @@ -447,6 +450,8 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk & chunk) std::move(unmerged_block), std::move(part_counters) /// profile_events_scope must be reset here. )); + + total_streams += current_streams; } if (need_to_define_dedup_token) @@ -481,6 +486,17 @@ void ReplicatedMergeTreeSinkImpl::finishDelayedChunk(const ZooKeeperWithF /// Set a special error code if the block is duplicate int error = (deduplicate && deduplicated) ? ErrorCodes::INSERT_WAS_DEDUPLICATED : 0; + auto * mark_cache = storage.getContext()->getMarkCache().get(); + + if (!error && mark_cache) + { + for (const auto & stream : partition.temp_part.streams) + { + auto marks = stream.stream->releaseCachedMarks(); + addMarksToCache(*part, marks, mark_cache); + } + } + auto counters_snapshot = std::make_shared(partition.part_counters.getPartiallyAtomicSnapshot()); PartLog::addNewPart(storage.getContext(), PartLog::PartLogEntry(part, partition.elapsed_ns, counters_snapshot), ExecutionStatus(error)); StorageReplicatedMergeTree::incrementInsertedPartsProfileEvent(part->getType()); @@ -521,8 +537,18 @@ void ReplicatedMergeTreeSinkImpl::finishDelayedChunk(const ZooKeeperWithFa { partition.temp_part.finalize(); auto conflict_block_ids = commitPart(zookeeper, partition.temp_part.part, partition.block_id, delayed_chunk->replicas_num).first; + if (conflict_block_ids.empty()) { + if (auto * mark_cache = storage.getContext()->getMarkCache().get()) + { + for (const auto & stream : partition.temp_part.streams) + { + auto marks = stream.stream->releaseCachedMarks(); + addMarksToCache(*partition.temp_part.part, marks, mark_cache); + } + } + auto counters_snapshot = std::make_shared(partition.part_counters.getPartiallyAtomicSnapshot()); PartLog::addNewPart( storage.getContext(), diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 2a1ddf32431..34e699bcef7 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -135,7 +135,6 @@ bool isRetryableException(std::exception_ptr exception_ptr) } } - static IMergeTreeDataPart::Checksums checkDataPart( MergeTreeData::DataPartPtr data_part, const IDataPartStorage & data_part_storage, @@ -422,6 +421,7 @@ IMergeTreeDataPart::Checksums checkDataPart( } ReadSettings read_settings; + read_settings.read_through_distributed_cache = false; read_settings.enable_filesystem_cache = false; read_settings.enable_filesystem_cache_log = false; read_settings.enable_filesystem_read_prefetches_log = false; From 863887cca5674088ebe15a95d07b2ee0aebf4597 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 8 Nov 2024 00:50:47 +0100 Subject: [PATCH 299/566] Reset WriteSettings to master --- src/IO/WriteSettings.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/IO/WriteSettings.h b/src/IO/WriteSettings.h index 4eeb01b5acc..94410f787f0 100644 --- a/src/IO/WriteSettings.h +++ b/src/IO/WriteSettings.h @@ 
-4,7 +4,6 @@ #include #include - namespace DB { @@ -29,8 +28,6 @@ struct WriteSettings bool use_adaptive_write_buffer = false; size_t adaptive_write_buffer_initial_size = 16 * 1024; - size_t max_compression_threads = 1; - bool write_through_distributed_cache = false; DistributedCacheSettings distributed_cache_settings; From c78272871f26d541800bb783d886325efa3c3ee7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 8 Nov 2024 00:51:32 +0100 Subject: [PATCH 300/566] Rollback some changes --- .../0_stateless/03254_parallel_compression.sql | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 tests/queries/0_stateless/03254_parallel_compression.sql diff --git a/tests/queries/0_stateless/03254_parallel_compression.sql b/tests/queries/0_stateless/03254_parallel_compression.sql deleted file mode 100644 index a17deed7d8c..00000000000 --- a/tests/queries/0_stateless/03254_parallel_compression.sql +++ /dev/null @@ -1,11 +0,0 @@ -DROP TABLE IF EXISTS test2; - -CREATE TABLE test2 -( - k UInt64 -) ENGINE = MergeTree ORDER BY k SETTINGS min_compress_block_size = 10240, min_bytes_for_wide_part = 1, max_compression_threads = 64; - -INSERT INTO test2 SELECT number FROM numbers(20000); -SELECT sum(k) = (9999 * 10000 / 2 + 10000 * 9999) FROM test2 WHERE k > 10000; - -DROP TABLE test2; From 3fa72482a75551c3132fa2f7ee8770d4f6862f98 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 8 Nov 2024 00:56:38 +0100 Subject: [PATCH 301/566] Revert some changes --- src/Compression/ParallelCompressedWriteBuffer.cpp | 4 ---- src/Compression/ParallelCompressedWriteBuffer.h | 10 ---------- 2 files changed, 14 deletions(-) diff --git a/src/Compression/ParallelCompressedWriteBuffer.cpp b/src/Compression/ParallelCompressedWriteBuffer.cpp index 303e1ece68a..3831d07e91a 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.cpp +++ b/src/Compression/ParallelCompressedWriteBuffer.cpp @@ -44,8 +44,6 @@ void ParallelCompressedWriteBuffer::nextImpl() /// The buffer will be compressed and processed in the thread. current_buffer->busy = true; current_buffer->sequence_num = current_sequence_num; - current_buffer->out_callback = callback; - callback = {}; ++current_sequence_num; current_buffer->uncompressed_size = offset(); pool.scheduleOrThrowOnError([this, my_current_buffer = current_buffer, thread_group = CurrentThread::getGroup()] @@ -155,8 +153,6 @@ void ParallelCompressedWriteBuffer::compress(Iterator buffer) } std::unique_lock lock(mutex); - if (buffer->out_callback) - buffer->out_callback(); buffer->busy = false; cond.notify_all(); } diff --git a/src/Compression/ParallelCompressedWriteBuffer.h b/src/Compression/ParallelCompressedWriteBuffer.h index 8c5f249b06c..38a3a083e19 100644 --- a/src/Compression/ParallelCompressedWriteBuffer.h +++ b/src/Compression/ParallelCompressedWriteBuffer.h @@ -31,13 +31,6 @@ public: ~ParallelCompressedWriteBuffer() override; - /// This function will be called once after compressing the next data and sending it to the out. - /// It can be used to fill information about marks. 
- void setCompletionCallback(std::function callback_) - { - callback = callback_; - } - private: void nextImpl() override; void finalizeImpl() override; @@ -61,15 +54,12 @@ private: BufferPair * previous = nullptr; size_t sequence_num = 0; bool busy = false; - std::function out_callback; }; std::mutex mutex; std::condition_variable cond; std::list buffers; - std::function callback; - using Iterator = std::list::iterator; Iterator current_buffer; size_t current_sequence_num = 0; From f24dca21a56f97ce2d422bc3e411868eca5c751c Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Fri, 8 Nov 2024 09:15:15 +0100 Subject: [PATCH 302/566] Implement CLICKHOUSE_RUN_AS_ROOT instead of preser UID/GID --- docker/keeper/entrypoint.sh | 25 +++++++++++++++++-------- docker/server/entrypoint.sh | 25 +++++++++++++++++-------- 2 files changed, 34 insertions(+), 16 deletions(-) diff --git a/docker/keeper/entrypoint.sh b/docker/keeper/entrypoint.sh index 92b91a0f8c3..31e4c8b63da 100644 --- a/docker/keeper/entrypoint.sh +++ b/docker/keeper/entrypoint.sh @@ -5,19 +5,28 @@ set -eo pipefail shopt -s nullglob DO_CHOWN=1 -if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then +if [[ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" || "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" ]]; then DO_CHOWN=0 fi +# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated +# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as +# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3 +if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then + echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2 + echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2 + echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2 +fi + # support `docker run --user=xxx:xxxx` -if [ "$(id -u)" = "0" ]; then - # CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility - if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then - echo 'WARNING: consider using a proper "--user=xxx:xxxx" running argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2 - echo 'Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases' >&2 +if [[ "$(id -u)" = "0" ]]; then + if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then + USER=0 + GROUP=0 + else + USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" + GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" fi - USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" - GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" if command -v gosu &> /dev/null; then gosu="gosu $USER:$GROUP" elif command -v su-exec &> /dev/null; then diff --git a/docker/server/entrypoint.sh b/docker/server/entrypoint.sh index 5a91d54d32b..443bcd7a176 100755 --- a/docker/server/entrypoint.sh +++ b/docker/server/entrypoint.sh @@ -4,19 +4,28 @@ set -eo pipefail shopt -s nullglob DO_CHOWN=1 -if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then +if [[ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" || "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" ]]; then DO_CHOWN=0 fi +# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated +# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as +# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3 +if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then + echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a 
couple of releases.' >&2 + echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2 + echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2 +fi + # support `docker run --user=xxx:xxxx` -if [ "$(id -u)" = "0" ]; then - # CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility - if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then - echo 'WARNING: consider using a proper "--user=xxx:xxxx" running argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2 - echo 'Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases' >&2 +if [[ "$(id -u)" = "0" ]]; then + if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then + USER=0 + GROUP=0 + else + USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" + GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" fi - USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" - GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" else USER="$(id -u)" GROUP="$(id -g)" From a828e3e923ef06666d4582c34868750bbbee3e6a Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 5 Nov 2024 12:59:14 +0000 Subject: [PATCH 303/566] test --- .github/workflows/pr.yaml | 108 ++++++++++++++++---------- ci/jobs/build_clickhouse.py | 30 +++---- ci/jobs/fast_test.py | 2 + ci/jobs/functional_stateless_tests.py | 92 +++++++++++++--------- ci/jobs/scripts/clickhouse_proc.py | 39 +++------- ci/praktika/_environment.py | 12 +-- ci/praktika/cidb.py | 2 +- ci/praktika/digest.py | 20 ++--- ci/praktika/environment.py | 3 - ci/praktika/hook_html.py | 20 ++--- ci/praktika/job.py | 1 + ci/praktika/json.html | 22 +++--- ci/praktika/mangle.py | 55 +++++++------ ci/praktika/native_jobs.py | 6 +- ci/praktika/param.py | 8 -- ci/praktika/result.py | 6 -- ci/praktika/runner.py | 7 +- ci/praktika/utils.py | 27 +++---- ci/praktika/validator.py | 89 ++++++++++----------- ci/praktika/yaml_generator.py | 11 +-- ci/settings/definitions.py | 5 +- ci/workflows/pull_request.py | 54 +++++++++---- tests/clickhouse-test | 30 +++---- tests/config/install.sh | 2 +- tests/docker_scripts/setup_minio.sh | 6 +- 25 files changed, 334 insertions(+), 323 deletions(-) delete mode 100644 ci/praktika/environment.py delete mode 100644 ci/praktika/param.py diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 0c3f74aeac8..51bb9b52d10 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -31,8 +31,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -72,8 +71,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -113,8 +111,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -154,8 +151,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -195,8 +191,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -236,8 +231,7 @@ jobs: 
- name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -266,19 +260,18 @@ jobs: python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug_parallel_1_2: + stateless_tests_amd_debugparallel: runs-on: [builder] needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMS8yKQ==') }} - name: "Stateless tests (amd, debug) (parallel 1/2)" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcscGFyYWxsZWwp') }} + name: "Stateless tests (amd_debug,parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -302,24 +295,63 @@ jobs: . /tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_debug,parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_debug,parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug_parallel_2_2: + stateless_tests_amd_debugnon_parallel: + runs-on: [func-tester] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsbm9uLXBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug,non-parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd_debug,non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd_debug,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_releaseparallel: runs-on: [builder] - needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMi8yKQ==') }} - name: "Stateless tests (amd, debug) (parallel 2/2)" + needs: [config_workflow, docker_builds, build_amd_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfcmVsZWFzZSxwYXJhbGxlbCk=') }} + name: "Stateless tests (amd_release,parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -343,24 +375,23 @@ jobs: . /tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug_non_parallel: - runs-on: [style-checker] - needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAobm9uLXBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd, debug) (non-parallel)" + stateless_tests_amd_releasenon_parallel: + runs-on: [func-tester] + needs: [config_workflow, docker_builds, build_amd_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfcmVsZWFzZSxub24tcGFyYWxsZWwp') }} + name: "Stateless tests (amd_release,non-parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -384,14 +415,14 @@ jobs: . 
/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi finish_workflow: runs-on: [ci_services] - needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, stateless_tests_amd_debug_parallel_1_2, stateless_tests_amd_debug_parallel_2_2, stateless_tests_amd_debug_non_parallel] + needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, stateless_tests_amd_debugparallel, stateless_tests_amd_debugnon_parallel, stateless_tests_amd_releaseparallel, stateless_tests_amd_releasenon_parallel] if: ${{ !cancelled() }} name: "Finish Workflow" outputs: @@ -400,8 +431,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 3db88938f23..1e6d2c648a7 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -1,6 +1,5 @@ import argparse -from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils @@ -16,8 +15,7 @@ def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") parser.add_argument( "--build-type", - help="Type: __", - default=None, + help="Type: ,,", ) parser.add_argument( "--param", @@ -30,7 +28,7 @@ def parse_args(): CMAKE_CMD = """cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA \ -DCMAKE_BUILD_TYPE={BUILD_TYPE} \ -DSANITIZE={SANITIZER} \ --DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \ +-DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 \ -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \ {AUX_DEFS} \ @@ -54,33 +52,26 @@ def main(): stages.pop(0) stages.insert(0, stage) - cmake_build_type = "Release" - sanitizer = "" - - if args.build_type and get_param(): - assert ( - False - ), "Build type must provided via job parameter (CI case) or via --build-type input argument not both" - - build_type = args.build_type or get_param() + build_type = args.build_type assert ( build_type ), "build_type must be provided either as input argument or as a parameter of parametrized job in CI" build_type = build_type.lower() - # if Environment.is_local_run(): - # build_cache_type = "disabled" - # else: CACHE_TYPE = "sccache" if "debug" in build_type: print("Build type set: debug") BUILD_TYPE = "Debug" - AUX_DEFS = " -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " + AUX_DEFS = ( + " -DENABLE_TESTS=1 -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " + ) elif "release" in build_type: print("Build type set: 
release") - BUILD_TYPE = "None" - AUX_DEFS = " -DENABLE_TESTS=1 " + BUILD_TYPE = "RelWithDebInfo" + AUX_DEFS = " -DENABLE_TESTS=0 " + else: + assert False if "asan" in build_type: print("Sanitizer set: address") @@ -136,6 +127,7 @@ def main(): Shell.check(f"ls -l {build_dir}/programs/") res = results[-1].is_ok() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index cb7d925fead..03a4c0cd496 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -215,11 +215,13 @@ def main(): ) if res and JobStages.TEST in stages: + stop_watch_ = Utils.Stopwatch() step_name = "Tests" print(step_name) res = res and CH.run_fast_test() if res: results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run()) + results[-1].set_timing(stopwatch=stop_watch_) CH.terminate() diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py index d77522ed73a..0481086d80a 100644 --- a/ci/jobs/functional_stateless_tests.py +++ b/ci/jobs/functional_stateless_tests.py @@ -1,15 +1,13 @@ import argparse -import os +import time from pathlib import Path -from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils from ci.jobs.scripts.clickhouse_proc import ClickHouseProc from ci.jobs.scripts.functional_tests_results import FTResultsProcessor -from ci.settings.definitions import azure_secret class JobStages(metaclass=MetaClasses.WithIter): @@ -21,9 +19,14 @@ class JobStages(metaclass=MetaClasses.WithIter): def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") parser.add_argument( - "BUILD_TYPE", help="Type: __" + "--ch-path", help="Path to clickhouse binary", default=f"{Settings.INPUT_DIR}" ) - parser.add_argument("--param", help="Optional custom job start stage", default=None) + parser.add_argument( + "--test-options", + help="Comma separated option(s): parallel|non-parallel|BATCH_NUM/BTATCH_TOT|..", + default="", + ) + parser.add_argument("--param", help="Optional job start stage", default=None) return parser.parse_args() @@ -50,28 +53,31 @@ def run_stateless_test( def main(): args = parse_args() - params = get_param().split(" ") - parallel_or_sequential = None - no_parallel = False - no_sequential = False - if params: - parallel_or_sequential = params[0] - if len(params) > 1: - batch_num, total_batches = map(int, params[1].split("/")) - else: - batch_num, total_batches = 0, 0 - if parallel_or_sequential: - no_parallel = parallel_or_sequential == "non-parallel" - no_sequential = parallel_or_sequential == "parallel" + test_options = args.test_options.split(",") + no_parallel = "non-parallel" in test_options + no_sequential = "parallel" in test_options + batch_num, total_batches = 0, 0 + for to in test_options: + if "/" in to: + batch_num, total_batches = map(int, to.split("/")) - os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( - f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", - verbose=True, - ) + # os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( + # f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", + # verbose=True, + # strict=True + # ) + + ch_path = args.ch_path + assert Path( + ch_path + "/clickhouse" + ).is_file(), f"clickhouse binary not found under [{ch_path}]" stop_watch = 
Utils.Stopwatch() stages = list(JobStages) + + logs_to_attach = [] + stage = args.param or JobStages.INSTALL_CLICKHOUSE if stage: assert stage in JobStages, f"--param must be one of [{list(JobStages)}]" @@ -83,19 +89,22 @@ def main(): res = True results = [] - Utils.add_to_PATH(f"{Settings.INPUT_DIR}:tests") + Utils.add_to_PATH(f"{ch_path}:tests") if res and JobStages.INSTALL_CLICKHOUSE in stages: commands = [ - f"chmod +x {Settings.INPUT_DIR}/clickhouse", - f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-server", - f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-client", + f"chmod +x {ch_path}/clickhouse", + f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-server", + f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-client", f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server", f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage", # update_path_ch_config, - f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml", - f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml", + # f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml", + # f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml", + f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done", + f"for file in /tmp/praktika/etc/clickhouse-server/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done", + f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|local_disk|{Settings.TEMP_DIR}/local_disk|g' $(readlink -f $file); done", f"clickhouse-server --version", ] results.append( @@ -110,22 +119,27 @@ def main(): stop_watch_ = Utils.Stopwatch() step_name = "Start ClickHouse Server" print(step_name) - res = res and CH.start_minio() + minio_log = "/tmp/praktika/output/minio.log" + res = res and CH.start_minio(log_file_path=minio_log) + logs_to_attach += [minio_log] + time.sleep(10) + Shell.check("ps -ef | grep minio", verbose=True) + res = res and Shell.check( + "aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True + ) res = res and CH.start() res = res and CH.wait_ready() + if res: + print("ch started") + logs_to_attach += [ + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log", + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log", + ] results.append( Result.create_from( name=step_name, status=res, stopwatch=stop_watch_, - files=( - [ - "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log", - "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log", - ] - if not res - else [] - ), ) ) res = results[-1].is_ok() @@ -144,7 +158,9 @@ def main(): results[-1].set_timing(stopwatch=stop_watch_) res = results[-1].is_ok() - 
Result.create_from(results=results, stopwatch=stop_watch).complete_job() + Result.create_from( + results=results, stopwatch=stop_watch, files=logs_to_attach if not res else [] + ).complete_job() if __name__ == "__main__": diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py index cc822eab693..c43283e75e0 100644 --- a/ci/jobs/scripts/clickhouse_proc.py +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -1,5 +1,4 @@ -import threading -import time +import subprocess from pathlib import Path from praktika.settings import Settings @@ -39,39 +38,25 @@ class ClickHouseProc: Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path) Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas") - if not fast_test: - with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file: - file.write(self.BACKUPS_XML) + # if not fast_test: + # with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file: + # file.write(self.BACKUPS_XML) self.minio_proc = None - def start_minio(self): - print("Starting minio") - - def run_minio(): - self.minio_proc = Shell.run_async( - self.minio_cmd, verbose=True, suppress_output=True + def start_minio(self, log_file_path): + command = ["tests/docker_scripts/setup_minio.sh", "stateless", "./tests"] + with open(log_file_path, "w") as log_file: + process = subprocess.Popen( + command, stdout=log_file, stderr=subprocess.STDOUT ) - - thread = threading.Thread(target=run_minio) - thread.daemon = True # Allow program to exit even if thread is still running - thread.start() - time.sleep(5) - return thread.is_alive() + print(f"Started setup_minio.sh asynchronously with PID {process.pid}") + return True def start(self): print("Starting ClickHouse server") Shell.check(f"rm {self.pid_file}") - - def run_clickhouse(): - self.proc = Shell.run_async( - self.command, verbose=True, suppress_output=False - ) - - thread = threading.Thread(target=run_clickhouse) - thread.daemon = True # Allow program to exit even if thread is still running - thread.start() - + self.proc = subprocess.Popen(self.command, stderr=subprocess.STDOUT, shell=True) started = False try: for _ in range(5): diff --git a/ci/praktika/_environment.py b/ci/praktika/_environment.py index 4ac8ad319f9..1c6b547ddde 100644 --- a/ci/praktika/_environment.py +++ b/ci/praktika/_environment.py @@ -30,7 +30,6 @@ class _Environment(MetaClasses.Serializable): INSTANCE_ID: str INSTANCE_LIFE_CYCLE: str LOCAL_RUN: bool = False - PARAMETER: Any = None REPORT_INFO: List[str] = dataclasses.field(default_factory=list) name = "environment" @@ -172,18 +171,15 @@ class _Environment(MetaClasses.Serializable): # TODO: find a better place for the function. 
This file should not import praktika.settings # as it's requires reading users config, that's why imports nested inside the function - def get_report_url(self): + def get_report_url(self, settings): import urllib - from praktika.settings import Settings - from praktika.utils import Utils - - path = Settings.HTML_S3_PATH - for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items(): + path = settings.HTML_S3_PATH + for bucket, endpoint in settings.S3_BUCKET_TO_HTTP_ENDPOINT.items(): if bucket in path: path = path.replace(bucket, endpoint) break - REPORT_URL = f"https://{path}/{Path(Settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" + REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" return REPORT_URL def is_local_run(self): diff --git a/ci/praktika/cidb.py b/ci/praktika/cidb.py index 087845ec762..53088c102cd 100644 --- a/ci/praktika/cidb.py +++ b/ci/praktika/cidb.py @@ -52,7 +52,7 @@ class CIDB: check_status=result.status, check_duration_ms=int(result.duration * 1000), check_start_time=Utils.timestamp_to_str(result.start_time), - report_url=env.get_report_url(), + report_url=env.get_report_url(settings=Settings), pull_request_url=env.CHANGE_URL, base_ref=env.BASE_BRANCH, base_repo=env.REPOSITORY, diff --git a/ci/praktika/digest.py b/ci/praktika/digest.py index 93b62b13dc0..a1f2eecf9b6 100644 --- a/ci/praktika/digest.py +++ b/ci/praktika/digest.py @@ -31,6 +31,9 @@ class Digest: cache_key = self._hash_digest_config(config) if cache_key in self.digest_cache: + print( + f"calc digest for job [{job_config.name}]: hash_key [{cache_key}] - from cache" + ) return self.digest_cache[cache_key] included_files = Utils.traverse_paths( @@ -38,12 +41,9 @@ class Digest: job_config.digest_config.exclude_paths, sorted=True, ) - print( f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files" ) - # Sort files to ensure consistent hash calculation - included_files.sort() # Calculate MD5 hash res = "" @@ -52,11 +52,11 @@ class Digest: print(f"NOTE: empty digest config [{config}] - return dummy digest") else: hash_md5 = hashlib.md5() - for file_path in included_files: - res = self._calc_file_digest(file_path, hash_md5) - assert res - self.digest_cache[cache_key] = res - return res + for i, file_path in enumerate(included_files): + hash_md5 = self._calc_file_digest(file_path, hash_md5) + digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + self.digest_cache[cache_key] = digest + return digest def calc_docker_digest( self, @@ -103,10 +103,10 @@ class Digest: print( f"WARNING: No valid file resolved by link {file_path} -> {resolved_path} - skipping digest calculation" ) - return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + return hash_md5 with open(resolved_path, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) - return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + return hash_md5 diff --git a/ci/praktika/environment.py b/ci/praktika/environment.py deleted file mode 100644 index 8f53aa6230b..00000000000 --- a/ci/praktika/environment.py +++ /dev/null @@ -1,3 +0,0 @@ -from praktika._environment import _Environment - -Environment = _Environment.get() diff --git a/ci/praktika/hook_html.py b/ci/praktika/hook_html.py index 
cea84192d0d..ca2692d1b22 100644 --- a/ci/praktika/hook_html.py +++ b/ci/praktika/hook_html.py @@ -1,6 +1,5 @@ import dataclasses import json -import urllib.parse from pathlib import Path from typing import List @@ -132,17 +131,9 @@ class HtmlRunnerHooks: result = Result.generate_skipped(job.name) results.append(result) summary_result = Result.generate_pending(_workflow.name, results=results) - summary_result.aux_links.append(env.CHANGE_URL) - summary_result.aux_links.append(env.RUN_URL) + summary_result.links.append(env.CHANGE_URL) + summary_result.links.append(env.RUN_URL) summary_result.start_time = Utils.timestamp() - page_url = "/".join( - ["https:/", Settings.HTML_S3_PATH, str(Path(Settings.HTML_PAGE_FILE).name)] - ) - for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items(): - page_url = page_url.replace(bucket, endpoint) - # TODO: add support for non-PRs (use branch?) - page_url += f"?PR={env.PR_NUMBER}&sha=latest&name_0={urllib.parse.quote(env.WORKFLOW_NAME, safe='')}" - summary_result.html_link = page_url # clean the previous latest results in PR if any if env.PR_NUMBER: @@ -152,13 +143,14 @@ class HtmlRunnerHooks: unlock=False, ) + page_url = env.get_report_url(settings=Settings) print(f"CI Status page url [{page_url}]") res1 = GH.post_commit_status( name=_workflow.name, status=Result.Status.PENDING, description="", - url=page_url, + url=env.get_report_url(settings=Settings), ) res2 = GH.post_pr_comment( comment_body=f"Workflow [[{_workflow.name}]({page_url})], commit [{_Environment.get().SHA[:8]}]", @@ -248,11 +240,11 @@ class HtmlRunnerHooks: ) if workflow_result.status != old_status: print( - f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}], link [{workflow_result.html_link}]" + f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}]" ) GH.post_commit_status( name=workflow_result.name, status=GH.convert_to_gh_status(workflow_result.status), description="", - url=workflow_result.html_link, + url=env.get_report_url(settings=Settings), ) diff --git a/ci/praktika/job.py b/ci/praktika/job.py index 99eb08938b8..595a86456e9 100644 --- a/ci/praktika/job.py +++ b/ci/praktika/job.py @@ -89,6 +89,7 @@ class Job: ), "Job.Config.provides must be empty for parametrized jobs" if parameter_: obj.parameter = parameter_ + obj.command = obj.command.format(PARAMETER=parameter_) if runs_on_: obj.runs_on = runs_on_ if timeout_: diff --git a/ci/praktika/json.html b/ci/praktika/json.html index f86a7b27ecb..4e15a67ba76 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -663,20 +663,20 @@ let targetData = navigatePath(data, nameParams); let nest_level = nameParams.length; + // Add footer links from top-level Result + if (Array.isArray(data.links) && data.links.length > 0) { + data.links.forEach(link => { + const a = document.createElement('a'); + a.href = link; + a.textContent = link.split('/').pop(); + a.target = '_blank'; + footerRight.appendChild(a); + }); + } + if (targetData) { infoElement.style.display = 'none'; - // Handle footer links if present - if (Array.isArray(data.aux_links) && data.aux_links.length > 0) { - data.aux_links.forEach(link => { - const a = document.createElement('a'); - a.href = link; - a.textContent = link.split('/').pop(); - a.target = '_blank'; - footerRight.appendChild(a); - }); - } - addStatusToStatus(targetData.status, targetData.start_time, targetData.duration) // Handle links diff --git a/ci/praktika/mangle.py b/ci/praktika/mangle.py index bca33f9e660..b16d52fbbbf 100644 --- 
a/ci/praktika/mangle.py +++ b/ci/praktika/mangle.py @@ -14,35 +14,34 @@ def _get_workflows(name=None, file=None): """ res = [] - with ContextManager.cd(): - directory = Path(_Settings.WORKFLOWS_DIRECTORY) - for py_file in directory.glob("*.py"): - if file and file not in str(py_file): - continue - module_name = py_file.name.removeprefix(".py") - spec = importlib.util.spec_from_file_location( - module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}" - ) - assert spec - foo = importlib.util.module_from_spec(spec) - assert spec.loader - spec.loader.exec_module(foo) - try: - for workflow in foo.WORKFLOWS: - if name: - if name == workflow.name: - print(f"Read workflow [{name}] config from [{module_name}]") - res = [workflow] - break - else: - continue + directory = Path(_Settings.WORKFLOWS_DIRECTORY) + for py_file in directory.glob("*.py"): + if file and file not in str(py_file): + continue + module_name = py_file.name.removeprefix(".py") + spec = importlib.util.spec_from_file_location( + module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}" + ) + assert spec + foo = importlib.util.module_from_spec(spec) + assert spec.loader + spec.loader.exec_module(foo) + try: + for workflow in foo.WORKFLOWS: + if name: + if name == workflow.name: + print(f"Read workflow [{name}] config from [{module_name}]") + res = [workflow] + break else: - res += foo.WORKFLOWS - print(f"Read workflow configs from [{module_name}]") - except Exception as e: - print( - f"WARNING: Failed to add WORKFLOWS config from [{module_name}], exception [{e}]" - ) + continue + else: + res += foo.WORKFLOWS + print(f"Read workflow configs from [{module_name}]") + except Exception as e: + print( + f"WARNING: Failed to add WORKFLOWS config from [{module_name}], exception [{e}]" + ) if not res: Utils.raise_with_error(f"Failed to find workflow [{name or file}]") diff --git a/ci/praktika/native_jobs.py b/ci/praktika/native_jobs.py index 16ffa9056e9..58af211988b 100644 --- a/ci/praktika/native_jobs.py +++ b/ci/praktika/native_jobs.py @@ -342,7 +342,7 @@ def _finish_workflow(workflow, job_name): f"NOTE: Result for [{result.name}] has not ok status [{result.status}]" ) ready_for_merge_status = Result.Status.FAILED - failed_results.append(result.name.split("(", maxsplit=1)[0]) # cut name + failed_results.append(result.name) if failed_results: ready_for_merge_description = f"failed: {', '.join(failed_results)}" @@ -362,9 +362,7 @@ def _finish_workflow(workflow, job_name): unlock=False, ) # no lock - no unlock - Result.from_fs(job_name).set_status(Result.Status.SUCCESS).set_info( - ready_for_merge_description - ) + Result.from_fs(job_name).set_status(Result.Status.SUCCESS) if __name__ == "__main__": diff --git a/ci/praktika/param.py b/ci/praktika/param.py deleted file mode 100644 index f5727198e0d..00000000000 --- a/ci/praktika/param.py +++ /dev/null @@ -1,8 +0,0 @@ -from praktika._environment import _Environment - - -# TODO: find better place and/or right storage for parameter -def get_param(): - env = _Environment.get() - assert env.PARAMETER - return env.PARAMETER diff --git a/ci/praktika/result.py b/ci/praktika/result.py index f473cf3ed05..842deacbcbd 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -26,10 +26,6 @@ class Result(MetaClasses.Serializable): files (List[str]): A list of file paths or names related to the result. links (List[str]): A list of URLs related to the result (e.g., links to reports or resources). info (str): Additional information about the result. Free-form text. 
- # TODO: rename - aux_links (List[str]): A list of auxiliary links that provide additional context for the result. - # TODO: remove - html_link (str): A direct link to an HTML representation of the result (e.g., a detailed report page). Inner Class: Status: Defines possible statuses for the task, such as "success", "failure", etc. @@ -51,8 +47,6 @@ class Result(MetaClasses.Serializable): files: List[str] = dataclasses.field(default_factory=list) links: List[str] = dataclasses.field(default_factory=list) info: str = "" - aux_links: List[str] = dataclasses.field(default_factory=list) - html_link: str = "" @staticmethod def create_from( diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 5db1a89ce99..1ac8748d1c0 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -80,7 +80,6 @@ class Runner: print("Read GH Environment") env = _Environment.from_env() env.JOB_NAME = job.name - env.PARAMETER = job.parameter env.dump() print(env) @@ -128,7 +127,6 @@ class Runner: # re-set envs for local run env = _Environment.get() env.JOB_NAME = job.name - env.PARAMETER = job.parameter env.dump() if param: @@ -143,6 +141,7 @@ class Runner: job.run_in_docker.split("+")[1:], ) from_root = "root" in docker_settings + settings = [s for s in docker_settings if s.startswith("--")] if ":" in job.run_in_docker: docker_name, docker_tag = job.run_in_docker.split(":") print( @@ -154,9 +153,11 @@ class Runner: RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker], ) docker = docker or f"{docker_name}:{docker_tag}" - cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" + cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {' '.join(settings)} {docker} {job.command}" else: cmd = job.command + python_path = os.getenv("PYTHONPATH", ":") + os.environ["PYTHONPATH"] = f".:{python_path}" if param: print(f"Custom --param [{param}] will be passed to job's script") diff --git a/ci/praktika/utils.py b/ci/praktika/utils.py index b96c78e4fa7..62eb13b3e19 100644 --- a/ci/praktika/utils.py +++ b/ci/praktika/utils.py @@ -81,25 +81,26 @@ class MetaClasses: class ContextManager: @staticmethod @contextmanager - def cd(to: Optional[Union[Path, str]] = None) -> Iterator[None]: + def cd(to: Optional[Union[Path, str]]) -> Iterator[None]: """ changes current working directory to @path or `git root` if @path is None :param to: :return: """ - if not to: - try: - to = Shell.get_output_or_raise("git rev-parse --show-toplevel") - except: - pass - if not to: - if Path(_Settings.DOCKER_WD).is_dir(): - to = _Settings.DOCKER_WD - if not to: - assert False, "FIX IT" - assert to + # if not to: + # try: + # to = Shell.get_output_or_raise("git rev-parse --show-toplevel") + # except: + # pass + # if not to: + # if Path(_Settings.DOCKER_WD).is_dir(): + # to = _Settings.DOCKER_WD + # if not to: + # assert False, "FIX IT" + # assert to old_pwd = os.getcwd() - os.chdir(to) + if to: + os.chdir(to) try: yield finally: diff --git a/ci/praktika/validator.py b/ci/praktika/validator.py index 29edc0a27ed..d612881b819 100644 --- a/ci/praktika/validator.py +++ b/ci/praktika/validator.py 
@@ -119,61 +119,58 @@ class Validator: def validate_file_paths_in_run_command(cls, workflow: Workflow.Config) -> None: if not Settings.VALIDATE_FILE_PATHS: return - with ContextManager.cd(): - for job in workflow.jobs: - run_command = job.command - command_parts = run_command.split(" ") - for part in command_parts: - if ">" in part: - return - if "/" in part: - assert ( - Path(part).is_file() or Path(part).is_dir() - ), f"Apparently run command [{run_command}] for job [{job}] has invalid path [{part}]. Setting to disable check: VALIDATE_FILE_PATHS" + for job in workflow.jobs: + run_command = job.command + command_parts = run_command.split(" ") + for part in command_parts: + if ">" in part: + return + if "/" in part: + assert ( + Path(part).is_file() or Path(part).is_dir() + ), f"Apparently run command [{run_command}] for job [{job}] has invalid path [{part}]. Setting to disable check: VALIDATE_FILE_PATHS" @classmethod def validate_file_paths_in_digest_configs(cls, workflow: Workflow.Config) -> None: if not Settings.VALIDATE_FILE_PATHS: return - with ContextManager.cd(): - for job in workflow.jobs: - if not job.digest_config: - continue - for include_path in chain( - job.digest_config.include_paths, job.digest_config.exclude_paths - ): - if "*" in include_path: - assert glob.glob( - include_path, recursive=True - ), f"Apparently file glob [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS" - else: - assert ( - Path(include_path).is_file() or Path(include_path).is_dir() - ), f"Apparently file path [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS" + for job in workflow.jobs: + if not job.digest_config: + continue + for include_path in chain( + job.digest_config.include_paths, job.digest_config.exclude_paths + ): + if "*" in include_path: + assert glob.glob( + include_path, recursive=True + ), f"Apparently file glob [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS" + else: + assert ( + Path(include_path).is_file() or Path(include_path).is_dir() + ), f"Apparently file path [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. 
Setting to disable check: VALIDATE_FILE_PATHS" @classmethod def validate_requirements_txt_files(cls, workflow: Workflow.Config) -> None: - with ContextManager.cd(): - for job in workflow.jobs: - if job.job_requirements: - if job.job_requirements.python_requirements_txt: - path = Path(job.job_requirements.python_requirements_txt) - message = f"File with py requirement [{path}] does not exist" - if job.name in ( - Settings.DOCKER_BUILD_JOB_NAME, - Settings.CI_CONFIG_JOB_NAME, - Settings.FINISH_WORKFLOW_JOB_NAME, - ): - message += '\n If all requirements already installed on your runners - add setting INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS""' - message += "\n If requirements needs to be installed - add requirements file (Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS):" - message += "\n echo jwt==1.3.1 > ./ci/requirements.txt" - message += ( - "\n echo requests==2.32.3 >> ./ci/requirements.txt" - ) - message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt" - cls.evaluate_check( - path.is_file(), message, job.name, workflow.name + for job in workflow.jobs: + if job.job_requirements: + if job.job_requirements.python_requirements_txt: + path = Path(job.job_requirements.python_requirements_txt) + message = f"File with py requirement [{path}] does not exist" + if job.name in ( + Settings.DOCKER_BUILD_JOB_NAME, + Settings.CI_CONFIG_JOB_NAME, + Settings.FINISH_WORKFLOW_JOB_NAME, + ): + message += '\n If all requirements already installed on your runners - add setting INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS""' + message += "\n If requirements needs to be installed - add requirements file (Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS):" + message += "\n echo jwt==1.3.1 > ./ci/requirements.txt" + message += ( + "\n echo requests==2.32.3 >> ./ci/requirements.txt" ) + message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt" + cls.evaluate_check( + path.is_file(), message, job.name, workflow.name + ) @classmethod def validate_dockers(cls, workflow: Workflow.Config): diff --git a/ci/praktika/yaml_generator.py b/ci/praktika/yaml_generator.py index 1422a835663..f56715755e8 100644 --- a/ci/praktika/yaml_generator.py +++ b/ci/praktika/yaml_generator.py @@ -81,8 +81,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{{{ github.event.pull_reguest.head.sha }}}} + ref: ${{{{ github.head_ref }}}} {JOB_ADDONS} - name: Prepare env script run: | @@ -191,12 +190,10 @@ jobs: False ), f"Workflow event not yet supported [{workflow_config.event}]" - with ContextManager.cd(): - with open(self._get_workflow_file_name(workflow_config.name), "w") as f: - f.write(yaml_workflow_str) + with open(self._get_workflow_file_name(workflow_config.name), "w") as f: + f.write(yaml_workflow_str) - with ContextManager.cd(): - Shell.check("git add ./.github/workflows/*.yaml") + Shell.check("git add ./.github/workflows/*.yaml") class PullRequestPushYamlGen: diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py index 33173756924..99fec8b5402 100644 --- a/ci/settings/definitions.py +++ b/ci/settings/definitions.py @@ -8,7 +8,7 @@ class RunnerLabels: CI_SERVICES = "ci_services" CI_SERVICES_EBS = "ci_services_ebs" BUILDER = "builder" - STYLE_CHECKER = "style-checker" + FUNC_TESTER_AMD = "func-tester" BASE_BRANCH = "master" @@ -238,5 +238,4 @@ class JobNames: STYLE_CHECK = "Style Check" FAST_TEST = "Fast test" BUILD = "Build" - BUILD_AMD_DEBUG = "Build 
(amd, debug)" - STATELESS_TESTS = "Stateless tests (amd, debug)" + STATELESS = "Stateless tests" diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index 10dd77a0414..0d505ae27c4 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -41,8 +41,9 @@ fast_test_job = Job.Config( amd_build_jobs = Job.Config( name=JobNames.BUILD, runs_on=[RunnerLabels.BUILDER], - command="python3 ./ci/jobs/build_clickhouse.py", + command="python3 ./ci/jobs/build_clickhouse.py --build-type {PARAMETER}", run_in_docker="clickhouse/fasttest", + timeout=3600 * 2, digest_config=Job.CacheDigestConfig( include_paths=[ "./src", @@ -55,6 +56,7 @@ amd_build_jobs = Job.Config( "./docker/packager/packager", "./rust", "./tests/ci/version_helper.py", + "./ci/jobs/build_clickhouse.py", ], ), ).parametrize( @@ -62,27 +64,53 @@ amd_build_jobs = Job.Config( provides=[[ArtifactNames.CH_AMD_DEBUG], [ArtifactNames.CH_AMD_RELEASE]], ) -statless_batch_num = 2 -stateless_tests_amd_debug_jobs = Job.Config( - name=JobNames.STATELESS_TESTS, +stateless_tests_jobs = Job.Config( + name=JobNames.STATELESS, runs_on=[RunnerLabels.BUILDER], - command="python3 ./ci/jobs/functional_stateless_tests.py amd_debug", - run_in_docker="clickhouse/stateless-test", + command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", + run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_stateless_tests.py", ], ), - requires=[ArtifactNames.CH_AMD_DEBUG], ).parametrize( parameter=[ - f"parallel {i+1}/{statless_batch_num}" for i in range(statless_batch_num) - ] - + ["non-parallel"], - runs_on=[[RunnerLabels.BUILDER] for _ in range(statless_batch_num)] - + [[RunnerLabels.STYLE_CHECKER]], + "amd_debug,parallel", + "amd_debug,non-parallel", + "amd_release,parallel", + "amd_release,non-parallel", + ], + runs_on=[ + [RunnerLabels.BUILDER], + [RunnerLabels.FUNC_TESTER_AMD], + [RunnerLabels.BUILDER], + [RunnerLabels.FUNC_TESTER_AMD], + ], + requires=[ + [ArtifactNames.CH_AMD_DEBUG], + [ArtifactNames.CH_AMD_DEBUG], + [ArtifactNames.CH_AMD_RELEASE], + [ArtifactNames.CH_AMD_RELEASE], + ], ) +# stateless_tests_amd_release_jobs = Job.Config( +# name=JobNames.STATELESS_AMD_RELEASE, +# runs_on=[RunnerLabels.BUILDER], +# command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", +# run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", +# digest_config=Job.CacheDigestConfig( +# include_paths=[ +# "./ci/jobs/functional_stateless_tests.py", +# ], +# ), +# requires=[ArtifactNames.CH_AMD_RELEASE], +# ).parametrize( +# parameter=["parallel", "non-parallel"], +# runs_on=[[RunnerLabels.BUILDER], [RunnerLabels.FUNC_TESTER_AMD]], +# ) + workflow = Workflow.Config( name="PR", event=Workflow.Event.PULL_REQUEST, @@ -91,7 +119,7 @@ workflow = Workflow.Config( style_check_job, fast_test_job, *amd_build_jobs, - *stateless_tests_amd_debug_jobs, + *stateless_tests_jobs, ], artifacts=[ Artifact.Config( diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 3396b10814a..a0ec080ed75 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -2619,14 +2619,14 @@ def run_tests_process(*args, **kwargs): def do_run_tests(jobs, test_suite: TestSuite): - if jobs > 1 and len(test_suite.parallel_tests) > 0: - print( - "Found", - len(test_suite.parallel_tests), - "parallel tests and", - 
len(test_suite.sequential_tests), - "sequential tests", - ) + print( + "Found", + len(test_suite.parallel_tests), + "parallel tests and", + len(test_suite.sequential_tests), + "sequential tests", + ) + if test_suite.parallel_tests: tests_n = len(test_suite.parallel_tests) jobs = min(jobs, tests_n) @@ -2639,6 +2639,7 @@ def do_run_tests(jobs, test_suite: TestSuite): # It makes it more difficult to detect real flaky tests, # because the distribution and the amount # of failures will be nearly the same for all tests from the group. + # TODO: add shuffle for sequential tests random.shuffle(test_suite.parallel_tests) batch_size = len(test_suite.parallel_tests) // jobs @@ -2684,6 +2685,7 @@ def do_run_tests(jobs, test_suite: TestSuite): if not p.is_alive(): processes.remove(p) + if test_suite.sequential_tests: run_tests_array( ( test_suite.sequential_tests, @@ -2693,17 +2695,7 @@ def do_run_tests(jobs, test_suite: TestSuite): ) ) - return len(test_suite.sequential_tests) + len(test_suite.parallel_tests) - num_tests = len(test_suite.all_tests) - run_tests_array( - ( - test_suite.all_tests, - num_tests, - test_suite, - False, - ) - ) - return num_tests + return len(test_suite.sequential_tests) + len(test_suite.parallel_tests) def is_test_from_dir(suite_dir, case): diff --git a/tests/config/install.sh b/tests/config/install.sh index cdae5741fce..9630977b9c1 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -200,7 +200,7 @@ elif [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" == "1" ]]; then fi if [[ "$EXPORT_S3_STORAGE_POLICIES" == "1" ]] || [[ "$S3_STORAGE" = "1" ]]; then - ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ + #ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02944.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02963.xml $DEST_SERVER_PATH/config.d/ diff --git a/tests/docker_scripts/setup_minio.sh b/tests/docker_scripts/setup_minio.sh index 837c05a9c5d..88839c39674 100755 --- a/tests/docker_scripts/setup_minio.sh +++ b/tests/docker_scripts/setup_minio.sh @@ -4,8 +4,10 @@ set -euxf -o pipefail export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse} export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse} +TEST_DIR=${2:-/repo/tests/} if [ -d "$TEMP_DIR" ]; then + TEST_DIR=$(readlink -f $TEST_DIR) cd "$TEMP_DIR" # add / for minio mc in docker PATH="/:.:$PATH" @@ -79,7 +81,7 @@ start_minio() { pwd mkdir -p ./minio_data minio --version - minio server --address ":11111" ./minio_data & + nohup minio server --address ":11111" ./minio_data & wait_for_it lsof -i :11111 sleep 5 @@ -153,7 +155,7 @@ main() { fi start_minio setup_minio "$1" - upload_data "${query_dir}" "${2:-/repo/tests/}" + upload_data "${query_dir}" "$TEST_DIR" setup_aws_credentials } From 0f945cadc74aed12e6a1f05d7cde98aa02e369b7 Mon Sep 17 00:00:00 2001 From: Derek Chia Date: Fri, 8 Nov 2024 17:34:53 +0800 Subject: [PATCH 304/566] Update settings.md Remove duplicated `background_pool_size` description --- .../server-configuration-parameters/settings.md | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 02fa5a8ca58..c5f92ccdf68 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -131,16 +131,6 
@@ Type: UInt64 Default: 8 -## background_pool_size - -Sets the number of threads performing background merges and mutations for tables with MergeTree engines. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance. - -Before changing it, please also take a look at related MergeTree settings, such as `number_of_free_entries_in_pool_to_lower_max_size_of_merge` and `number_of_free_entries_in_pool_to_execute_mutation`. - -Type: UInt64 - -Default: 16 - ## background_schedule_pool_size The maximum number of threads that will be used for constantly executing some lightweight periodic operations for replicated tables, Kafka streaming, and DNS cache updates. From bd875401115fb8116302f446c2dec27835b5e958 Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Fri, 8 Nov 2024 09:45:51 +0000 Subject: [PATCH 305/566] Update tests/queries/0_stateless/03256_invalid_mutation_query.sql MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: János Benjamin Antal --- .../0_stateless/03256_invalid_mutation_query.sql | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/03256_invalid_mutation_query.sql b/tests/queries/0_stateless/03256_invalid_mutation_query.sql index 2c554cabb9e..9b4e8f9a7ea 100644 --- a/tests/queries/0_stateless/03256_invalid_mutation_query.sql +++ b/tests/queries/0_stateless/03256_invalid_mutation_query.sql @@ -3,11 +3,11 @@ DROP TABLE IF EXISTS t2; CREATE TABLE t (x int) ENGINE = MergeTree() ORDER BY (); -DELETE FROM t WHERE y in (SELECT y FROM t); -- { serverError 47 } -DELETE FROM t WHERE x in (SELECT y FROM t); -- { serverError 47 } -DELETE FROM t WHERE x IN (SELECT * FROM t2); -- { serverError 60 } -ALTER TABLE t DELETE WHERE x in (SELECT y FROM t); -- { serverError 47 } -ALTER TABLE t UPDATE x = 1 WHERE x IN (SELECT y FROM t); -- { serverError 47 } +DELETE FROM t WHERE y in (SELECT x FROM t); -- { serverError UNKNOWN_IDENTIFIER } +DELETE FROM t WHERE x in (SELECT y FROM t); -- { serverError UNKNOWN_IDENTIFIER } +DELETE FROM t WHERE x IN (SELECT * FROM t2); -- { serverError UNKNOWN_TABLE } +ALTER TABLE t DELETE WHERE x in (SELECT y FROM t); -- { serverError UNKNOWN_IDENTIFIER } +ALTER TABLE t UPDATE x = 1 WHERE x IN (SELECT y FROM t); -- { serverError UNKNOWN_IDENTIFIER } DELETE FROM t WHERE x IN (SELECT foo FROM bar) SETTINGS validate_mutation_query = 0; From 2d70dd11d27837f2d73fa2b2496ac5d17c1c5a67 Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Fri, 8 Nov 2024 09:47:23 +0000 Subject: [PATCH 306/566] Make it work without the new analyzer --- src/Interpreters/MutationsInterpreter.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 589791ac871..a35353a6b2a 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -1387,9 +1387,17 @@ void MutationsInterpreter::validate() } } + // Make sure the mutation query is valid if (context->getSettingsRef()[Setting::validate_mutation_query]) - // Make sure the mutation query is valid - prepareQueryAffectedQueryTree(commands, source.getStorage(), context); + { + if 
(context->getSettingsRef()[Setting::allow_experimental_analyzer]) + prepareQueryAffectedQueryTree(commands, source.getStorage(), context); + else + { + ASTPtr select_query = prepareQueryAffectedAST(commands, source.getStorage(), context); + InterpreterSelectQuery(select_query, context, source.getStorage(), metadata_snapshot); + } + } QueryPlan plan; From d75a41f04ccb536b4083034b076e0f6a012e6d06 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Fri, 8 Nov 2024 11:24:28 +0100 Subject: [PATCH 307/566] init --- tests/queries/0_stateless/01287_max_execution_speed.sql | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/queries/0_stateless/01287_max_execution_speed.sql b/tests/queries/0_stateless/01287_max_execution_speed.sql index 0d132999481..89c3050a256 100644 --- a/tests/queries/0_stateless/01287_max_execution_speed.sql +++ b/tests/queries/0_stateless/01287_max_execution_speed.sql @@ -1,5 +1,8 @@ -- Tags: no-fasttest, no-debug, no-tsan, no-msan, no-asan +SET max_rows_to_read=0; +SET max_bytes_to_read=0; + SET min_execution_speed = 100000000000, timeout_before_checking_execution_speed = 0; SELECT count() FROM system.numbers; -- { serverError TOO_SLOW } SET min_execution_speed = 0; From cf1da69f93c4c8e982b89a73565c16642ab0f18f Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Fri, 8 Nov 2024 11:44:02 +0100 Subject: [PATCH 308/566] Make keeper entrypoint less verbose, like the in the server --- docker/keeper/entrypoint.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/docker/keeper/entrypoint.sh b/docker/keeper/entrypoint.sh index 31e4c8b63da..2b96e4dd655 100644 --- a/docker/keeper/entrypoint.sh +++ b/docker/keeper/entrypoint.sh @@ -1,6 +1,5 @@ #!/bin/bash -set +x set -eo pipefail shopt -s nullglob From 11f3568f5b661330e3fa94fb1515807dd73d7e22 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Fri, 8 Nov 2024 11:45:12 +0100 Subject: [PATCH 309/566] First check the ROOT to assign the env --- docker/keeper/entrypoint.sh | 2 +- docker/server/entrypoint.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/keeper/entrypoint.sh b/docker/keeper/entrypoint.sh index 2b96e4dd655..934605b0b6f 100644 --- a/docker/keeper/entrypoint.sh +++ b/docker/keeper/entrypoint.sh @@ -4,7 +4,7 @@ set -eo pipefail shopt -s nullglob DO_CHOWN=1 -if [[ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" || "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" ]]; then +if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then DO_CHOWN=0 fi diff --git a/docker/server/entrypoint.sh b/docker/server/entrypoint.sh index 443bcd7a176..2f87008f2e5 100755 --- a/docker/server/entrypoint.sh +++ b/docker/server/entrypoint.sh @@ -4,7 +4,7 @@ set -eo pipefail shopt -s nullglob DO_CHOWN=1 -if [[ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" || "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" ]]; then +if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then DO_CHOWN=0 fi From dd1ca389dbc3b3f5c5f456bc0d070a972acca806 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 8 Nov 2024 10:45:13 +0000 Subject: [PATCH 310/566] Trying to cast filter column. 
--- src/Processors/QueryPlan/FilterStep.cpp | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 64c46332c34..7613aac618e 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -5,6 +5,8 @@ #include #include #include +#include +#include #include #include #include @@ -46,7 +48,19 @@ static ActionsAndName splitSingleAndFilter(ActionsDAG & dag, const ActionsDAG::N auto name = filter_node->result_name; auto split_result = dag.split({filter_node}, true); dag = std::move(split_result.second); - split_result.first.getOutputs().emplace(split_result.first.getOutputs().begin(), split_result.split_nodes_mapping[filter_node]); + + const auto * split_filter_node = split_result.split_nodes_mapping[filter_node]; + auto filter_type = removeLowCardinality(split_filter_node->result_type); + if (!filter_type->onlyNull() && !isUInt8(removeNullable(filter_type))) + { + DataTypePtr cast_type = std::make_shared(); + if (filter_type->isNullable()) + cast_type = std::make_shared(std::move(cast_type)); + + split_result.first.addCast(*split_filter_node, cast_type, {}); + } + + split_result.first.getOutputs().emplace(split_result.first.getOutputs().begin(), split_filter_node); return ActionsAndName{std::move(split_result.first), std::move(name)}; } @@ -168,7 +182,7 @@ void FilterStep::describeActions(FormatSettings & settings) const for (auto & and_atom : and_atoms) { auto expression = std::make_shared(std::move(and_atom.dag)); - settings.out << prefix << "AND column: " << and_atom.name; + settings.out << prefix << "AND column: " << and_atom.name << '\n'; expression->describeActions(settings.out, prefix); } From b370fefb3c8e5583904ab5fe6b21e4ebbb7de5ad Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Fri, 8 Nov 2024 10:53:30 +0000 Subject: [PATCH 311/566] Fix test 03173_forbid_qualify --- tests/queries/0_stateless/03173_forbid_qualify.reference | 1 - tests/queries/0_stateless/03173_forbid_qualify.sql | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03173_forbid_qualify.reference b/tests/queries/0_stateless/03173_forbid_qualify.reference index c2f595d8c4b..648ff45ff18 100644 --- a/tests/queries/0_stateless/03173_forbid_qualify.reference +++ b/tests/queries/0_stateless/03173_forbid_qualify.reference @@ -1,3 +1,2 @@ 100 49 -100 diff --git a/tests/queries/0_stateless/03173_forbid_qualify.sql b/tests/queries/0_stateless/03173_forbid_qualify.sql index 0a41385c52f..04c65fdab9c 100644 --- a/tests/queries/0_stateless/03173_forbid_qualify.sql +++ b/tests/queries/0_stateless/03173_forbid_qualify.sql @@ -7,5 +7,4 @@ select count() from test_qualify; -- 100 select * from test_qualify qualify row_number() over (order by number) = 50 SETTINGS enable_analyzer = 1; -- 49 select * from test_qualify qualify row_number() over (order by number) = 50 SETTINGS enable_analyzer = 0; -- { serverError NOT_IMPLEMENTED } -delete from test_qualify where number in (select number from test_qualify qualify row_number() over (order by number) = 50); -- { serverError UNFINISHED } -select count() from test_qualify; -- 100 +delete from test_qualify where number in (select number from test_qualify qualify row_number() over (order by number) = 50); -- { serverError NOT_IMPLEMENTED } From 5275c0a8c44fd3cae1d078411efe42e2f34df437 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 8 Nov 2024 11:53:46 +0100 Subject: 
[PATCH 312/566] Reverse order on implicit options --- src/Client/ClientBaseOptimizedParts.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Client/ClientBaseOptimizedParts.cpp b/src/Client/ClientBaseOptimizedParts.cpp index afffe775029..6eaa3708df6 100644 --- a/src/Client/ClientBaseOptimizedParts.cpp +++ b/src/Client/ClientBaseOptimizedParts.cpp @@ -109,8 +109,8 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de && !op.original_tokens[0].empty() && !op.value.empty()) { /// Two special cases for better usability: - /// - if the option is a filesystem file, then it's likely a queries file (clickhouse repro.sql) /// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1" + /// - if the option is a filesystem file, then it's likely a queries file (clickhouse repro.sql) /// These are relevant for interactive usage - user-friendly, but questionable in general. /// In case of ambiguity or for scripts, prefer using proper options. @@ -119,10 +119,10 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de const char * option; std::error_code ec; - if (std::filesystem::is_regular_file(std::filesystem::path{token}, ec)) - option = "queries-file"; - else if (token.contains(' ')) + if (token.contains(' ')) option = "query"; + else if (std::filesystem::is_regular_file(std::filesystem::path{token}, ec)) + option = "queries-file"; else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); From 164e3c26677a209bc7990d326869e71eb3be3bef Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 8 Nov 2024 11:54:43 +0100 Subject: [PATCH 313/566] Update settings changes history --- src/Core/SettingsChangesHistory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 64964f294bd..efa47302343 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -74,6 +74,7 @@ static std::initializer_list Date: Fri, 8 Nov 2024 10:55:52 +0000 Subject: [PATCH 314/566] Better test fix --- tests/queries/0_stateless/03173_forbid_qualify.reference | 1 + tests/queries/0_stateless/03173_forbid_qualify.sql | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03173_forbid_qualify.reference b/tests/queries/0_stateless/03173_forbid_qualify.reference index 648ff45ff18..c2f595d8c4b 100644 --- a/tests/queries/0_stateless/03173_forbid_qualify.reference +++ b/tests/queries/0_stateless/03173_forbid_qualify.reference @@ -1,2 +1,3 @@ 100 49 +100 diff --git a/tests/queries/0_stateless/03173_forbid_qualify.sql b/tests/queries/0_stateless/03173_forbid_qualify.sql index 04c65fdab9c..f7b05a1eb7e 100644 --- a/tests/queries/0_stateless/03173_forbid_qualify.sql +++ b/tests/queries/0_stateless/03173_forbid_qualify.sql @@ -7,4 +7,5 @@ select count() from test_qualify; -- 100 select * from test_qualify qualify row_number() over (order by number) = 50 SETTINGS enable_analyzer = 1; -- 49 select * from test_qualify qualify row_number() over (order by number) = 50 SETTINGS enable_analyzer = 0; -- { serverError NOT_IMPLEMENTED } -delete from test_qualify where number in (select number from test_qualify qualify row_number() over (order by number) = 50); -- { serverError NOT_IMPLEMENTED } +delete from test_qualify where number in (select number from test_qualify qualify row_number() over (order by number) = 50) SETTINGS validate_mutation_query = 0; -- { 
serverError UNFINISHED } +select count() from test_qualify; -- 100 From 87b9f5cb4ef65bd8c7313bd4f2563e41b974e951 Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 8 Nov 2024 12:24:29 +0100 Subject: [PATCH 315/566] Add min_parts_to_merge_at_once setting --- .../MergeTree/MergeSelectors/SimpleMergeSelector.cpp | 5 ++++- src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.h | 2 ++ src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 2 ++ src/Storages/MergeTree/MergeTreeSettings.cpp | 1 + 4 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.cpp b/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.cpp index c393349ef32..4f786215cbe 100644 --- a/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.cpp +++ b/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.cpp @@ -116,7 +116,7 @@ bool allow( double sum_size, double max_size, double min_age, - double range_size, + size_t range_size, double partition_size, double min_size_to_lower_base_log, double max_size_to_lower_base_log, @@ -125,6 +125,9 @@ bool allow( if (settings.min_age_to_force_merge && min_age >= settings.min_age_to_force_merge) return true; + if (settings.min_parts_to_merge_at_once && range_size < settings.min_parts_to_merge_at_once) + return false; + /// Map size to 0..1 using logarithmic scale /// Use log(1 + x) instead of log1p(x) because our sum_size is always integer. /// Also log1p seems to be slow and significantly affect performance of merges assignment. diff --git a/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.h b/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.h index 2d4129b8bf8..1e7676c6aed 100644 --- a/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.h +++ b/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.h @@ -90,6 +90,8 @@ public: { /// Zero means unlimited. Can be overridden by the same merge tree setting. size_t max_parts_to_merge_at_once = 100; + /// Zero means no minimum. Can be overridden by the same merge tree setting. + size_t min_parts_to_merge_at_once = 0; /// Some sort of a maximum number of parts in partition. Can be overridden by the same merge tree setting. 
size_t parts_to_throw_insert = 3000; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 37b6539755c..488f4b2390d 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -82,6 +82,7 @@ namespace MergeTreeSetting extern const MergeTreeSettingsMergeSelectorAlgorithm merge_selector_algorithm; extern const MergeTreeSettingsBool merge_selector_enable_heuristic_to_remove_small_parts_at_right; extern const MergeTreeSettingsFloat merge_selector_base; + extern const MergeTreeSettingsUInt64 min_parts_to_merge_at_once; } namespace ErrorCodes @@ -566,6 +567,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( simple_merge_settings.max_parts_to_merge_at_once = (*data_settings)[MergeTreeSetting::max_parts_to_merge_at_once]; simple_merge_settings.enable_heuristic_to_remove_small_parts_at_right = (*data_settings)[MergeTreeSetting::merge_selector_enable_heuristic_to_remove_small_parts_at_right]; simple_merge_settings.base = (*data_settings)[MergeTreeSetting::merge_selector_base]; + simple_merge_settings.min_parts_to_merge_at_once = (*data_settings)[MergeTreeSetting::min_parts_to_merge_at_once]; if (!(*data_settings)[MergeTreeSetting::min_age_to_force_merge_on_partition_only]) simple_merge_settings.min_age_to_force_merge = (*data_settings)[MergeTreeSetting::min_age_to_force_merge_seconds]; diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 33910d1048d..fcd4e05cf00 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -102,6 +102,7 @@ namespace ErrorCodes DECLARE(MergeSelectorAlgorithm, merge_selector_algorithm, MergeSelectorAlgorithm::SIMPLE, "The algorithm to select parts for merges assignment", EXPERIMENTAL) \ DECLARE(Bool, merge_selector_enable_heuristic_to_remove_small_parts_at_right, true, "Enable heuristic for selecting parts for merge which removes parts from right side of range, if their size is less than specified ratio (0.01) of sum_size. Works for Simple and StochasticSimple merge selectors", 0) \ DECLARE(Float, merge_selector_base, 5.0, "Affects write amplification of assigned merges (expert level setting, don't change if you don't understand what it is doing). Works for Simple and StochasticSimple merge selectors", 0) \ + DECLARE(UInt64, min_parts_to_merge_at_once, 0, "Minimal amount of data parts which merge selector can pick to merge at once (expert level setting, don't change if you don't understand what it is doing). 0 - disabled. Works for Simple and StochasticSimple merge selectors.", 0) \ \ /** Inserts settings. */ \ DECLARE(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. 
Disabled if set to 0", 0) \ From b6cad9c913b304052939cd100ba4e9d35b44c47a Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 8 Nov 2024 12:25:26 +0100 Subject: [PATCH 316/566] Add test --- ...03267_min_parts_to_merge_at_once.reference | 4 ++ .../03267_min_parts_to_merge_at_once.sh | 43 +++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 tests/queries/0_stateless/03267_min_parts_to_merge_at_once.reference create mode 100755 tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh diff --git a/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.reference b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.reference new file mode 100644 index 00000000000..966a0980e59 --- /dev/null +++ b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.reference @@ -0,0 +1,4 @@ +2 +3 +4 +1 diff --git a/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh new file mode 100755 index 00000000000..e069b57bf86 --- /dev/null +++ b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS t;" + +$CLICKHOUSE_CLIENT --query "CREATE TABLE t (key UInt64) ENGINE = MergeTree() ORDER BY tuple() SETTINGS min_parts_to_merge_at_once=5, merge_selector_base=1" + +$CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (1)" +$CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (2);" + +# doesn't make test flaky +sleep 1 + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'" + +$CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (3)" + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'" + +$CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (4)" + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'" + +$CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (5)" + +counter=0 retries=60 + +I=0 +while [[ $counter -lt $retries ]]; do + result=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'") + if [ "$result" -eq "1" ];then + break; + fi + sleep 0.5 + counter=$((counter + 1)) +done + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'" + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS t" From 4c644a98f5985a540ee75dc5a1f5ae31be39cc15 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Fri, 8 Nov 2024 12:29:04 +0100 Subject: [PATCH 317/566] Fix broken 03247_ghdata_string_to_json_alter --- .../queries/0_stateless/03247_ghdata_string_to_json_alter.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh index 931d106120c..a2d1788cb5d 100755 --- a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh +++ b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh @@ -18,12 +18,12 @@ ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" ${CLICKHOUSE_CLIENT} -q \ "SELECT data.repo.name, count() AS stars FROM ghdata \ - WHERE data.type = 'WatchEvent' 
GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" + WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" --allow_suspicious_types_in_group_by=1, --allow_suspicious_types_in_order_by=1 ${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ "SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ ARRAY JOIN data.payload.commits[].author.name \ - GROUP BY name ORDER BY c DESC, name LIMIT 5" + GROUP BY name ORDER BY c DESC, name LIMIT 5" --allow_suspicious_types_in_group_by=1, --allow_suspicious_types_in_order_by=1 ${CLICKHOUSE_CLIENT} -q "SELECT max(data.payload.pull_request.assignees[].size0) FROM ghdata" From 96383d42b184df2e05e9d8aa5ee83dbce4105800 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Fri, 8 Nov 2024 11:38:43 +0000 Subject: [PATCH 318/566] Small refactor to ease debugging when something happens on the CI --- src/Interpreters/QueryMetricLog.cpp | 13 ++++++------- .../03203_system_query_metric_log.reference | 6 +++--- .../0_stateless/03203_system_query_metric_log.sh | 6 +++--- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/Interpreters/QueryMetricLog.cpp b/src/Interpreters/QueryMetricLog.cpp index e784c357b29..4fbe4f9e1b5 100644 --- a/src/Interpreters/QueryMetricLog.cpp +++ b/src/Interpreters/QueryMetricLog.cpp @@ -148,7 +148,7 @@ void QueryMetricLog::startQuery(const String & query_id, TimePoint start_time, U { QueryMetricLogStatus query_status; query_status.interval_milliseconds = interval_milliseconds; - query_status.next_collect_time = start_time + std::chrono::milliseconds(interval_milliseconds); + query_status.next_collect_time = start_time; auto context = getContext(); const auto & process_list = context->getProcessList(); @@ -213,6 +213,7 @@ void QueryMetricLog::finishQuery(const String & query_id, TimePoint finish_time, void QueryMetricLogStatus::scheduleNext(String query_id) { + next_collect_time += std::chrono::milliseconds(interval_milliseconds); const auto now = std::chrono::system_clock::now(); if (next_collect_time > now) { @@ -229,8 +230,9 @@ void QueryMetricLogStatus::scheduleNext(String query_id) std::optional QueryMetricLogStatus::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) { - LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Schedule next: {}", - query_id, interval_milliseconds, timePointToString(query_info_time), schedule_next); + LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", + query_id, interval_milliseconds, timePointToString(query_info_time), + schedule_next ? 
timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); if (query_info_time <= last_collect_time) { @@ -276,15 +278,12 @@ std::optional QueryMetricLogStatus::createLogMetricElemen } else { - LOG_TRACE(logger, "Query {} has no profile counters", query_id); + LOG_WARNING(logger, "Query {} has no profile counters", query_id); elem.profile_events = std::vector(ProfileEvents::end()); } if (schedule_next) - { - next_collect_time += std::chrono::milliseconds(interval_milliseconds); scheduleNext(query_id); - } return elem; } diff --git a/tests/queries/0_stateless/03203_system_query_metric_log.reference b/tests/queries/0_stateless/03203_system_query_metric_log.reference index 940b0c4e178..fa8e27a7e90 100644 --- a/tests/queries/0_stateless/03203_system_query_metric_log.reference +++ b/tests/queries/0_stateless/03203_system_query_metric_log.reference @@ -23,8 +23,8 @@ --Interval 123: check that the SleepFunctionCalls, SleepFunctionMilliseconds and ProfileEvent_SleepFunctionElapsedMicroseconds are correct 1 --Check that a query_metric_log_interval=0 disables the collection -0 +1 -Check that a query which execution time is less than query_metric_log_interval is never collected -0 +1 --Check that there is a final event when queries finish -3 +1 diff --git a/tests/queries/0_stateless/03203_system_query_metric_log.sh b/tests/queries/0_stateless/03203_system_query_metric_log.sh index bf94be79d7c..abcd14c8e5d 100755 --- a/tests/queries/0_stateless/03203_system_query_metric_log.sh +++ b/tests/queries/0_stateless/03203_system_query_metric_log.sh @@ -84,17 +84,17 @@ check_log 123 # query_metric_log_interval=0 disables the collection altogether $CLICKHOUSE_CLIENT -m -q """ SELECT '--Check that a query_metric_log_interval=0 disables the collection'; - SELECT count() FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_0' + SELECT count() == 0 FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_0' """ # a quick query that takes less than query_metric_log_interval is never collected $CLICKHOUSE_CLIENT -m -q """ SELECT '-Check that a query which execution time is less than query_metric_log_interval is never collected'; - SELECT count() FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_fast' + SELECT count() == 0 FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_fast' """ # a query that takes more than query_metric_log_interval is collected including the final row $CLICKHOUSE_CLIENT -m -q """ SELECT '--Check that there is a final event when queries finish'; - SELECT count() FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_1000' + SELECT count() > 2 FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_1000' """ From 1bd6b9df95792e8917e1da744a0d8e7d586949ed Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 8 Nov 2024 12:47:48 +0100 Subject: [PATCH 319/566] Fix style check --- tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh index e069b57bf86..90b9d0339cf 100755 --- a/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh +++ b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh @@ -28,7 +28,6 @@ $CLICKHOUSE_CLIENT --query 
"INSERT INTO t VALUES (5)" counter=0 retries=60 -I=0 while [[ $counter -lt $retries ]]; do result=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'") if [ "$result" -eq "1" ];then From 10329cbbf2da51925e5a4580a8ba9faf3315cd02 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Fri, 8 Nov 2024 12:07:30 +0100 Subject: [PATCH 320/566] Generate clickhouse/clickhouse-server README as in docker-library --- docker/server/README.md | 8 ++ docker/server/README.sh | 38 +++++ docker/server/README.src/README-short.txt | 1 + docker/server/README.src/content.md | 166 ++++++++++++++++++++++ docker/server/README.src/github-repo | 1 + docker/server/README.src/license.md | 1 + docker/server/README.src/logo.svg | 43 ++++++ docker/server/README.src/maintainer.md | 1 + docker/server/README.src/metadata.json | 7 + 9 files changed, 266 insertions(+) create mode 100755 docker/server/README.sh create mode 100644 docker/server/README.src/README-short.txt create mode 100644 docker/server/README.src/content.md create mode 100644 docker/server/README.src/github-repo create mode 100644 docker/server/README.src/license.md create mode 100644 docker/server/README.src/logo.svg create mode 100644 docker/server/README.src/maintainer.md create mode 100644 docker/server/README.src/metadata.json diff --git a/docker/server/README.md b/docker/server/README.md index 1dc636414ac..e8c60204c96 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -1,3 +1,11 @@ + + # ClickHouse Server Docker Image ## What is ClickHouse? diff --git a/docker/server/README.sh b/docker/server/README.sh new file mode 100755 index 00000000000..42fa72404d1 --- /dev/null +++ b/docker/server/README.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +set -ueo pipefail + +# A script to generate README.sh close to as it done in https://github.com/docker-library/docs + +WORKDIR=$(dirname "$0") +SCRIPT_NAME=$(basename "$0") +CONTENT=README.src/content.md +LICENSE=README.src/license.md +cd "$WORKDIR" + +R=README.md + +cat > "$R" < + +EOD + +cat "$CONTENT" >> "$R" + +cat >> "$R" <=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A). +- Since the Clickhouse 24.11 Ubuntu images started using `ubuntu:22.04` as its base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run [--privileged | --security-opt seccomp=unconfined]` instead, however that has security implications. + +## How to use this image + +### start server instance + +```bash +docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server +``` + +By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking). + +By default, starting above server instance will be run as the `default` user without password. + +### connect to it from a native client + +```bash +docker run -it --rm --link some-clickhouse-server:clickhouse-server --entrypoint clickhouse-client clickhouse/clickhouse-server --host clickhouse-server +# OR +docker exec -it some-clickhouse-server clickhouse-client +``` + +More information about the [ClickHouse client](https://clickhouse.com/docs/en/interfaces/cli/). 
+ +### connect to it using curl + +```bash +echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server curlimages/curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- +``` + +More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/). + +### stopping / removing the container + +```bash +docker stop some-clickhouse-server +docker rm some-clickhouse-server +``` + +### networking + +You can expose your ClickHouse running in docker by [mapping a particular port](https://docs.docker.com/config/containers/container-networking/) from inside the container using host ports: + +```bash +docker run -d -p 18123:8123 -p19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server +echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @- +``` + +`22.6.3.35` + +or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance): + +```bash +docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server +echo 'SELECT version()' | curl 'http://localhost:8123/' --data-binary @- +``` + +`22.6.3.35` + +### Volumes + +Typically you may want to mount the following folders inside your container to achieve persistency: + +- `/var/lib/clickhouse/` - main folder where ClickHouse stores the data +- `/var/log/clickhouse-server/` - logs + +```bash +docker run -d \ + -v $(realpath ./ch_data):/var/lib/clickhouse/ \ + -v $(realpath ./ch_logs):/var/log/clickhouse-server/ \ + --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server +``` + +You may also want to mount: + +- `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments +- `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments +- `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below). + +### Linux capabilities + +ClickHouse has some advanced functionality, which requires enabling several [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html). + +They are optional and can be enabled using the following [docker command-line arguments](https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities): + +```bash +docker run -d \ + --cap-add=SYS_NICE --cap-add=NET_ADMIN --cap-add=IPC_LOCK \ + --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server +``` + +## Configuration + +The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/). 
+ +ClickHouse configuration is represented with a file "config.xml" ([documentation](https://clickhouse.com/docs/en/operations/configuration_files/)) + +### Start server instance with custom configuration + +```bash +docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /path/to/your/config.xml:/etc/clickhouse-server/config.xml clickhouse/clickhouse-server +``` + +### Start server as custom user + +```bash +# $(pwd)/data/clickhouse should exist and be owned by current user +docker run --rm --user ${UID}:${GID} --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server +``` + +When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start. + +### Start server from root (useful in case of enabled user namespace) + +```bash +docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server-userns -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server +``` + +### How to create default database and user on starting + +Sometimes you may want to create a user (user named `default` is used by default) and database on a container start. You can do it using environment variables `CLICKHOUSE_DB`, `CLICKHOUSE_USER`, `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT` and `CLICKHOUSE_PASSWORD`: + +```bash +docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 -e CLICKHOUSE_PASSWORD=password -p 9000:9000/tcp clickhouse/clickhouse-server +``` + +## How to extend this image + +To perform additional initialization in an image derived from this one, add one or more `*.sql`, `*.sql.gz`, or `*.sh` scripts under `/docker-entrypoint-initdb.d`. After the entrypoint calls `initdb`, it will run any `*.sql` files, run any executable `*.sh` scripts, and source any non-executable `*.sh` scripts found in that directory to do further initialization before starting the service. +Also, you can provide environment variables `CLICKHOUSE_USER` & `CLICKHOUSE_PASSWORD` that will be used for clickhouse-client during initialization. + +For example, to add an additional user and database, add the following to `/docker-entrypoint-initdb.d/init-db.sh`: + +```bash +#!/bin/bash +set -e + +clickhouse client -n <<-EOSQL + CREATE DATABASE docker; + CREATE TABLE docker.docker (x Int32) ENGINE = Log; +EOSQL +``` diff --git a/docker/server/README.src/github-repo b/docker/server/README.src/github-repo new file mode 100644 index 00000000000..dc2b6635325 --- /dev/null +++ b/docker/server/README.src/github-repo @@ -0,0 +1 @@ +https://github.com/ClickHouse/docker-library diff --git a/docker/server/README.src/license.md b/docker/server/README.src/license.md new file mode 100644 index 00000000000..6be024edcde --- /dev/null +++ b/docker/server/README.src/license.md @@ -0,0 +1 @@ +View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image. 
diff --git a/docker/server/README.src/logo.svg b/docker/server/README.src/logo.svg new file mode 100644 index 00000000000..a50dd81a164 --- /dev/null +++ b/docker/server/README.src/logo.svg @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docker/server/README.src/maintainer.md b/docker/server/README.src/maintainer.md new file mode 100644 index 00000000000..26c7db1a293 --- /dev/null +++ b/docker/server/README.src/maintainer.md @@ -0,0 +1 @@ +[ClickHouse Inc.](%%GITHUB-REPO%%) diff --git a/docker/server/README.src/metadata.json b/docker/server/README.src/metadata.json new file mode 100644 index 00000000000..3d3937b21fb --- /dev/null +++ b/docker/server/README.src/metadata.json @@ -0,0 +1,7 @@ +{ + "hub": { + "categories": [ + "databases-and-storage" + ] + } +} From aa15b912df09bb95e400a50aa007266948f75697 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Fri, 8 Nov 2024 12:58:50 +0100 Subject: [PATCH 321/566] Apply review comments from docker-library/docs --- docker/server/README.md | 20 +++++++++------- docker/server/README.src/content.md | 36 ++++++++++++++++------------- 2 files changed, 32 insertions(+), 24 deletions(-) diff --git a/docker/server/README.md b/docker/server/README.md index e8c60204c96..7403d5b0b2a 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -16,6 +16,7 @@ ClickHouse works 100-1000x faster than traditional database management systems, For more information and documentation see https://clickhouse.com/. + ## Versions - The `latest` tag points to the latest release of the latest stable branch. @@ -24,6 +25,7 @@ For more information and documentation see https://clickhouse.com/. - The tag `head` is built from the latest commit to the default branch. - Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`. + ### Compatibility - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. @@ -38,7 +40,7 @@ For more information and documentation see https://clickhouse.com/. docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server ``` -By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking). +By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below. By default, starting above server instance will be run as the `default` user without password. @@ -55,7 +57,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in ### connect to it using curl ```bash -echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server curlimages/curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- +echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- ``` More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/). 
@@ -78,7 +80,7 @@ echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @- `22.6.3.35` -or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance): +Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance): ```bash docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server @@ -96,8 +98,8 @@ Typically you may want to mount the following folders inside your container to a ```bash docker run -d \ - -v $(realpath ./ch_data):/var/lib/clickhouse/ \ - -v $(realpath ./ch_logs):/var/log/clickhouse-server/ \ + -v "$PWD/ch_data:/var/lib/clickhouse/" \ + -v "$PWD/ch_logs:/var/log/clickhouse-server/" \ --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server ``` @@ -119,6 +121,8 @@ docker run -d \ --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server ``` +Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker). + ## Configuration The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/). @@ -134,8 +138,8 @@ docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /pa ### Start server as custom user ```bash -# $(pwd)/data/clickhouse should exist and be owned by current user -docker run --rm --user ${UID}:${GID} --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server +# $PWD/data/clickhouse should exist and be owned by current user +docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server ``` When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start. @@ -143,7 +147,7 @@ When you use the image with local directories mounted, you probably want to spec ### Start server from root (useful in case of enabled user namespace) ```bash -docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server-userns -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server +docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server ``` ### How to create default database and user on starting diff --git a/docker/server/README.src/content.md b/docker/server/README.src/content.md index e790de41236..bfc1a271546 100644 --- a/docker/server/README.src/content.md +++ b/docker/server/README.src/content.md @@ -10,6 +10,7 @@ ClickHouse works 100-1000x faster than traditional database management systems, For more information and documentation see https://clickhouse.com/. 
+ ## Versions - The `latest` tag points to the latest release of the latest stable branch. @@ -18,6 +19,7 @@ For more information and documentation see https://clickhouse.com/. - The tag `head` is built from the latest commit to the default branch. - Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`. + ### Compatibility - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. @@ -29,17 +31,17 @@ For more information and documentation see https://clickhouse.com/. ### start server instance ```bash -docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server +docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%% ``` -By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking). +By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below. By default, starting above server instance will be run as the `default` user without password. ### connect to it from a native client ```bash -docker run -it --rm --link some-clickhouse-server:clickhouse-server --entrypoint clickhouse-client clickhouse/clickhouse-server --host clickhouse-server +docker run -it --rm --link some-clickhouse-server:clickhouse-server --entrypoint clickhouse-client %%IMAGE%% --host clickhouse-server # OR docker exec -it some-clickhouse-server clickhouse-client ``` @@ -49,7 +51,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in ### connect to it using curl ```bash -echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server curlimages/curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- +echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- ``` More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/). 
@@ -66,16 +68,16 @@ docker rm some-clickhouse-server You can expose your ClickHouse running in docker by [mapping a particular port](https://docs.docker.com/config/containers/container-networking/) from inside the container using host ports: ```bash -docker run -d -p 18123:8123 -p19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server +docker run -d -p 18123:8123 -p19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%% echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @- ``` `22.6.3.35` -or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance): +Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance): ```bash -docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server +docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%% echo 'SELECT version()' | curl 'http://localhost:8123/' --data-binary @- ``` @@ -90,9 +92,9 @@ Typically you may want to mount the following folders inside your container to a ```bash docker run -d \ - -v $(realpath ./ch_data):/var/lib/clickhouse/ \ - -v $(realpath ./ch_logs):/var/log/clickhouse-server/ \ - --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server + -v "$PWD/ch_data:/var/lib/clickhouse/" \ + -v "$PWD/ch_logs:/var/log/clickhouse-server/" \ + --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%% ``` You may also want to mount: @@ -110,9 +112,11 @@ They are optional and can be enabled using the following [docker command-line ar ```bash docker run -d \ --cap-add=SYS_NICE --cap-add=NET_ADMIN --cap-add=IPC_LOCK \ - --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server + --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%% ``` +Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker). + ## Configuration The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/). 
@@ -122,14 +126,14 @@ ClickHouse configuration is represented with a file "config.xml" ([documentation ### Start server instance with custom configuration ```bash -docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /path/to/your/config.xml:/etc/clickhouse-server/config.xml clickhouse/clickhouse-server +docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /path/to/your/config.xml:/etc/clickhouse-server/config.xml %%IMAGE%% ``` ### Start server as custom user ```bash -# $(pwd)/data/clickhouse should exist and be owned by current user -docker run --rm --user ${UID}:${GID} --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server +# $PWD/data/clickhouse should exist and be owned by current user +docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%% ``` When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start. @@ -137,7 +141,7 @@ When you use the image with local directories mounted, you probably want to spec ### Start server from root (useful in case of enabled user namespace) ```bash -docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server-userns -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server +docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%% ``` ### How to create default database and user on starting @@ -145,7 +149,7 @@ docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server Sometimes you may want to create a user (user named `default` is used by default) and database on a container start. You can do it using environment variables `CLICKHOUSE_DB`, `CLICKHOUSE_USER`, `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT` and `CLICKHOUSE_PASSWORD`: ```bash -docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 -e CLICKHOUSE_PASSWORD=password -p 9000:9000/tcp clickhouse/clickhouse-server +docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 -e CLICKHOUSE_PASSWORD=password -p 9000:9000/tcp %%IMAGE%% ``` ## How to extend this image From 0dbc041d8bc49d2760fe85a8a76431395571dfb8 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Fri, 8 Nov 2024 12:00:34 +0000 Subject: [PATCH 322/566] Log when the query finishes for system.query_metric_log ASAP There are logs where we can see that after the query finishes, executeQuery takes up to 2 seconds to call finishQuery. 
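For illustration, a minimal sketch of one way to observe that delay from the system tables (not part of this change; it assumes text_log is configured to keep the trace/debug-level "Collecting query_metric_log ..." messages written by QueryMetricLog):

-- lag between the query finish time and the last per-query metric collection
SELECT
    tl.query_id,
    any(ql.event_time_microseconds) AS query_finish_time,
    max(tl.event_time_microseconds) AS last_metric_collection,
    dateDiff('millisecond', any(ql.event_time_microseconds), max(tl.event_time_microseconds)) AS lag_ms
FROM system.text_log AS tl
INNER JOIN system.query_log AS ql ON tl.query_id = ql.query_id
WHERE tl.message LIKE 'Collecting query_metric_log for query%'
  AND ql.type = 'QueryFinish'
GROUP BY tl.query_id
ORDER BY lag_ms DESC
LIMIT 10

Queries with a large lag_ms are the ones where the final collection ran noticeably after the query had already finished.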
--- src/Interpreters/executeQuery.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 9250c069283..4507126b7b3 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -506,6 +506,7 @@ void logQueryFinish( auto time_now = std::chrono::system_clock::now(); QueryStatusInfo info = process_list_elem->getInfo(true, settings[Setting::log_profile_events]); + logQueryMetricLogFinish(context, internal, elem.client_info.current_query_id, time_now, std::make_shared(info)); elem.type = QueryLogElementType::QUERY_FINISH; addStatusInfoToQueryLogElement(elem, info, query_ast, context); @@ -551,6 +552,7 @@ void logQueryFinish( if (auto query_log = context->getQueryLog()) query_log->add(elem); } + if (settings[Setting::log_processors_profiles]) { if (auto processors_profile_log = context->getProcessorsProfileLog()) @@ -598,8 +600,6 @@ void logQueryFinish( } } } - - logQueryMetricLogFinish(context, internal, elem.client_info.current_query_id, time_now, std::make_shared(info)); } if (query_span) @@ -669,6 +669,7 @@ void logQueryException( { elem.query_duration_ms = start_watch.elapsedMilliseconds(); } + logQueryMetricLogFinish(context, internal, elem.client_info.current_query_id, time_now, info); elem.query_cache_usage = QueryCache::Usage::None; @@ -698,8 +699,6 @@ void logQueryException( query_span->addAttribute("clickhouse.exception_code", elem.exception_code); query_span->finish(); } - - logQueryMetricLogFinish(context, internal, elem.client_info.current_query_id, time_now, info); } void logExceptionBeforeStart( @@ -753,6 +752,8 @@ void logExceptionBeforeStart( elem.client_info = context->getClientInfo(); + logQueryMetricLogFinish(context, false, elem.client_info.current_query_id, std::chrono::system_clock::now(), nullptr); + elem.log_comment = settings[Setting::log_comment]; if (elem.log_comment.size() > settings[Setting::max_query_size]) elem.log_comment.resize(settings[Setting::max_query_size]); @@ -797,8 +798,6 @@ void logExceptionBeforeStart( ProfileEvents::increment(ProfileEvents::FailedInsertQuery); } } - - logQueryMetricLogFinish(context, false, elem.client_info.current_query_id, std::chrono::system_clock::now(), nullptr); } void validateAnalyzerSettings(ASTPtr ast, bool context_value) From 1dcd06f098fbd661d1327cd9ecdabd32f67831ce Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Fri, 8 Nov 2024 13:11:36 +0100 Subject: [PATCH 323/566] squash! 
Missing tests in several tests in 24.10 Added corner cases for tests for: to_utc_timestamp and from_utc_timestamp (more timezones, special timezones, epoch corners do not look right, raising a bug over that) arrayUnion (empty and big arrays) quantilesExactWeightedInterpolated (more data types) --- tests/queries/0_stateless/02812_from_to_utc_timestamp.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh index 20ae224332c..9eb4484ace0 100755 --- a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh +++ b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh @@ -16,6 +16,7 @@ $CLICKHOUSE_CLIENT -q "select x, to_utc_timestamp(toDateTime('2023-03-16 11:22:3 $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-02-24 11:22:33'), 'Europe/Madrid'), from_utc_timestamp(toDateTime('2024-02-24 11:22:33'), 'Europe/Madrid')" $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'Europe/Madrid'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'Europe/Madrid')" $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST')" + $CLICKHOUSE_CLIENT -q "select 'leap year:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST')" $CLICKHOUSE_CLIENT -q "select 'non-leap year:', to_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST')" $CLICKHOUSE_CLIENT -q "select 'leap year:', to_utc_timestamp(toDateTime('2024-02-28 23:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-03-01 00:22:33'), 'EST')" From 6f74b3236bef52beed01aca5007dad13df7a5ae4 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 8 Nov 2024 12:22:57 +0000 Subject: [PATCH 324/566] Fix some tests.
--- src/Core/SettingsChangesHistory.cpp | 1 - src/Processors/QueryPlan/FilterStep.cpp | 4 +- .../01655_plan_optimizations.reference | 5 +-- .../0_stateless/01655_plan_optimizations.sh | 4 +- .../02496_remove_redundant_sorting.reference | 13 +++---- ...rouping_sets_predicate_push_down.reference | 38 ++++++++----------- 6 files changed, 27 insertions(+), 38 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index dedf8279533..8f01bacf254 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -75,7 +75,6 @@ static std::initializer_listresult_name; auto split_result = dag.split({filter_node}, true); dag = std::move(split_result.second); @@ -57,10 +56,11 @@ static ActionsAndName splitSingleAndFilter(ActionsDAG & dag, const ActionsDAG::N if (filter_type->isNullable()) cast_type = std::make_shared(std::move(cast_type)); - split_result.first.addCast(*split_filter_node, cast_type, {}); + split_filter_node = &split_result.first.addCast(*split_filter_node, cast_type, {}); } split_result.first.getOutputs().emplace(split_result.first.getOutputs().begin(), split_filter_node); + auto name = split_filter_node->result_name; return ActionsAndName{std::move(split_result.first), std::move(name)}; } diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index edf93b4b39f..7fc7556e85b 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -82,12 +82,12 @@ Filter column: notEquals(__table1.y, 0_UInt8) 9 10 > one condition of filter should be pushed down after aggregating, other two conditions are ANDed Filter column -FUNCTION and(minus(s, 8) :: 5, minus(s, 4) :: 2) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4)) +FUNCTION and(minus(s, 8) :: 3, minus(s, 4) :: 5) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4)) Aggregating Filter column: notEquals(y, 0) > (analyzer) one condition of filter should be pushed down after aggregating, other two conditions are ANDed Filter column -FUNCTION and(minus(__table1.s, 8_UInt8) :: 1, minus(__table1.s, 4_UInt8) :: 2) -> and(notEquals(__table1.y, 0_UInt8), minus(__table1.s, 8_UInt8), minus(__table1.s, 4_UInt8)) +FUNCTION and(minus(__table1.s, 8_UInt8) :: 3, minus(__table1.s, 4_UInt8) :: 5) -> and(notEquals(__table1.y, 0_UInt8), minus(__table1.s, 8_UInt8), minus(__table1.s, 4_UInt8)) Aggregating Filter column: notEquals(__table1.y, 0_UInt8) 0 1 @@ -163,7 +163,6 @@ Filter column: notEquals(__table1.y, 2_UInt8) > filter is pushed down before CreatingSets CreatingSets Filter -Filter 1 3 > one condition of filter is pushed down before LEFT JOIN diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index 42cdac8c01f..04ab9bbd11c 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -89,14 +89,14 @@ $CLICKHOUSE_CLIENT --enable_analyzer=0 --convert_query_to_cnf=0 -q " select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 8 and s - 4 settings enable_optimize_predicate_expression=0" | - grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION and(minus(s, 8) :: 5, minus(s, 4) :: 2) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4))" + grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 
0)\|FUNCTION and(minus(s, 8) :: 3, minus(s, 4) :: 5) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4))" echo "> (analyzer) one condition of filter should be pushed down after aggregating, other two conditions are ANDed" $CLICKHOUSE_CLIENT --enable_analyzer=1 --convert_query_to_cnf=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 8 and s - 4 settings enable_optimize_predicate_expression=0" | - grep -o "Aggregating\|Filter column\|Filter column: notEquals(__table1.y, 0_UInt8)\|FUNCTION and(minus(__table1.s, 8_UInt8) :: 1, minus(__table1.s, 4_UInt8) :: 2) -> and(notEquals(__table1.y, 0_UInt8), minus(__table1.s, 8_UInt8), minus(__table1.s, 4_UInt8))" + grep -o "Aggregating\|Filter column\|Filter column: notEquals(__table1.y, 0_UInt8)\|FUNCTION and(minus(__table1.s, 8_UInt8) :: 3, minus(__table1.s, 4_UInt8) :: 5) -> and(notEquals(__table1.y, 0_UInt8), minus(__table1.s, 8_UInt8), minus(__table1.s, 4_UInt8))" $CLICKHOUSE_CLIENT -q " select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference index 7824fd8cba9..00db41e8ac5 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference @@ -332,13 +332,12 @@ SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it d Expression (Projection) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) - Filter ((WHERE + (Projection + Before ORDER BY))) - Filter (HAVING) - Aggregating - Expression ((Before GROUP BY + Projection)) - Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + (Projection + Before ORDER BY))) - ReadFromSystemNumbers + Filter (((WHERE + (Projection + Before ORDER BY)) + HAVING)) + Aggregating + Expression ((Before GROUP BY + Projection)) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + Before ORDER BY))) + ReadFromSystemNumbers -- execute 1 2 diff --git a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference index 9bb0c022752..a382e14ce03 100644 --- a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference +++ b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference @@ -28,21 +28,17 @@ WHERE type_1 = \'all\' (Expression) ExpressionTransform × 2 (Filter) - FilterTransform × 2 - (Filter) - FilterTransform × 2 - (Filter) - FilterTransform × 2 - (Aggregating) - ExpressionTransform × 2 - AggregatingTransform × 2 - Copy 1 → 2 - (Expression) - ExpressionTransform - (Expression) - ExpressionTransform - (ReadFromMergeTree) - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + FilterTransform × 6 + (Aggregating) + ExpressionTransform × 2 + AggregatingTransform × 2 + Copy 1 → 2 + (Expression) + ExpressionTransform + (Expression) + ExpressionTransform + (ReadFromMergeTree) + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 (Expression) ExpressionTransform × 2 (Filter) @@ -68,14 +64,10 @@ ExpressionTransform × 2 ExpressionTransform × 2 AggregatingTransform × 2 Copy 1 → 2 - (Filter) - FilterTransform - (Filter) - FilterTransform - (Expression) - ExpressionTransform - (ReadFromMergeTree) - 
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + (Expression) + ExpressionTransform + (ReadFromMergeTree) + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 (Expression) ExpressionTransform × 2 (Aggregating) From da0e267278efa2f42e0f18bf5a4b78a5d16dbe99 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Fri, 8 Nov 2024 13:30:21 +0100 Subject: [PATCH 325/566] Fix typo --- .../queries/0_stateless/03247_ghdata_string_to_json_alter.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh index a2d1788cb5d..e8368b6702a 100755 --- a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh +++ b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh @@ -18,12 +18,12 @@ ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" ${CLICKHOUSE_CLIENT} -q \ "SELECT data.repo.name, count() AS stars FROM ghdata \ - WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" --allow_suspicious_types_in_group_by=1, --allow_suspicious_types_in_order_by=1 + WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" --allow_suspicious_types_in_group_by=1 --allow_suspicious_types_in_order_by=1 ${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ "SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ ARRAY JOIN data.payload.commits[].author.name \ - GROUP BY name ORDER BY c DESC, name LIMIT 5" --allow_suspicious_types_in_group_by=1, --allow_suspicious_types_in_order_by=1 + GROUP BY name ORDER BY c DESC, name LIMIT 5" --allow_suspicious_types_in_group_by=1 --allow_suspicious_types_in_order_by=1 ${CLICKHOUSE_CLIENT} -q "SELECT max(data.payload.pull_request.assignees[].size0) FROM ghdata" From a8d07555d4d01e1f261dd5c6c6f003a5581c2339 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 8 Nov 2024 12:31:22 +0000 Subject: [PATCH 326/566] Update 02967_parallel_replicas_joins_and_analyzer EXPLAIN with RIGHT JOIN changed --- ...llel_replicas_joins_and_analyzer.reference | 99 +++++++------------ 1 file changed, 35 insertions(+), 64 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference index 93003b6cf6d..1269f792e76 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference +++ b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference @@ -266,24 +266,13 @@ Expression Join Expression Join - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union - Expression Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + ReadFromMemoryStorage + Expression + Expression + ReadFromMergeTree + Expression + ReadFromMemoryStorage -- -- RIGHT JOIN in sub5: sub5 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -317,27 +306,19 @@ select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_paralle Expression Sorting Expression - Join - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Sorting Expression Join - Union - Expression + Expression + 
ReadFromMemoryStorage + Expression + Join Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union - Expression + Expression + ReadFromMergeTree Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Expression + ReadFromMergeTree -- -- Subqueries for IN allowed with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), @@ -722,28 +703,22 @@ sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -Expression - Join - Expression - Join - Union +Union + Expression + Join + Expression + Join Expression Expression ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union Expression Expression ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union Expression Expression ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Expression + ReadFromRemoteParallelReplicas -- -- RIGHT JOIN in sub5: sub5 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -776,28 +751,24 @@ sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll. select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting - Expression - Join - Union + Union + Expression + Sorting Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Expression - Join - Union + Join Expression Expression ReadFromMergeTree Expression - ReadFromRemoteParallelReplicas - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Join + Expression + Expression + ReadFromMergeTree + Expression + Expression + ReadFromMergeTree + Expression + ReadFromRemoteParallelReplicas -- -- Subqueries for IN allowed with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), From 955f537bd5ef2f4a29717ac4999ce2af47b4c039 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Fri, 8 Nov 2024 12:28:06 +0000 Subject: [PATCH 327/566] Add a new setting query_metric_log_debug to avoid the noise --- src/Core/Settings.cpp | 5 +++++ src/Core/SettingsChangesHistory.cpp | 1 + src/Interpreters/QueryMetricLog.cpp | 18 +++++++++++------- src/Interpreters/QueryMetricLog.h | 3 ++- src/Interpreters/executeQuery.cpp | 3 ++- .../03203_system_query_metric_log.sh | 10 +++++----- 6 files changed, 26 insertions(+), 14 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 081e07ca2ce..d07cd7352a1 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2784,6 +2784,11 @@ If set to any negative value, it will take the value `collect_interval_milliseco To disable the collection of a single query, set `query_metric_log_interval` to 0. 
Default value: -1 + )", 0) \ + DECLARE(Bool, query_metric_log_debug, false, R"( +Turns on debugging traces for system.query_metric_log + +Default value: false )", 0) \ DECLARE(LogsLevel, send_logs_level, LogsLevel::fatal, R"( Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none' diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index ed87fde8b7e..a3e21aa670f 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -75,6 +75,7 @@ static std::initializer_listgetProcessList(); @@ -222,21 +223,24 @@ void QueryMetricLogStatus::scheduleNext(String query_id) } else { - LOG_TRACE(logger, "The next collecting task for query {} should have already run at {}. Scheduling it right now", - query_id, timePointToString(next_collect_time)); + if (debug_traces) + LOG_DEBUG(logger, "The next collecting task for query {} should have already run at {}. Scheduling it right now", + query_id, timePointToString(next_collect_time)); task->schedule(); } } std::optional QueryMetricLogStatus::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) { - LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", - query_id, interval_milliseconds, timePointToString(query_info_time), - schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); + if (debug_traces) + LOG_DEBUG(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", + query_id, interval_milliseconds, timePointToString(query_info_time), + schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); if (query_info_time <= last_collect_time) { - LOG_TRACE(logger, "Query {} has a more recent metrics collected. Skipping this one", query_id); + if (debug_traces) + LOG_DEBUG(logger, "Query {} has a more recent metrics collected. Skipping this one", query_id); return {}; } diff --git a/src/Interpreters/QueryMetricLog.h b/src/Interpreters/QueryMetricLog.h index 65764229b0a..5f301b2cd13 100644 --- a/src/Interpreters/QueryMetricLog.h +++ b/src/Interpreters/QueryMetricLog.h @@ -51,6 +51,7 @@ struct QueryMetricLogStatus std::chrono::system_clock::time_point next_collect_time TSA_GUARDED_BY(getMutex()); std::vector last_profile_events TSA_GUARDED_BY(getMutex()) = std::vector(ProfileEvents::end()); BackgroundSchedulePool::TaskHolder task TSA_GUARDED_BY(getMutex()); + bool debug_traces = false; /// We need to be able to move it for the hash map, so we need to add an indirection here. std::unique_ptr mutex = std::make_unique(); @@ -78,7 +79,7 @@ public: void shutdown() final; /// Both startQuery and finishQuery are called from the thread that executes the query. 
- void startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds); + void startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds, bool debug_traces = false); void finishQuery(const String & query_id, TimePoint finish_time, QueryStatusInfoPtr query_info = nullptr); private: diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 4507126b7b3..794d3dab0e6 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -146,6 +146,7 @@ namespace Setting extern const SettingsQueryCacheSystemTableHandling query_cache_system_table_handling; extern const SettingsSeconds query_cache_ttl; extern const SettingsInt64 query_metric_log_interval; + extern const SettingsBool query_metric_log_debug; extern const SettingsOverflowMode read_overflow_mode; extern const SettingsOverflowMode read_overflow_mode_leaf; extern const SettingsOverflowMode result_overflow_mode; @@ -455,7 +456,7 @@ QueryLogElement logQueryStart( { auto interval_milliseconds = getQueryMetricLogInterval(context); if (interval_milliseconds > 0) - query_metric_log->startQuery(elem.client_info.current_query_id, query_start_time, interval_milliseconds); + query_metric_log->startQuery(elem.client_info.current_query_id, query_start_time, interval_milliseconds, settings[Setting::query_metric_log_debug]); } return elem; diff --git a/tests/queries/0_stateless/03203_system_query_metric_log.sh b/tests/queries/0_stateless/03203_system_query_metric_log.sh index abcd14c8e5d..4bc764b777c 100755 --- a/tests/queries/0_stateless/03203_system_query_metric_log.sh +++ b/tests/queries/0_stateless/03203_system_query_metric_log.sh @@ -6,11 +6,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) readonly query_prefix=$CLICKHOUSE_DATABASE -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_1000" -q "SELECT sleep(2.5) FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_400" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=400 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_123" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=123 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_0" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=0 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_fast" -q "SELECT sleep(0.1) FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_1000" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_400" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=400 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_123" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=123 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_0" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=0 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_fast" -q "SELECT sleep(0.1) SETTINGS query_metric_log_debug=true FORMAT Null" & wait From fd9f32708371246e36b289164cf402230bc860c6 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 8 Nov 2024 13:49:08 +0100 Subject: [PATCH 328/566] Allow to disable memory buffer increase for filesystem cache --- src/Core/Settings.cpp | 3 +++ src/Disks/ObjectStorages/DiskObjectStorage.cpp | 5 ++++- src/IO/ReadSettings.h | 1 + src/Interpreters/Context.cpp | 2 ++ src/Storages/ObjectStorage/StorageObjectStorageSource.cpp | 2 +- 5 files 
changed, 11 insertions(+), 2 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 6f0109fa300..9a821879c5b 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4872,6 +4872,9 @@ Limit on size of a single batch of file segments that a read buffer can request )", 0) \ DECLARE(UInt64, filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, 1000, R"( Wait time to lock cache for space reservation in filesystem cache +)", 0) \ + DECLARE(Bool, filesystem_cache_prefer_bigger_buffer_size, true, R"( +Prefer bigger buffer size if filesystem cache is enabled to avoid writing small file segments which detiriorate cache performance )", 0) \ DECLARE(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"( Wait time to lock cache for space reservation for temporary data in filesystem cache diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp index 3720c04a471..fba45d5a0c9 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorage.cpp @@ -642,7 +642,10 @@ std::unique_ptr DiskObjectStorage::readFile( }; /// Avoid cache fragmentation by choosing bigger buffer size. - bool prefer_bigger_buffer_size = object_storage->supportsCache() && read_settings.enable_filesystem_cache; + bool prefer_bigger_buffer_size = read_settings.filesystem_cache_prefer_bigger_buffer_size + && object_storage->supportsCache() + && read_settings.enable_filesystem_cache; + size_t buffer_size = prefer_bigger_buffer_size ? std::max(settings.remote_fs_buffer_size, DBMS_DEFAULT_BUFFER_SIZE) : settings.remote_fs_buffer_size; diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 6ed02212095..c1747314c76 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -61,6 +61,7 @@ struct ReadSettings bool filesystem_cache_allow_background_download = true; bool filesystem_cache_allow_background_download_for_metadata_files_in_packed_storage = true; bool filesystem_cache_allow_background_download_during_fetch = true; + bool filesystem_cache_prefer_bigger_buffer_size = true; bool use_page_cache_for_disks_without_file_cache = false; bool read_from_page_cache_if_exists_otherwise_bypass_cache = false; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index c1fa2c8549a..d42002bf98d 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -196,6 +196,7 @@ namespace Setting extern const SettingsUInt64 filesystem_cache_segments_batch_size; extern const SettingsBool filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage; extern const SettingsBool filesystem_cache_enable_background_download_during_fetch; + extern const SettingsBool filesystem_cache_prefer_bigger_buffer_size; extern const SettingsBool http_make_head_request; extern const SettingsUInt64 http_max_fields; extern const SettingsUInt64 http_max_field_name_size; @@ -5751,6 +5752,7 @@ ReadSettings Context::getReadSettings() const res.filesystem_cache_allow_background_download_for_metadata_files_in_packed_storage = settings_ref[Setting::filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage]; res.filesystem_cache_allow_background_download_during_fetch = settings_ref[Setting::filesystem_cache_enable_background_download_during_fetch]; + res.filesystem_cache_prefer_bigger_buffer_size = settings_ref[Setting::filesystem_cache_prefer_bigger_buffer_size]; res.filesystem_cache_max_download_size = 
settings_ref[Setting::filesystem_cache_max_download_size]; res.skip_download_if_exceeds_query_cache = settings_ref[Setting::skip_download_if_exceeds_query_cache]; diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 563bdc44760..1ccf23ade90 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -517,7 +517,7 @@ std::unique_ptr StorageObjectStorageSource::createReadBu LOG_TRACE(log, "Downloading object of size {} with initial prefetch", object_size); - bool prefer_bigger_buffer_size = impl->isCached(); + bool prefer_bigger_buffer_size = read_settings.filesystem_cache_prefer_bigger_buffer_size && impl->isCached(); size_t buffer_size = prefer_bigger_buffer_size ? std::max(read_settings.remote_fs_buffer_size, DBMS_DEFAULT_BUFFER_SIZE) : read_settings.remote_fs_buffer_size; From fe73c1880a67340b8eea8c7d27a4f0a58aa42cd9 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Fri, 8 Nov 2024 14:06:59 +0100 Subject: [PATCH 329/566] Update src/Core/Settings.cpp Co-authored-by: Nikita Taranov --- src/Core/Settings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 9a821879c5b..8feb758df0f 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4874,7 +4874,7 @@ Limit on size of a single batch of file segments that a read buffer can request Wait time to lock cache for space reservation in filesystem cache )", 0) \ DECLARE(Bool, filesystem_cache_prefer_bigger_buffer_size, true, R"( -Prefer bigger buffer size if filesystem cache is enabled to avoid writing small file segments which detiriorate cache performance +Prefer bigger buffer size if filesystem cache is enabled to avoid writing small file segments which deteriorate cache performance )", 0) \ DECLARE(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"( Wait time to lock cache for space reservation for temporary data in filesystem cache From 298b172c49493a88c87fde4d0e09a6413102de55 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Thu, 7 Nov 2024 22:30:45 +0100 Subject: [PATCH 330/566] Add fallback to getgrgid_r and getpwuid_r for UID and GID arguments of clickhouse-su --- programs/su/su.cpp | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/programs/su/su.cpp b/programs/su/su.cpp index 33d929898f4..40242d0687f 100644 --- a/programs/su/su.cpp +++ b/programs/su/su.cpp @@ -59,7 +59,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid) throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid); if (!result) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid); + { + if (0 != getgrgid_r(gid, &entry, buf.get(), buf_size, &result)) + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid); + + if (!result) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid); + } gid = entry.gr_gid; } @@ -84,7 +90,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid) throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid); if (!result) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid); + { + if (0 != getpwuid_r(uid, &entry, buf.get(), buf_size, &result)) + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwuid_r' to obtain uid from user name ({})", uid); + + if (!result) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid); + } uid = entry.pw_uid; } From 69ae05210364cf03bddf62b13bd752857bcbbedc Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 8 Nov 2024 10:22:01 +0000 Subject: [PATCH 331/566] SimSIMD: Improve suppression for msan false positive --- contrib/SimSIMD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index ee3c9c9c00b..9e3cfc32d26 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit ee3c9c9c00b51645f62a1a9e99611b78c0052a21 +Subproject commit 9e3cfc32d26fbeece91e34df8668db28c0ca006a From ba20032987a042d45b4073e93eb5279222aff4ac Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 8 Nov 2024 14:08:36 +0000 Subject: [PATCH 332/566] Fix build --- contrib/SimSIMD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index 9e3cfc32d26..bb0bd2e7137 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit 9e3cfc32d26fbeece91e34df8668db28c0ca006a +Subproject commit bb0bd2e7137f02c555341d7c93124ed19f3c24fb From fdc18a6a28e6d44efbcd5a5e488e62478b411011 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 8 Nov 2024 15:01:38 +0100 Subject: [PATCH 333/566] Add a separate setting for background download max size --- src/Interpreters/Cache/FileCache.cpp | 17 ++++++++++++++- src/Interpreters/Cache/FileCache.h | 2 ++ src/Interpreters/Cache/FileCacheSettings.cpp | 3 +++ src/Interpreters/Cache/FileCacheSettings.h | 2 ++ src/Interpreters/Cache/FileCache_fwd.h | 1 + src/Interpreters/Cache/FileSegment.cpp | 5 ++++- src/Interpreters/Cache/Metadata.cpp | 22 +++++++++++++++++++- src/Interpreters/Cache/Metadata.h | 6 ++++++ 8 files changed, 55 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 7de3f7af78d..bda91b31692 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ 
b/src/Interpreters/Cache/FileCache.cpp @@ -103,7 +103,11 @@ FileCache::FileCache(const std::string & cache_name, const FileCacheSettings & s , keep_current_elements_to_max_ratio(1 - settings.keep_free_space_elements_ratio) , keep_up_free_space_remove_batch(settings.keep_free_space_remove_batch) , log(getLogger("FileCache(" + cache_name + ")")) - , metadata(settings.base_path, settings.background_download_queue_size_limit, settings.background_download_threads, write_cache_per_user_directory) + , metadata(settings.base_path, + settings.background_download_queue_size_limit, + settings.background_download_threads, + settings.background_download_max_file_segment_size, + write_cache_per_user_directory) { if (settings.cache_policy == "LRU") { @@ -1600,6 +1604,17 @@ void FileCache::applySettingsIfPossible(const FileCacheSettings & new_settings, } } + if (new_settings.background_download_max_file_segment_size != actual_settings.background_download_max_file_segment_size) + { + metadata.setBackgroundDownloadMaxFileSegmentSize(new_settings.background_download_max_file_segment_size); + + LOG_INFO(log, "Changed background_download_max_file_segment_size from {} to {}", + actual_settings.background_download_max_file_segment_size, + new_settings.background_download_max_file_segment_size); + + actual_settings.background_download_max_file_segment_size = new_settings.background_download_max_file_segment_size; + } + if (new_settings.max_size != actual_settings.max_size || new_settings.max_elements != actual_settings.max_elements) { diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 810ed481300..79966e60ad9 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -161,6 +161,8 @@ public: size_t getMaxFileSegmentSize() const { return max_file_segment_size; } + size_t getBackgroundDownloadMaxFileSegmentSize() const { return metadata.getBackgroundDownloadMaxFileSegmentSize(); } + bool tryReserve( FileSegment & file_segment, size_t size, diff --git a/src/Interpreters/Cache/FileCacheSettings.cpp b/src/Interpreters/Cache/FileCacheSettings.cpp index e162d6b7551..9cd0ee750de 100644 --- a/src/Interpreters/Cache/FileCacheSettings.cpp +++ b/src/Interpreters/Cache/FileCacheSettings.cpp @@ -62,6 +62,9 @@ void FileCacheSettings::loadImpl(FuncHas has, FuncGetUInt get_uint, FuncGetStrin if (has("background_download_queue_size_limit")) background_download_queue_size_limit = get_uint("background_download_queue_size_limit"); + if (has("background_download_max_file_segment_size")) + background_download_threads = get_uint("background_download_max_file_segment_size"); + if (has("load_metadata_threads")) load_metadata_threads = get_uint("load_metadata_threads"); diff --git a/src/Interpreters/Cache/FileCacheSettings.h b/src/Interpreters/Cache/FileCacheSettings.h index 72a2b6c3369..9cf72a2bdff 100644 --- a/src/Interpreters/Cache/FileCacheSettings.h +++ b/src/Interpreters/Cache/FileCacheSettings.h @@ -43,6 +43,8 @@ struct FileCacheSettings double keep_free_space_elements_ratio = FILECACHE_DEFAULT_FREE_SPACE_ELEMENTS_RATIO; size_t keep_free_space_remove_batch = FILECACHE_DEFAULT_FREE_SPACE_REMOVE_BATCH; + size_t background_download_max_file_segment_size = FILECACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE_WITH_BACKGROUND_DOWLOAD; + void loadFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix); void loadFromCollection(const NamedCollection & collection); diff --git a/src/Interpreters/Cache/FileCache_fwd.h 
b/src/Interpreters/Cache/FileCache_fwd.h index da75f30f0e8..3d461abd065 100644 --- a/src/Interpreters/Cache/FileCache_fwd.h +++ b/src/Interpreters/Cache/FileCache_fwd.h @@ -6,6 +6,7 @@ namespace DB static constexpr int FILECACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE = 32 * 1024 * 1024; /// 32Mi static constexpr int FILECACHE_DEFAULT_FILE_SEGMENT_ALIGNMENT = 4 * 1024 * 1024; /// 4Mi +static constexpr int FILECACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE_WITH_BACKGROUND_DOWLOAD = 4 * 1024 * 1024; /// 4Mi static constexpr int FILECACHE_DEFAULT_BACKGROUND_DOWNLOAD_THREADS = 5; static constexpr int FILECACHE_DEFAULT_BACKGROUND_DOWNLOAD_QUEUE_SIZE_LIMIT = 5000; static constexpr int FILECACHE_DEFAULT_LOAD_METADATA_THREADS = 16; diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 541f0f5607a..2455461435b 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -708,7 +708,10 @@ void FileSegment::complete(bool allow_background_download) if (is_last_holder) { bool added_to_download_queue = false; - if (allow_background_download && background_download_enabled && remote_file_reader) + if (allow_background_download + && background_download_enabled + && remote_file_reader + && downloaded_size < cache->getBackgroundDownloadMaxFileSegmentSize()) { ProfileEvents::increment(ProfileEvents::FilesystemCacheBackgroundDownloadQueuePush); added_to_download_queue = locked_key->addToDownloadQueue(offset(), segment_lock); /// Finish download in background. diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 231545212cd..257401d2ce6 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -168,6 +168,7 @@ CacheMetadata::CacheMetadata( const std::string & path_, size_t background_download_queue_size_limit_, size_t background_download_threads_, + size_t background_download_max_file_segment_size_, bool write_cache_per_user_directory_) : path(path_) , cleanup_queue(std::make_shared()) @@ -175,6 +176,7 @@ CacheMetadata::CacheMetadata( , write_cache_per_user_directory(write_cache_per_user_directory_) , log(getLogger("CacheMetadata")) , download_threads_num(background_download_threads_) + , download_max_file_segment_size(background_download_max_file_segment_size_) { } @@ -630,6 +632,9 @@ void CacheMetadata::downloadThreadFunc(const bool & stop_flag) auto & file_segment = holder->front(); + if (file_segment.getDownloadedSize() >= download_max_file_segment_size) + continue; + if (file_segment.getOrSetDownloader() != FileSegment::getCallerId()) continue; @@ -701,10 +706,25 @@ void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optional(reader->getPosition())) reader->seek(offset, SEEK_SET); - while (!reader->eof()) + bool stop = false; + const size_t max_file_segment_size = download_max_file_segment_size.load(); + + while (!stop && !reader->eof()) { auto size = reader->available(); + const size_t downloaded_size = file_segment.getDownloadedSize(); + if (downloaded_size >= max_file_segment_size) + break; + + if (download_max_file_segment_size + size > max_file_segment_size) + { + /// Do not download more than download_max_file_segment_size + /// because we want to leave right boundary of file segment aligned. 
+ size = max_file_segment_size - downloaded_size; + stop = true; + } + std::string failure_reason; if (!file_segment.reserve(size, reserve_space_lock_wait_timeout_milliseconds, failure_reason)) { diff --git a/src/Interpreters/Cache/Metadata.h b/src/Interpreters/Cache/Metadata.h index 0e85ead3265..526b82c9a68 100644 --- a/src/Interpreters/Cache/Metadata.h +++ b/src/Interpreters/Cache/Metadata.h @@ -165,6 +165,7 @@ public: const std::string & path_, size_t background_download_queue_size_limit_, size_t background_download_threads_, + size_t background_download_max_file_segment_size_, bool write_cache_per_user_directory_); void startup(); @@ -210,6 +211,10 @@ public: bool setBackgroundDownloadThreads(size_t threads_num); size_t getBackgroundDownloadThreads() const { return download_threads.size(); } + + void setBackgroundDownloadMaxFileSegmentSize(size_t max_file_segment_size) { download_max_file_segment_size = max_file_segment_size; } + size_t getBackgroundDownloadMaxFileSegmentSize() const { return download_max_file_segment_size; } + bool setBackgroundDownloadQueueSizeLimit(size_t size); bool isBackgroundDownloadEnabled(); @@ -241,6 +246,7 @@ private: }; std::atomic download_threads_num; + std::atomic download_max_file_segment_size; std::vector> download_threads; std::unique_ptr cleanup_thread; From aeb2cbf934c76d082b01dc023b28562efb5d6e02 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Fri, 8 Nov 2024 15:26:41 +0100 Subject: [PATCH 334/566] Update Settings.cpp --- src/Core/Settings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 8feb758df0f..07a2c52d72f 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4874,7 +4874,7 @@ Limit on size of a single batch of file segments that a read buffer can request Wait time to lock cache for space reservation in filesystem cache )", 0) \ DECLARE(Bool, filesystem_cache_prefer_bigger_buffer_size, true, R"( -Prefer bigger buffer size if filesystem cache is enabled to avoid writing small file segments which deteriorate cache performance +Prefer bigger buffer size if filesystem cache is enabled to avoid writing small file segments which deteriorate cache performance. On the other hand, enabling this setting might increase memory usage. 
)", 0) \ DECLARE(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"( Wait time to lock cache for space reservation for temporary data in filesystem cache From 1561a0115fa740c746ccb054552de3ad751e12ae Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 8 Nov 2024 14:30:02 +0000 Subject: [PATCH 335/566] Fix test, set min_bytes_to_use_direct_io expicitly --- tests/queries/0_stateless/03254_pr_join_on_dups.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 166910d496f..aca4fc6b6c3 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -1,6 +1,8 @@ drop table if exists X sync; drop table if exists Y sync; +set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken and leads to unexpected results, https://github.com/ClickHouse/ClickHouse/issues/65690 + create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); create table Y (id Int32, y_a String, y_b Nullable(String)) engine ReplicatedMergeTree('/clickhouse/{database}/Y', '1') order by tuple(); @@ -10,7 +12,6 @@ insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; -set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken and leads to unexpected results, https://github.com/ClickHouse/ClickHouse/issues/65690 select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; From 00e1c5cf0b3e5ddcfee91e8ba1a9ba0e4607354a Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Fri, 8 Nov 2024 16:30:54 +0100 Subject: [PATCH 336/566] Update FileCacheSettings.cpp --- src/Interpreters/Cache/FileCacheSettings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/Cache/FileCacheSettings.cpp b/src/Interpreters/Cache/FileCacheSettings.cpp index 9cd0ee750de..8f0c5206211 100644 --- a/src/Interpreters/Cache/FileCacheSettings.cpp +++ b/src/Interpreters/Cache/FileCacheSettings.cpp @@ -63,7 +63,7 @@ void FileCacheSettings::loadImpl(FuncHas has, FuncGetUInt get_uint, FuncGetStrin background_download_queue_size_limit = get_uint("background_download_queue_size_limit"); if (has("background_download_max_file_segment_size")) - background_download_threads = get_uint("background_download_max_file_segment_size"); + background_download_max_file_segment_size = get_uint("background_download_max_file_segment_size"); if (has("load_metadata_threads")) load_metadata_threads = get_uint("load_metadata_threads"); From e18ff6e56bda4916064f3135c6b2e59436606235 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Fri, 8 Nov 2024 16:31:58 +0100 Subject: [PATCH 337/566] Update Metadata.cpp --- src/Interpreters/Cache/Metadata.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 257401d2ce6..2ef8f76aca0 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ 
b/src/Interpreters/Cache/Metadata.cpp @@ -717,7 +717,7 @@ void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optional= max_file_segment_size) break; - if (download_max_file_segment_size + size > max_file_segment_size) + if (downloaded_size + size > max_file_segment_size) { /// Do not download more than download_max_file_segment_size /// because we want to leave right boundary of file segment aligned. From 0929f66516261dea7b31479b8f5eaac1b4b8e38a Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 8 Nov 2024 17:01:09 +0100 Subject: [PATCH 338/566] Update test --- tests/integration/test_storage_s3_queue/test.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 284b304c632..62afc0b1c1d 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -1000,6 +1000,9 @@ def test_max_set_age(started_cluster): assert "Cannot parse input" in node.query( f"SELECT exception FROM system.s3queue WHERE file_name ilike '%{file_with_error}'" ) + assert "Cannot parse input" in node.query( + f"SELECT exception FROM system.s3queue_log WHERE file_name ilike '%{file_with_error}' ORDER BY processing_end_time DESC LIMIT 1" + ) assert 1 == int( node.query( From 5d2e1547a89cb43d545c5847cf47565f319bbd75 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Fri, 8 Nov 2024 11:34:06 -0500 Subject: [PATCH 339/566] use `/var/log/mysql/` instead of `/mysql/` --- tests/integration/compose/docker_compose_mysql.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/compose/docker_compose_mysql.yml b/tests/integration/compose/docker_compose_mysql.yml index f45410bde78..91df21165ea 100644 --- a/tests/integration/compose/docker_compose_mysql.yml +++ b/tests/integration/compose/docker_compose_mysql.yml @@ -5,7 +5,7 @@ services: environment: MYSQL_ROOT_PASSWORD: clickhouse MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST} - DATADIR: /mysql/ + DATADIR: /var/log/mysql/ expose: - ${MYSQL_PORT:-3306} command: --server_id=100 @@ -14,11 +14,11 @@ services: --gtid-mode="ON" --enforce-gtid-consistency --log-error-verbosity=3 - --log-error=/mysql/error.log + --log-error=/var/log/mysql/error.log --general-log=ON - --general-log-file=/mysql/general.log + --general-log-file=/var/log/mysql/general.log volumes: - type: ${MYSQL_LOGS_FS:-tmpfs} source: ${MYSQL_LOGS:-} - target: /mysql/ + target: /var/log/mysql/ user: ${MYSQL_DOCKER_USER} From 97ec890b8e3e06d9914e573363965ef439f76d21 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Fri, 8 Nov 2024 11:36:36 -0500 Subject: [PATCH 340/566] use `/var/log/mysql/` instead of `/mysql/`, fix `MYSQL_ROOT_HOST` env initialization --- tests/integration/compose/docker_compose_mysql_8_0.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/compose/docker_compose_mysql_8_0.yml b/tests/integration/compose/docker_compose_mysql_8_0.yml index e1ff1633bc7..e1e2e241443 100644 --- a/tests/integration/compose/docker_compose_mysql_8_0.yml +++ b/tests/integration/compose/docker_compose_mysql_8_0.yml @@ -4,8 +4,8 @@ services: restart: always environment: MYSQL_ROOT_PASSWORD: clickhouse - MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST} - DATADIR: /mysql/ + MYSQL_ROOT_HOST: ${MYSQL8_ROOT_HOST} + DATADIR: /var/log/mysql/ expose: - ${MYSQL8_PORT:-3306} command: --server_id=100 
--log-bin='mysql-bin-1.log' @@ -13,11 +13,11 @@ services: --default-time-zone='+3:00' --gtid-mode="ON" --enforce-gtid-consistency --log-error-verbosity=3 - --log-error=/mysql/error.log + --log-error=/var/log/mysql/error.log --general-log=ON - --general-log-file=/mysql/general.log + --general-log-file=/var/log/mysql/general.log volumes: - type: ${MYSQL8_LOGS_FS:-tmpfs} source: ${MYSQL8_LOGS:-} - target: /mysql/ + target: /var/log/mysql/ user: ${MYSQL8_DOCKER_USER} From fe39c4b65bfee09d9c7d5327963983fbd4cdd234 Mon Sep 17 00:00:00 2001 From: Tanya Bragin Date: Fri, 8 Nov 2024 08:55:20 -0800 Subject: [PATCH 341/566] Update README.md - Update meetups Add Stockholm --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index dcaeda13acd..abaf27abf11 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,7 @@ Upcoming meetups * [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21 * [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26 * [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3 +* [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9 * [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9 * [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12 From 9dc4046b897bd7cd185c0f5e0e221dea7481f8a9 Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 8 Nov 2024 18:02:41 +0100 Subject: [PATCH 342/566] Add index granularity size column to system.parts --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 9 +++++++++ src/Storages/MergeTree/IMergeTreeDataPart.h | 2 ++ .../MergeTree/MergeTreeIndexGranularity.cpp | 10 ++++++++++ src/Storages/MergeTree/MergeTreeIndexGranularity.h | 3 +++ src/Storages/System/StorageSystemParts.cpp | 6 ++++++ .../03268_system_parts_index_granularity.reference | 1 + .../03268_system_parts_index_granularity.sql | 14 ++++++++++++++ 7 files changed, 45 insertions(+) create mode 100644 tests/queries/0_stateless/03268_system_parts_index_granularity.reference create mode 100644 tests/queries/0_stateless/03268_system_parts_index_granularity.sql diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 7453d609fa9..51c445945e6 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -624,6 +624,15 @@ UInt64 IMergeTreeDataPart::getIndexSizeInAllocatedBytes() const return res; } +UInt64 IMergeTreeDataPart::getIndexGranularityBytes() const +{ + return index_granularity.getBytesSize(); +} +UInt64 IMergeTreeDataPart::getIndexGranularityAllocatedBytes() const +{ + return index_granularity.getBytesAllocated(); +} + void IMergeTreeDataPart::assertState(const std::initializer_list & affordable_states) const { if (!checkState(affordable_states)) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index b41a1d840e1..55f1265318c 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -380,6 +380,8 @@ public: /// For data in RAM ('index') UInt64 getIndexSizeInBytes() const; UInt64 getIndexSizeInAllocatedBytes() const; + UInt64 getIndexGranularityBytes() const; + UInt64 getIndexGranularityAllocatedBytes() const; UInt64 getMarksCount() const; UInt64 
getIndexSizeFromFile() const; diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp index c3e740bde84..bf0ba17d473 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp @@ -128,4 +128,14 @@ void MergeTreeIndexGranularity::shrinkToFitInMemory() marks_rows_partial_sums.shrink_to_fit(); } +uint64_t MergeTreeIndexGranularity::getBytesSize() const +{ + return marks_rows_partial_sums.size() * sizeof(size_t); +} +uint64_t MergeTreeIndexGranularity::getBytesAllocated() const +{ + return marks_rows_partial_sums.capacity() * sizeof(size_t); +} + + } diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.h b/src/Storages/MergeTree/MergeTreeIndexGranularity.h index 9b8375dd2d8..c616d2ac49a 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.h @@ -102,6 +102,9 @@ public: std::string describe() const; void shrinkToFitInMemory(); + + uint64_t getBytesSize() const; + uint64_t getBytesAllocated() const; }; } diff --git a/src/Storages/System/StorageSystemParts.cpp b/src/Storages/System/StorageSystemParts.cpp index 56a45d7b51d..d0e34842198 100644 --- a/src/Storages/System/StorageSystemParts.cpp +++ b/src/Storages/System/StorageSystemParts.cpp @@ -75,6 +75,8 @@ StorageSystemParts::StorageSystemParts(const StorageID & table_id_) {"data_version", std::make_shared(), "Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than data_version)."}, {"primary_key_bytes_in_memory", std::make_shared(), "The amount of memory (in bytes) used by primary key values."}, {"primary_key_bytes_in_memory_allocated", std::make_shared(), "The amount of memory (in bytes) reserved for primary key values."}, + {"index_granularity_bytes_in_memory", std::make_shared(), "The amount of memory (in bytes) used by index granularity values."}, + {"index_granularity_bytes_in_memory_allocated", std::make_shared(), "The amount of memory (in bytes) reserved for index granularity values."}, {"is_frozen", std::make_shared(), "Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup does not exist. 
"}, {"database", std::make_shared(), "Name of the database."}, @@ -216,6 +218,10 @@ void StorageSystemParts::processNextStorage( columns[res_index++]->insert(part->getIndexSizeInBytes()); if (columns_mask[src_index++]) columns[res_index++]->insert(part->getIndexSizeInAllocatedBytes()); + if (columns_mask[src_index++]) + columns[res_index++]->insert(part->getIndexGranularityBytes()); + if (columns_mask[src_index++]) + columns[res_index++]->insert(part->getIndexGranularityAllocatedBytes()); if (columns_mask[src_index++]) columns[res_index++]->insert(part->is_frozen.load(std::memory_order_relaxed)); diff --git a/tests/queries/0_stateless/03268_system_parts_index_granularity.reference b/tests/queries/0_stateless/03268_system_parts_index_granularity.reference new file mode 100644 index 00000000000..f301cd54ad2 --- /dev/null +++ b/tests/queries/0_stateless/03268_system_parts_index_granularity.reference @@ -0,0 +1 @@ +88 88 diff --git a/tests/queries/0_stateless/03268_system_parts_index_granularity.sql b/tests/queries/0_stateless/03268_system_parts_index_granularity.sql new file mode 100644 index 00000000000..009a15d0825 --- /dev/null +++ b/tests/queries/0_stateless/03268_system_parts_index_granularity.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t ( + key UInt64, + value String +) +ENGINE MergeTree() +ORDER by key SETTINGS index_granularity = 10, index_granularity_bytes = '1024K'; + +INSERT INTO t SELECT number, toString(number) FROM numbers(100); + +SELECT index_granularity_bytes_in_memory, index_granularity_bytes_in_memory_allocated FROM system.parts where table = 't' and database = currentDatabase(); + +DROP TABLE IF EXISTS t; From 37c24838693e573428414016a619fa70de61823a Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 8 Nov 2024 17:09:23 +0000 Subject: [PATCH 343/566] Do not randomize min_bytes_to_use_direct_io --- tests/clickhouse-test | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 9c035b7cc35..a1ffcc2030f 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -821,9 +821,10 @@ class SettingsRandomizer: "optimize_aggregation_in_order": lambda: random.randint(0, 1), "aggregation_in_order_max_block_bytes": lambda: random.randint(0, 50000000), "use_uncompressed_cache": lambda: random.randint(0, 1), - "min_bytes_to_use_direct_io": threshold_generator( - 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 - ), + # see https://github.com/ClickHouse/ClickHouse/issues/65690 + # "min_bytes_to_use_direct_io": threshold_generator( + # 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 + # ), "min_bytes_to_use_mmap_io": threshold_generator( 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 ), From 6c223c92bd852b56c713aff768b07c4adb90d5dc Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 8 Nov 2024 18:13:29 +0100 Subject: [PATCH 344/566] btter --- .../queries/0_stateless/03268_system_parts_index_granularity.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03268_system_parts_index_granularity.sql b/tests/queries/0_stateless/03268_system_parts_index_granularity.sql index 009a15d0825..1bab7840856 100644 --- a/tests/queries/0_stateless/03268_system_parts_index_granularity.sql +++ b/tests/queries/0_stateless/03268_system_parts_index_granularity.sql @@ -1,3 +1,4 @@ +-- Tags: no-random-settings, no-random-merge-tree-settings DROP TABLE IF EXISTS t; CREATE TABLE t ( From 6d2504662a45e0c35758698ec60ac265309c0f6b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 01:01:46 +0100 Subject: [PATCH 
345/566] Update tests --- tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql | 1 + tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql | 1 + tests/queries/0_stateless/01958_partial_hour_timezone.sql | 2 ++ tests/queries/0_stateless/02125_query_views_log.sql | 2 ++ 4 files changed, 6 insertions(+) diff --git a/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql b/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql index 0154265ef72..907a8283396 100644 --- a/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql +++ b/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql @@ -1,5 +1,6 @@ SET session_timezone = 'UTC'; -- disable timezone randomization SET enable_analyzer = 1; -- The old path formats the result with different whitespaces +SET output_format_pretty_highlight_digit_groups = 0; SELECT '-- Negative tests'; SELECT dateTimeToSnowflakeID(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} diff --git a/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql b/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql index 41e5beb9c16..1f62f3d36da 100644 --- a/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql +++ b/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql @@ -1,5 +1,6 @@ SET session_timezone = 'UTC'; -- disable timezone randomization SET enable_analyzer = 1; -- The old path formats the result with different whitespaces +SET output_format_pretty_highlight_digit_groups = 0; SELECT '-- Negative tests'; SELECT snowflakeIDToDateTime(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} diff --git a/tests/queries/0_stateless/01958_partial_hour_timezone.sql b/tests/queries/0_stateless/01958_partial_hour_timezone.sql index 26350e55620..b72adfd9d58 100644 --- a/tests/queries/0_stateless/01958_partial_hour_timezone.sql +++ b/tests/queries/0_stateless/01958_partial_hour_timezone.sql @@ -1,3 +1,5 @@ +SET output_format_pretty_highlight_digit_groups = 0; + -- Appeared in https://github.com/ClickHouse/ClickHouse/pull/26978#issuecomment-890889362 WITH toDateTime('1970-06-17 07:39:21', 'Africa/Monrovia') as t SELECT toUnixTimestamp(t), diff --git a/tests/queries/0_stateless/02125_query_views_log.sql b/tests/queries/0_stateless/02125_query_views_log.sql index ba50902ebea..96170efedd6 100644 --- a/tests/queries/0_stateless/02125_query_views_log.sql +++ b/tests/queries/0_stateless/02125_query_views_log.sql @@ -1,3 +1,5 @@ +SET output_format_pretty_highlight_digit_groups = 0; + drop table if exists src; drop table if exists dst; drop table if exists mv1; From 19ca58e95203ed2eed71a6ef0ab88677d9bb6b93 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 02:29:44 +0100 Subject: [PATCH 346/566] Fix #71677 --- src/Functions/nested.cpp | 19 +++++++++++++------ .../03268_nested_analyzer.reference | 3 +++ .../0_stateless/03268_nested_analyzer.sql | 16 ++++++++++++++++ 3 files changed, 32 insertions(+), 6 deletions(-) create mode 100644 tests/queries/0_stateless/03268_nested_analyzer.reference create mode 100644 tests/queries/0_stateless/03268_nested_analyzer.sql diff --git a/src/Functions/nested.cpp b/src/Functions/nested.cpp index 85c342b5e7c..29d99b8a6df 100644 --- a/src/Functions/nested.cpp +++ b/src/Functions/nested.cpp @@ -108,27 +108,29 @@ public: { size_t arguments_size = arguments.size(); - const auto * lhs_array = assert_cast(arguments.at(1).column.get()); + ColumnPtr first_array_materialized = arguments[1].column->convertToFullColumnIfConst(); + const ColumnArray & first_array = assert_cast(*first_array_materialized); Columns 
data_columns; data_columns.reserve(arguments_size); - data_columns.push_back(lhs_array->getDataPtr()); + data_columns.push_back(first_array.getDataPtr()); for (size_t i = 2; i < arguments_size; ++i) { - const auto * rhs_array = assert_cast(arguments[i].column.get()); + ColumnPtr other_array_materialized = arguments[i].column->convertToFullColumnIfConst(); + const ColumnArray & other_array = assert_cast(*other_array_materialized); - if (!lhs_array->hasEqualOffsets(*rhs_array)) + if (!first_array.hasEqualOffsets(other_array)) throw Exception(ErrorCodes::SIZES_OF_ARRAYS_DONT_MATCH, "The argument 2 and argument {} of function {} have different array offsets", i + 1, getName()); - data_columns.push_back(rhs_array->getDataPtr()); + data_columns.push_back(other_array.getDataPtr()); } auto tuple_column = ColumnTuple::create(std::move(data_columns)); - auto array_column = ColumnArray::create(std::move(tuple_column), lhs_array->getOffsetsPtr()); + auto array_column = ColumnArray::create(std::move(tuple_column), first_array.getOffsetsPtr()); return array_column; } @@ -168,7 +170,12 @@ REGISTER_FUNCTION(Nested) { factory.registerFunction(FunctionDocumentation{ .description=R"( +This is a function used internally by the ClickHouse engine and not meant to be used directly. + Returns the array of tuples from multiple arrays. + +The first argument must be a constant array of Strings determining the names of the resulting Tuple. +The other arguments must be arrays of the same size. )", .examples{{"nested", "SELECT nested(['keys', 'values'], ['key_1', 'key_2'], ['value_1','value_2'])", ""}}, .categories{"OtherFunctions"} diff --git a/tests/queries/0_stateless/03268_nested_analyzer.reference b/tests/queries/0_stateless/03268_nested_analyzer.reference new file mode 100644 index 00000000000..01dabfe4ba7 --- /dev/null +++ b/tests/queries/0_stateless/03268_nested_analyzer.reference @@ -0,0 +1,3 @@ +[(1,3),(2,4)] +0 0 +0 0 1 diff --git a/tests/queries/0_stateless/03268_nested_analyzer.sql b/tests/queries/0_stateless/03268_nested_analyzer.sql new file mode 100644 index 00000000000..920cf2b3174 --- /dev/null +++ b/tests/queries/0_stateless/03268_nested_analyzer.sql @@ -0,0 +1,16 @@ +SELECT nested(['a', 'b'], [1, 2], materialize([3, 4])); + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + x UInt8, + “struct.x” DEFAULT [0], + “struct.y” ALIAS [1], +) +ENGINE = Memory; + +insert into test (x) values (0); +select * from test array join struct; +select x, struct.x, struct.y from test array join struct; + +DROP TABLE test; From b5237313adaac770c95b8c9415a01c23b1372f66 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 02:49:27 +0100 Subject: [PATCH 347/566] Fix tests --- tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql | 2 +- tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql | 2 +- tests/queries/0_stateless/01958_partial_hour_timezone.sql | 2 +- tests/queries/0_stateless/02125_query_views_log.sql | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql b/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql index 907a8283396..aeaf48716dc 100644 --- a/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql +++ b/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql @@ -1,6 +1,6 @@ SET session_timezone = 'UTC'; -- disable timezone randomization SET enable_analyzer = 1; -- The old path formats the result with different whitespaces -SET output_format_pretty_highlight_digit_groups = 0; +SET 
output_format_pretty_single_large_number_tip_threshold = 0; SELECT '-- Negative tests'; SELECT dateTimeToSnowflakeID(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} diff --git a/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql b/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql index 1f62f3d36da..e9b32607837 100644 --- a/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql +++ b/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql @@ -1,6 +1,6 @@ SET session_timezone = 'UTC'; -- disable timezone randomization SET enable_analyzer = 1; -- The old path formats the result with different whitespaces -SET output_format_pretty_highlight_digit_groups = 0; +SET output_format_pretty_single_large_number_tip_threshold = 0; SELECT '-- Negative tests'; SELECT snowflakeIDToDateTime(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} diff --git a/tests/queries/0_stateless/01958_partial_hour_timezone.sql b/tests/queries/0_stateless/01958_partial_hour_timezone.sql index b72adfd9d58..3eecaaf97e6 100644 --- a/tests/queries/0_stateless/01958_partial_hour_timezone.sql +++ b/tests/queries/0_stateless/01958_partial_hour_timezone.sql @@ -1,4 +1,4 @@ -SET output_format_pretty_highlight_digit_groups = 0; +SET output_format_pretty_single_large_number_tip_threshold = 0; -- Appeared in https://github.com/ClickHouse/ClickHouse/pull/26978#issuecomment-890889362 WITH toDateTime('1970-06-17 07:39:21', 'Africa/Monrovia') as t diff --git a/tests/queries/0_stateless/02125_query_views_log.sql b/tests/queries/0_stateless/02125_query_views_log.sql index 96170efedd6..08e9c73a165 100644 --- a/tests/queries/0_stateless/02125_query_views_log.sql +++ b/tests/queries/0_stateless/02125_query_views_log.sql @@ -1,4 +1,4 @@ -SET output_format_pretty_highlight_digit_groups = 0; +SET output_format_pretty_single_large_number_tip_threshold = 0; drop table if exists src; drop table if exists dst; From 959c4633f9e8cbc3f41def853fa62618fba604c6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 02:53:32 +0100 Subject: [PATCH 348/566] Apply review suggestion --- src/Formats/PrettyFormatHelpers.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/Formats/PrettyFormatHelpers.cpp b/src/Formats/PrettyFormatHelpers.cpp index 6e2af036651..4ee4b49521d 100644 --- a/src/Formats/PrettyFormatHelpers.cpp +++ b/src/Formats/PrettyFormatHelpers.cpp @@ -5,6 +5,11 @@ #include +static constexpr const char * GRAY_COLOR = "\033[90m"; +static constexpr const char * UNDERSCORE = "\033[4m"; +static constexpr const char * RESET_COLOR = "\033[0m"; + + namespace DB { @@ -25,11 +30,11 @@ void writeReadableNumberTip(WriteBuffer & out, const IColumn & column, size_t ro if (threshold && isFinite(value) && abs(value) > threshold) { if (color) - writeCString("\033[90m", out); + writeCString(GRAY_COLOR, out); writeCString(" -- ", out); formatReadableQuantity(value, out, 2); if (color) - writeCString("\033[0m", out); + writeCString(RESET_COLOR, out); } } @@ -76,9 +81,9 @@ String highlightDigitGroups(String source) size_t offset = num_digits_before_decimal - digit_num; if (offset && offset % 3 == 0) { - result += "\033[4m"; + result += UNDERSCORE; result += c; - result += "\033[0m"; + result += RESET_COLOR; } else { From ef0ec74d2bfe8ae61a06e0c3fa4e33fb3c094ef6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 04:50:18 +0100 Subject: [PATCH 349/566] Fix build --- src/Processors/Formats/Impl/PrettyBlockOutputFormat.h | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h index 824a2fd2e6f..81bd0e6632d 100644 --- a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h @@ -55,8 +55,6 @@ protected: } bool color; - -protected: bool readable_number_tip = false; private: From bf58f468082917f871dc706f8596020d0364b43e Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Sat, 9 Nov 2024 13:04:39 +0800 Subject: [PATCH 350/566] Fix empty tuple ALTER --- src/Functions/FunctionsComparison.h | 3 +++ src/Functions/if.cpp | 3 +++ .../0_stateless/03268_empty_tuple_update.reference | 1 + .../queries/0_stateless/03268_empty_tuple_update.sql | 11 +++++++++++ 4 files changed, 18 insertions(+) create mode 100644 tests/queries/0_stateless/03268_empty_tuple_update.reference create mode 100644 tests/queries/0_stateless/03268_empty_tuple_update.sql diff --git a/src/Functions/FunctionsComparison.h b/src/Functions/FunctionsComparison.h index be0875581a5..9b2328065fc 100644 --- a/src/Functions/FunctionsComparison.h +++ b/src/Functions/FunctionsComparison.h @@ -1033,6 +1033,9 @@ private: size_t tuple_size, size_t input_rows_count) const { + if (0 == tuple_size) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Comparison of zero-sized tuples is not implemented"); + ColumnsWithTypeAndName less_columns(tuple_size); ColumnsWithTypeAndName equal_columns(tuple_size - 1); ColumnsWithTypeAndName tmp_columns(2); diff --git a/src/Functions/if.cpp b/src/Functions/if.cpp index e03b27b3c39..5e1e7067e86 100644 --- a/src/Functions/if.cpp +++ b/src/Functions/if.cpp @@ -668,6 +668,9 @@ private: temporary_columns[0] = arguments[0]; size_t tuple_size = type1.getElements().size(); + if (tuple_size == 0) + return ColumnTuple::create(input_rows_count); + Columns tuple_columns(tuple_size); for (size_t i = 0; i < tuple_size; ++i) diff --git a/tests/queries/0_stateless/03268_empty_tuple_update.reference b/tests/queries/0_stateless/03268_empty_tuple_update.reference new file mode 100644 index 00000000000..30bc45d7a18 --- /dev/null +++ b/tests/queries/0_stateless/03268_empty_tuple_update.reference @@ -0,0 +1 @@ +() 2 diff --git a/tests/queries/0_stateless/03268_empty_tuple_update.sql b/tests/queries/0_stateless/03268_empty_tuple_update.sql new file mode 100644 index 00000000000..343117719fc --- /dev/null +++ b/tests/queries/0_stateless/03268_empty_tuple_update.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Tuple(), c1 int) ENGINE = Memory(); + +INSERT INTO t0 VALUES ((), 1); + +ALTER TABLE t0 UPDATE c0 = (), c1 = 2 WHERE EXISTS (SELECT 1); + +SELECT * FROM t0; + +DROP TABLE t0; From a888db338e1c79166a3ff6993b71d2fd17dc8736 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Sat, 9 Nov 2024 08:23:25 +0100 Subject: [PATCH 351/566] Revert "Add a new setting query_metric_log_debug to avoid the noise" This reverts commit 955f537bd5ef2f4a29717ac4999ce2af47b4c039. 
--- src/Core/Settings.cpp | 5 ----- src/Core/SettingsChangesHistory.cpp | 1 - src/Interpreters/QueryMetricLog.cpp | 18 +++++++----------- src/Interpreters/QueryMetricLog.h | 3 +-- src/Interpreters/executeQuery.cpp | 3 +-- .../03203_system_query_metric_log.sh | 10 +++++----- 6 files changed, 14 insertions(+), 26 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 2677bde4d55..6f0109fa300 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2787,11 +2787,6 @@ If set to any negative value, it will take the value `collect_interval_milliseco To disable the collection of a single query, set `query_metric_log_interval` to 0. Default value: -1 - )", 0) \ - DECLARE(Bool, query_metric_log_debug, false, R"( -Turns on debugging traces for system.query_metric_log - -Default value: false )", 0) \ DECLARE(LogsLevel, send_logs_level, LogsLevel::fatal, R"( Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none' diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 008980aae11..c6223bef2b2 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -76,7 +76,6 @@ static std::initializer_listgetProcessList(); @@ -223,24 +222,21 @@ void QueryMetricLogStatus::scheduleNext(String query_id) } else { - if (debug_traces) - LOG_DEBUG(logger, "The next collecting task for query {} should have already run at {}. Scheduling it right now", - query_id, timePointToString(next_collect_time)); + LOG_TRACE(logger, "The next collecting task for query {} should have already run at {}. Scheduling it right now", + query_id, timePointToString(next_collect_time)); task->schedule(); } } std::optional QueryMetricLogStatus::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) { - if (debug_traces) - LOG_DEBUG(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", - query_id, interval_milliseconds, timePointToString(query_info_time), - schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); + LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", + query_id, interval_milliseconds, timePointToString(query_info_time), + schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); if (query_info_time <= last_collect_time) { - if (debug_traces) - LOG_DEBUG(logger, "Query {} has a more recent metrics collected. Skipping this one", query_id); + LOG_TRACE(logger, "Query {} has a more recent metrics collected. Skipping this one", query_id); return {}; } diff --git a/src/Interpreters/QueryMetricLog.h b/src/Interpreters/QueryMetricLog.h index 5f301b2cd13..65764229b0a 100644 --- a/src/Interpreters/QueryMetricLog.h +++ b/src/Interpreters/QueryMetricLog.h @@ -51,7 +51,6 @@ struct QueryMetricLogStatus std::chrono::system_clock::time_point next_collect_time TSA_GUARDED_BY(getMutex()); std::vector last_profile_events TSA_GUARDED_BY(getMutex()) = std::vector(ProfileEvents::end()); BackgroundSchedulePool::TaskHolder task TSA_GUARDED_BY(getMutex()); - bool debug_traces = false; /// We need to be able to move it for the hash map, so we need to add an indirection here. 
std::unique_ptr mutex = std::make_unique(); @@ -79,7 +78,7 @@ public: void shutdown() final; /// Both startQuery and finishQuery are called from the thread that executes the query. - void startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds, bool debug_traces = false); + void startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds); void finishQuery(const String & query_id, TimePoint finish_time, QueryStatusInfoPtr query_info = nullptr); private: diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 794d3dab0e6..4507126b7b3 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -146,7 +146,6 @@ namespace Setting extern const SettingsQueryCacheSystemTableHandling query_cache_system_table_handling; extern const SettingsSeconds query_cache_ttl; extern const SettingsInt64 query_metric_log_interval; - extern const SettingsBool query_metric_log_debug; extern const SettingsOverflowMode read_overflow_mode; extern const SettingsOverflowMode read_overflow_mode_leaf; extern const SettingsOverflowMode result_overflow_mode; @@ -456,7 +455,7 @@ QueryLogElement logQueryStart( { auto interval_milliseconds = getQueryMetricLogInterval(context); if (interval_milliseconds > 0) - query_metric_log->startQuery(elem.client_info.current_query_id, query_start_time, interval_milliseconds, settings[Setting::query_metric_log_debug]); + query_metric_log->startQuery(elem.client_info.current_query_id, query_start_time, interval_milliseconds); } return elem; diff --git a/tests/queries/0_stateless/03203_system_query_metric_log.sh b/tests/queries/0_stateless/03203_system_query_metric_log.sh index 4bc764b777c..abcd14c8e5d 100755 --- a/tests/queries/0_stateless/03203_system_query_metric_log.sh +++ b/tests/queries/0_stateless/03203_system_query_metric_log.sh @@ -6,11 +6,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) readonly query_prefix=$CLICKHOUSE_DATABASE -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_1000" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_400" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=400 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_123" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=123 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_0" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=0 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_fast" -q "SELECT sleep(0.1) SETTINGS query_metric_log_debug=true FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_1000" -q "SELECT sleep(2.5) FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_400" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=400 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_123" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=123 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_0" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=0 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_fast" -q "SELECT sleep(0.1) FORMAT Null" & wait From 516300e733c8b3a116139a0306797f779b818f56 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Sat, 9 Nov 2024 08:28:47 +0100 Subject: [PATCH 352/566] Demote log from warning to debug to avoid failing the test --- src/Interpreters/QueryMetricLog.cpp | 8 
+++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/QueryMetricLog.cpp b/src/Interpreters/QueryMetricLog.cpp index 4fbe4f9e1b5..62700f49605 100644 --- a/src/Interpreters/QueryMetricLog.cpp +++ b/src/Interpreters/QueryMetricLog.cpp @@ -107,6 +107,7 @@ void QueryMetricLog::collectMetric(const ProcessList & process_list, String quer const auto query_info = process_list.getQueryInfo(query_id, false, true, false); if (!query_info) { + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Query {} is not running anymore, so we couldn't get its QueryStatusInfo", query_id); return; } @@ -118,6 +119,7 @@ void QueryMetricLog::collectMetric(const ProcessList & process_list, String quer if (it == queries.end()) { global_lock.unlock(); + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Query {} not found in the list. Finished while this collecting task was running", query_id); return; } @@ -126,6 +128,7 @@ void QueryMetricLog::collectMetric(const ProcessList & process_list, String quer if (!query_status.mutex) { global_lock.unlock(); + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Query {} finished while this collecting task was running", query_id); return; } @@ -230,12 +233,14 @@ void QueryMetricLogStatus::scheduleNext(String query_id) std::optional QueryMetricLogStatus::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) { + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", query_id, interval_milliseconds, timePointToString(query_info_time), schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); if (query_info_time <= last_collect_time) { + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Query {} has a more recent metrics collected. Skipping this one", query_id); return {}; } @@ -278,7 +283,8 @@ std::optional QueryMetricLogStatus::createLogMetricElemen } else { - LOG_WARNING(logger, "Query {} has no profile counters", query_id); + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI + LOG_DEBUG(logger, "Query {} has no profile counters", query_id); elem.profile_events = std::vector(ProfileEvents::end()); } From e50bbc433e5c57c96bbf71e22b900a28eb5be6c5 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Fri, 8 Nov 2024 22:08:08 +0100 Subject: [PATCH 353/566] Another review round for docker-library/docs --- docker/server/README.md | 4 ++-- docker/server/README.src/content.md | 4 ++-- docker/server/README.src/github-repo | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/server/README.md b/docker/server/README.md index 7403d5b0b2a..5f6144d0633 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -30,7 +30,7 @@ For more information and documentation see https://clickhouse.com/. - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. 
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A). -- Since the Clickhouse 24.11 Ubuntu images started using `ubuntu:22.04` as its base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run [--privileged | --security-opt seccomp=unconfined]` instead, however that has security implications. +- Since the Clickhouse 24.11 Ubuntu images started using `ubuntu:22.04` as its base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run --security-opt seccomp=unconfined` instead, however that has security implications. ## How to use this image @@ -57,7 +57,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in ### connect to it using curl ```bash -echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- +echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- ``` More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/). diff --git a/docker/server/README.src/content.md b/docker/server/README.src/content.md index bfc1a271546..df0b6718d69 100644 --- a/docker/server/README.src/content.md +++ b/docker/server/README.src/content.md @@ -24,7 +24,7 @@ For more information and documentation see https://clickhouse.com/. - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. - The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A). -- Since the Clickhouse 24.11 Ubuntu images started using `ubuntu:22.04` as its base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run [--privileged | --security-opt seccomp=unconfined]` instead, however that has security implications. +- Since the Clickhouse 24.11 Ubuntu images started using `ubuntu:22.04` as its base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run --security-opt seccomp=unconfined` instead, however that has security implications. 
## How to use this image @@ -51,7 +51,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in ### connect to it using curl ```bash -echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- +echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @- ``` More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/). diff --git a/docker/server/README.src/github-repo b/docker/server/README.src/github-repo index dc2b6635325..70a009ec958 100644 --- a/docker/server/README.src/github-repo +++ b/docker/server/README.src/github-repo @@ -1 +1 @@ -https://github.com/ClickHouse/docker-library +https://github.com/ClickHouse/ClickHouse From aa4d37f72cbea7834c8a8c8d6668f3b3b01b80a7 Mon Sep 17 00:00:00 2001 From: alesapin Date: Sat, 9 Nov 2024 13:41:08 +0100 Subject: [PATCH 354/566] Fix test --- .../0_stateless/02117_show_create_table_system.reference | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 2ea62444cff..ef5a2c6665f 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -485,6 +485,8 @@ CREATE TABLE system.parts `data_version` UInt64, `primary_key_bytes_in_memory` UInt64, `primary_key_bytes_in_memory_allocated` UInt64, + `index_granularity_bytes_in_memory` UInt64, + `index_granularity_bytes_in_memory_allocated` UInt64, `is_frozen` UInt8, `database` String, `table` String, From 7b1c72729a4f8e37d7c6ddf4fc8894149085e3e3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 14:22:43 +0100 Subject: [PATCH 355/566] Fix upgrade check --- tests/docker_scripts/upgrade_runner.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/docker_scripts/upgrade_runner.sh b/tests/docker_scripts/upgrade_runner.sh index ece75ebf782..92484f88ece 100755 --- a/tests/docker_scripts/upgrade_runner.sh +++ b/tests/docker_scripts/upgrade_runner.sh @@ -135,7 +135,7 @@ IS_SANITIZED=$(clickhouse-local --query "SELECT value LIKE '%-fsanitize=%' FROM if [ "${IS_SANITIZED}" -eq "0" ] then save_settings_clean 'new_settings.native' - clickhouse-local -nmq " + clickhouse-local --implicit-select 0 -nmq " CREATE TABLE old_settings AS file('old_settings.native'); CREATE TABLE old_version AS file('old_version.native'); CREATE TABLE new_settings AS file('new_settings.native'); @@ -147,7 +147,6 @@ then FROM new_settings LEFT JOIN old_settings ON new_settings.name = old_settings.name WHERE (new_value != old_value) - AND NOT (startsWith(new_value, 'auto(') AND old_value LIKE '%auto(%') AND (name NOT IN ( SELECT arrayJoin(tupleElement(changes, 'name')) FROM From 93d586876092ae662f510d67e1ad00e6d1d55bdf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 14:30:46 +0100 Subject: [PATCH 356/566] Fix tests --- .../02751_ip_types_aggregate_functions_states.sql.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02751_ip_types_aggregate_functions_states.sql.j2 b/tests/queries/0_stateless/02751_ip_types_aggregate_functions_states.sql.j2 index 7d030d4be2d..602b98e576b 100644 --- 
a/tests/queries/0_stateless/02751_ip_types_aggregate_functions_states.sql.j2 +++ b/tests/queries/0_stateless/02751_ip_types_aggregate_functions_states.sql.j2 @@ -1,5 +1,7 @@ -- Tags: no-parallel, no-fasttest +SET output_format_pretty_single_large_number_tip_threshold = 0; + {# this test checks backward compatibility of aggregate functions States against IPv4, IPv6 types #} {% set ip4_generator = "select num::UInt32::IPv4 ip from (select arrayJoin(range(999999999, number)) as num from numbers(999999999,50)) order by ip" %} From 016c122af9d85da32726d9bb0d2b318eda06c2e8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 16:33:37 +0100 Subject: [PATCH 357/566] Update PULL_REQUEST_TEMPLATE.md --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 3dcce68ab46..976c69d3c34 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -12,7 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py - Backward Incompatible Change - Build/Testing/Packaging Improvement - Documentation (changelog entry is not required) -- Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +- Critical Bug Fix (crash, data loss, RBAC) - Bug Fix (user-visible misbehavior in an official stable release) - CI Fix or Improvement (changelog entry is not required) - Not for changelog (changelog entry is not required) From a898f163546b3cb3d607443e9881c88337867e83 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 17:21:53 +0100 Subject: [PATCH 358/566] Fix tests --- tests/queries/0_stateless/02184_hash_functions_and_ip_types.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02184_hash_functions_and_ip_types.sql b/tests/queries/0_stateless/02184_hash_functions_and_ip_types.sql index e7d1909cae6..22b59a16255 100644 --- a/tests/queries/0_stateless/02184_hash_functions_and_ip_types.sql +++ b/tests/queries/0_stateless/02184_hash_functions_and_ip_types.sql @@ -1,5 +1,6 @@ -- Tags: no-fasttest +SET output_format_pretty_single_large_number_tip_threshold = 0; SET enable_analyzer = 1; SELECT From 7849a9ce16d8a8ed9d97e3b57541c108bb00d044 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 18:12:11 +0100 Subject: [PATCH 359/566] Fix error --- tests/docker_scripts/upgrade_runner.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/docker_scripts/upgrade_runner.sh b/tests/docker_scripts/upgrade_runner.sh index 92484f88ece..15c5ab69521 100755 --- a/tests/docker_scripts/upgrade_runner.sh +++ b/tests/docker_scripts/upgrade_runner.sh @@ -63,7 +63,7 @@ install_packages previous_release_package_folder function save_settings_clean() { local out=$1 && shift - script -q -c "clickhouse-local -q \"select * from system.settings into outfile '$out'\"" --log-out /dev/null + script -q -c "clickhouse-local --implicit-select 0 -q \"select * from system.settings into outfile '$out'\"" --log-out /dev/null } # We save the (numeric) version of the old server to compare setting changes between the 2 @@ -135,7 +135,7 @@ IS_SANITIZED=$(clickhouse-local --query "SELECT value LIKE '%-fsanitize=%' FROM if [ "${IS_SANITIZED}" -eq "0" ] then save_settings_clean 'new_settings.native' - clickhouse-local --implicit-select 0 -nmq " + clickhouse-local -nmq " CREATE TABLE old_settings AS file('old_settings.native'); CREATE TABLE old_version AS file('old_version.native'); CREATE TABLE new_settings AS 
file('new_settings.native'); From 979b2128067e44e92bb738b491ebabaa0f41cbeb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 1 Jan 2024 19:36:35 +0100 Subject: [PATCH 360/566] Make higher order functions constant expressions --- src/Functions/FunctionsMiscellaneous.h | 14 +++++++++++++- ...961_higher_order_constant_expressions.reference | 8 ++++++++ .../02961_higher_order_constant_expressions.sql | 11 +++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/02961_higher_order_constant_expressions.reference create mode 100644 tests/queries/0_stateless/02961_higher_order_constant_expressions.sql diff --git a/src/Functions/FunctionsMiscellaneous.h b/src/Functions/FunctionsMiscellaneous.h index fb5109eaa88..4b189279651 100644 --- a/src/Functions/FunctionsMiscellaneous.h +++ b/src/Functions/FunctionsMiscellaneous.h @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -122,12 +123,19 @@ public: String getName() const override { return "FunctionCapture"; } bool useDefaultImplementationForNulls() const override { return false; } + /// It's possible if expression_actions contains function that don't use /// default implementation for Nothing and one of captured columns can be Nothing /// Example: SELECT arrayMap(x -> [x, arrayElement(y, 0)], []), [] as y bool useDefaultImplementationForNothing() const override { return false; } bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } + /// If all the captured arguments are constant, let's also return ColumnConst (with ColumnFunction inside it). + /// Consequently, it allows to treat higher order functions with constant arrays and constant captured columns + /// as constant expressions. + /// Consequently, it allows its usage in contexts requiring constants, such as the right hand side of IN. + bool useDefaultImplementationForConstants() const override { return true; } + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { Names names; @@ -148,7 +156,11 @@ public: auto function = std::make_unique(expression_actions, types, names, capture->return_type, capture->return_name); - return ColumnFunction::create(input_rows_count, std::move(function), arguments); + /// If there are no captured columns, the result is constant. 
+ if (arguments.empty()) + return ColumnConst::create(ColumnFunction::create(1, std::move(function), arguments), input_rows_count); + else + return ColumnFunction::create(input_rows_count, std::move(function), arguments); } private: diff --git a/tests/queries/0_stateless/02961_higher_order_constant_expressions.reference b/tests/queries/0_stateless/02961_higher_order_constant_expressions.reference new file mode 100644 index 00000000000..058d23ad850 --- /dev/null +++ b/tests/queries/0_stateless/02961_higher_order_constant_expressions.reference @@ -0,0 +1,8 @@ +[1,2,3] 1 +[2,3,4] 1 +[2,4,6] 1 +[5,7,9] 1 +[1,1,1] 1 +[1,2,3] 0 +[0,0,0] 0 +3 1 diff --git a/tests/queries/0_stateless/02961_higher_order_constant_expressions.sql b/tests/queries/0_stateless/02961_higher_order_constant_expressions.sql new file mode 100644 index 00000000000..47480010751 --- /dev/null +++ b/tests/queries/0_stateless/02961_higher_order_constant_expressions.sql @@ -0,0 +1,11 @@ +SELECT arrayMap(x -> x, [1, 2, 3]) AS x, isConstant(x); +SELECT arrayMap(x -> x + 1, [1, 2, 3]) AS x, isConstant(x); +SELECT arrayMap(x -> x + x, [1, 2, 3]) AS x, isConstant(x); +SELECT arrayMap((x, y) -> x + y, [1, 2, 3], [4, 5, 6]) AS x, isConstant(x); +SELECT arrayMap(x -> 1, [1, 2, 3]) AS x, isConstant(x); +SELECT arrayMap(x -> x + number, [1, 2, 3]) AS x, isConstant(x) FROM numbers(1); +SELECT arrayMap(x -> number, [1, 2, 3]) AS x, isConstant(x) FROM numbers(1); +SELECT arrayMax([1, 2, 3]) AS x, isConstant(x); + +-- Does not work yet: +-- SELECT [1, 2, 3] IN arrayMap(x -> x, [1, 2, 3]); From 3f2f358fb9c7e8b01e32632684c1dea24c1fc67a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 1 Jan 2024 20:32:27 +0100 Subject: [PATCH 361/566] Support constant lambda functions --- src/Functions/array/FunctionArrayMapped.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Functions/array/FunctionArrayMapped.h b/src/Functions/array/FunctionArrayMapped.h index f4832431f04..e51c465f883 100644 --- a/src/Functions/array/FunctionArrayMapped.h +++ b/src/Functions/array/FunctionArrayMapped.h @@ -282,7 +282,9 @@ public: if (!column_with_type_and_name.column) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be a function.", getName()); - const auto * column_function = typeid_cast(column_with_type_and_name.column.get()); + auto column_function_materialized = column_with_type_and_name.column->convertToFullColumnIfConst(); + + const auto * column_function = typeid_cast(column_function_materialized.get()); if (!column_function) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be a function.", getName()); From 6c1016568c4e76e2285a5a73d5bbfc7c3d0824a5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 03:35:21 +0100 Subject: [PATCH 362/566] Better implementation --- src/Functions/FunctionsMiscellaneous.h | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/Functions/FunctionsMiscellaneous.h b/src/Functions/FunctionsMiscellaneous.h index 4b189279651..6e89c4dd65d 100644 --- a/src/Functions/FunctionsMiscellaneous.h +++ b/src/Functions/FunctionsMiscellaneous.h @@ -130,12 +130,6 @@ public: bool useDefaultImplementationForNothing() const override { return false; } bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } - /// If all the captured arguments are constant, let's also return ColumnConst (with ColumnFunction inside it). 
- /// Consequently, it allows to treat higher order functions with constant arrays and constant captured columns - /// as constant expressions. - /// Consequently, it allows its usage in contexts requiring constants, such as the right hand side of IN. - bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { Names names; @@ -156,8 +150,13 @@ public: auto function = std::make_unique(expression_actions, types, names, capture->return_type, capture->return_name); - /// If there are no captured columns, the result is constant. - if (arguments.empty()) + /// If all the captured arguments are constant, let's also return ColumnConst (with ColumnFunction inside it). + /// Consequently, it allows to treat higher order functions with constant arrays and constant captured columns + /// as constant expressions. + /// Consequently, it allows its usage in contexts requiring constants, such as the right hand side of IN. + bool all_arguments_are_constant = std::all_of(arguments.begin(), arguments.end(), [](const auto & arg) { return arg.column->isConst(); }); + + if (all_arguments_are_constant) return ColumnConst::create(ColumnFunction::create(1, std::move(function), arguments), input_rows_count); else return ColumnFunction::create(input_rows_count, std::move(function), arguments); From f1777b957946a5ab32851d8e1c7000ee3657e3d7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 03:55:18 +0100 Subject: [PATCH 363/566] Fix error --- src/Functions/FunctionsMiscellaneous.h | 10 +++++++++- src/Processors/Formats/IOutputFormat.cpp | 2 +- src/Processors/Transforms/DistinctTransform.cpp | 2 +- src/QueryPipeline/RemoteQueryExecutor.cpp | 2 +- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/Functions/FunctionsMiscellaneous.h b/src/Functions/FunctionsMiscellaneous.h index 6e89c4dd65d..62b43386db5 100644 --- a/src/Functions/FunctionsMiscellaneous.h +++ b/src/Functions/FunctionsMiscellaneous.h @@ -157,9 +157,17 @@ public: bool all_arguments_are_constant = std::all_of(arguments.begin(), arguments.end(), [](const auto & arg) { return arg.column->isConst(); }); if (all_arguments_are_constant) - return ColumnConst::create(ColumnFunction::create(1, std::move(function), arguments), input_rows_count); + { + ColumnsWithTypeAndName arguments_resized = arguments; + for (auto & elem : arguments_resized) + elem.column = elem.column->cloneResized(1); + + return ColumnConst::create(ColumnFunction::create(1, std::move(function), arguments_resized), input_rows_count); + } else + { return ColumnFunction::create(input_rows_count, std::move(function), arguments); + } } private: diff --git a/src/Processors/Formats/IOutputFormat.cpp b/src/Processors/Formats/IOutputFormat.cpp index 97628778adb..947de45f852 100644 --- a/src/Processors/Formats/IOutputFormat.cpp +++ b/src/Processors/Formats/IOutputFormat.cpp @@ -55,7 +55,7 @@ static Chunk prepareTotals(Chunk chunk) /// Skip rows except the first one. 
auto columns = chunk.detachColumns(); for (auto & column : columns) - column = column->cut(0, 1); + column = column->cloneResized(1); chunk.setColumns(std::move(columns), 1); } diff --git a/src/Processors/Transforms/DistinctTransform.cpp b/src/Processors/Transforms/DistinctTransform.cpp index d528303a642..53ee2c52884 100644 --- a/src/Processors/Transforms/DistinctTransform.cpp +++ b/src/Processors/Transforms/DistinctTransform.cpp @@ -64,7 +64,7 @@ void DistinctTransform::transform(Chunk & chunk) if (unlikely(key_columns_pos.empty())) { for (auto & column : columns) - column = column->cut(0, 1); + column = column->cloneResized(1); chunk.setColumns(std::move(columns), 1); stopReading(); diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index 5faae03bc8f..401b3d36f83 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -327,7 +327,7 @@ static Block adaptBlockStructure(const Block & block, const Block & header) /// TODO: check that column contains the same value. /// TODO: serialize const columns. auto col = block.getByName(elem.name); - col.column = block.getByName(elem.name).column->cut(0, 1); + col.column = block.getByName(elem.name).column->cloneResized(1); column = castColumn(col, elem.type); From 4ae7d589f7f09db1488618f47c8137ac6aca0d01 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 03:56:23 +0100 Subject: [PATCH 364/566] Fix error --- src/Processors/Formats/IOutputFormat.cpp | 2 +- src/Processors/Transforms/DistinctTransform.cpp | 2 +- src/QueryPipeline/RemoteQueryExecutor.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Processors/Formats/IOutputFormat.cpp b/src/Processors/Formats/IOutputFormat.cpp index 947de45f852..97628778adb 100644 --- a/src/Processors/Formats/IOutputFormat.cpp +++ b/src/Processors/Formats/IOutputFormat.cpp @@ -55,7 +55,7 @@ static Chunk prepareTotals(Chunk chunk) /// Skip rows except the first one. auto columns = chunk.detachColumns(); for (auto & column : columns) - column = column->cloneResized(1); + column = column->cut(0, 1); chunk.setColumns(std::move(columns), 1); } diff --git a/src/Processors/Transforms/DistinctTransform.cpp b/src/Processors/Transforms/DistinctTransform.cpp index 53ee2c52884..d528303a642 100644 --- a/src/Processors/Transforms/DistinctTransform.cpp +++ b/src/Processors/Transforms/DistinctTransform.cpp @@ -64,7 +64,7 @@ void DistinctTransform::transform(Chunk & chunk) if (unlikely(key_columns_pos.empty())) { for (auto & column : columns) - column = column->cloneResized(1); + column = column->cut(0, 1); chunk.setColumns(std::move(columns), 1); stopReading(); diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index 401b3d36f83..5faae03bc8f 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -327,7 +327,7 @@ static Block adaptBlockStructure(const Block & block, const Block & header) /// TODO: check that column contains the same value. /// TODO: serialize const columns. 
auto col = block.getByName(elem.name); - col.column = block.getByName(elem.name).column->cloneResized(1); + col.column = block.getByName(elem.name).column->cut(0, 1); column = castColumn(col, elem.type); From 1e64b56a0f7f9cf0e7d209db4af33511d440954d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 04:18:32 +0100 Subject: [PATCH 365/566] Support constexpr functions in arrayFold --- src/Functions/array/arrayFold.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Functions/array/arrayFold.cpp b/src/Functions/array/arrayFold.cpp index 483a5d6404b..a9635f82db4 100644 --- a/src/Functions/array/arrayFold.cpp +++ b/src/Functions/array/arrayFold.cpp @@ -87,7 +87,9 @@ public: if (!lambda_function_with_type_and_name.column) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be a function", getName()); - const auto * lambda_function = typeid_cast(lambda_function_with_type_and_name.column.get()); + auto lambda_function_materialized = lambda_function_with_type_and_name.column->convertToFullColumnIfConst(); + + const auto * lambda_function = typeid_cast(lambda_function_materialized.get()); if (!lambda_function) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be a function", getName()); From 4334a149735742c154e9381ea9b997bb25f03dbd Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 04:20:13 +0100 Subject: [PATCH 366/566] Fix test --- tests/queries/0_stateless/01284_fuzz_bits.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01284_fuzz_bits.sql b/tests/queries/0_stateless/01284_fuzz_bits.sql index 95a07c7bd44..1055d2aa580 100644 --- a/tests/queries/0_stateless/01284_fuzz_bits.sql +++ b/tests/queries/0_stateless/01284_fuzz_bits.sql @@ -18,7 +18,7 @@ FROM reinterpretAsUInt8( substring( fuzzBits( - arrayStringConcat(arrayMap(x -> toString('\0'), range(10000))), + materialize(arrayStringConcat(arrayMap(x -> toString('\0'), range(10000)))), 0.3 ), id + 1, From f2d45ba43b1d846dfb27d9f1b15b15dddb59c930 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 04:35:42 +0100 Subject: [PATCH 367/566] Fix tests --- src/Storages/MergeTree/KeyCondition.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 17723d341fb..c6497660386 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -597,12 +597,15 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( case (ActionsDAG::ActionType::COLUMN): { String name; - if (const auto * column_const = typeid_cast(node.column.get())) + if (const auto * column_const = typeid_cast(node.column.get()); + column_const && column_const->getDataType() != TypeIndex::Function) + { /// Re-generate column name for constant. - /// DAG form query (with enabled analyzer) uses suffixes for constants, like 1_UInt8. - /// DAG from PK does not use it. This breaks matching by column name sometimes. + /// DAG from the query (with enabled analyzer) uses suffixes for constants, like 1_UInt8. + /// DAG from the PK does not use it. This breaks matching by column name sometimes. /// Ideally, we should not compare names, but DAG subtrees instead. 
- name = ASTLiteral(column_const->getDataColumn()[0]).getColumnName(); + name = ASTLiteral(column_const->getField()).getColumnName(); + } else name = node.result_name; From ce8ffaf5c344c924f01697c543632068895d3bb9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 9 Nov 2024 23:54:37 +0100 Subject: [PATCH 368/566] Miscellaneous --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 16 +++++++--------- src/Columns/ColumnFunction.cpp | 20 ++++++++++++++++++++ src/Columns/ColumnFunction.h | 10 ++-------- src/Functions/IFunction.h | 2 +- 4 files changed, 30 insertions(+), 18 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 4bb283cbf3e..390418494e7 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -51,7 +51,6 @@ #include #include #include -#include #include #include #include @@ -3023,9 +3022,10 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi argument_column.name = arguments_projection_names[function_argument_index]; /** If function argument is lambda, save lambda argument index and initialize argument type as DataTypeFunction - * where function argument types are initialized with empty array of lambda arguments size. + * where function argument types are initialized with empty arrays of lambda arguments size. */ - if (const auto * lambda_node = function_argument->as()) + const auto * lambda_node = function_argument->as(); + if (lambda_node) { size_t lambda_arguments_size = lambda_node->getArguments().getNodes().size(); argument_column.type = std::make_shared(DataTypes(lambda_arguments_size, nullptr), nullptr); @@ -3497,15 +3497,11 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi else function_base = function->build(argument_columns); - /// Do not constant fold get scalar functions - // bool disable_constant_folding = function_name == "__getScalar" || function_name == "shardNum" || - // function_name == "shardCount" || function_name == "hostName" || function_name == "tcpPort"; - /** If function is suitable for constant folding try to convert it to constant. * Example: SELECT plus(1, 1); * Result: SELECT 2; */ - if (function_base->isSuitableForConstantFolding()) // && !disable_constant_folding) + if (function_base->isSuitableForConstantFolding()) { auto result_type = function_base->getResultType(); auto executable_function = function_base->prepare(argument_columns); @@ -3514,7 +3510,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi if (all_arguments_constants) { - size_t num_rows = function_arguments.empty() ? 
0 : argument_columns.front().column->size(); + size_t num_rows = 0; + if (!argument_columns.empty()) + num_rows = argument_columns.front().column->size(); column = executable_function->execute(argument_columns, result_type, num_rows, true); } else diff --git a/src/Columns/ColumnFunction.cpp b/src/Columns/ColumnFunction.cpp index 18c343c6ca6..cc80d04444e 100644 --- a/src/Columns/ColumnFunction.cpp +++ b/src/Columns/ColumnFunction.cpp @@ -72,6 +72,26 @@ ColumnPtr ColumnFunction::cut(size_t start, size_t length) const return ColumnFunction::create(length, function, capture, is_short_circuit_argument, is_function_compiled); } +Field ColumnFunction::operator[](size_t n) const +{ + Field res; + get(n, res); + return res; +} + +void ColumnFunction::get(size_t n, Field & res) const +{ + const size_t tuple_size = captured_columns.size(); + + res = Tuple(); + Tuple & res_tuple = res.safeGet(); + res_tuple.reserve(tuple_size); + + for (size_t i = 0; i < tuple_size; ++i) + res_tuple.push_back((*captured_columns[i].column)[n]); +} + + #if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnFunction::insertFrom(const IColumn & src, size_t n) #else diff --git a/src/Columns/ColumnFunction.h b/src/Columns/ColumnFunction.h index b62c6bf70eb..8df9e23c0e8 100644 --- a/src/Columns/ColumnFunction.h +++ b/src/Columns/ColumnFunction.h @@ -60,15 +60,9 @@ public: void appendArguments(const ColumnsWithTypeAndName & columns); ColumnWithTypeAndName reduce() const; - Field operator[](size_t) const override - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot get value from {}", getName()); - } + Field operator[](size_t n) const override; - void get(size_t, Field &) const override - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot get value from {}", getName()); - } + void get(size_t n, Field & res) const override; StringRef getDataAt(size_t) const override { diff --git a/src/Functions/IFunction.h b/src/Functions/IFunction.h index c3ba4be7419..d0d6b02e69d 100644 --- a/src/Functions/IFunction.h +++ b/src/Functions/IFunction.h @@ -184,7 +184,7 @@ public: /** If function isSuitableForConstantFolding then, this method will be called during query analysis * if some arguments are constants. For example logical functions (AndFunction, OrFunction) can - * return they result based on some constant arguments. + * return the result based on some constant arguments. * Arguments are passed without modifications, useDefaultImplementationForNulls, useDefaultImplementationForNothing, * useDefaultImplementationForConstants, useDefaultImplementationForLowCardinality are not applied. */ From bc79d9bad3569a94b2755ab9ea3549ff8202148a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 00:01:42 +0100 Subject: [PATCH 369/566] Only with analyzer --- src/Functions/FunctionsMiscellaneous.h | 11 ++++++++--- src/Interpreters/ActionsVisitor.cpp | 2 +- src/Planner/PlannerActionsVisitor.cpp | 2 +- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/Functions/FunctionsMiscellaneous.h b/src/Functions/FunctionsMiscellaneous.h index 62b43386db5..cea11cfe677 100644 --- a/src/Functions/FunctionsMiscellaneous.h +++ b/src/Functions/FunctionsMiscellaneous.h @@ -113,6 +113,7 @@ public: NamesAndTypesList lambda_arguments; String return_name; DataTypePtr return_type; + bool allow_constant_folding; }; using CapturePtr = std::shared_ptr; @@ -154,9 +155,11 @@ public: /// Consequently, it allows to treat higher order functions with constant arrays and constant captured columns /// as constant expressions. 
/// Consequently, it allows its usage in contexts requiring constants, such as the right hand side of IN. - bool all_arguments_are_constant = std::all_of(arguments.begin(), arguments.end(), [](const auto & arg) { return arg.column->isConst(); }); + bool constant_folding = capture->allow_constant_folding + && std::all_of(arguments.begin(), arguments.end(), + [](const auto & arg) { return arg.column->isConst(); }); - if (all_arguments_are_constant) + if (constant_folding) { ColumnsWithTypeAndName arguments_resized = arguments; for (auto & elem : arguments_resized) @@ -222,7 +225,8 @@ public: const Names & captured_names, const NamesAndTypesList & lambda_arguments, const DataTypePtr & function_return_type, - const String & expression_return_name) + const String & expression_return_name, + bool allow_constant_folding) : expression_actions(std::move(expression_actions_)) { /// Check that expression does not contain unusual actions that will break columns structure. @@ -265,6 +269,7 @@ public: .lambda_arguments = lambda_arguments, .return_name = expression_return_name, .return_type = function_return_type, + .allow_constant_folding = allow_constant_folding, }); } diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 65c3fe8cfcf..696021b418c 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -1308,7 +1308,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & String lambda_name = data.getUniqueName("__lambda"); auto function_capture = std::make_shared( - lambda_actions, captured, lambda_arguments, result_type, result_name); + lambda_actions, captured, lambda_arguments, result_type, result_name, false); data.addFunction(function_capture, captured, lambda_name); argument_types[i] = std::make_shared(lambda_type->getArgumentTypes(), result_type); diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index aa233109fa9..2cb2a242c35 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -804,7 +804,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi auto lambda_node_name = calculateActionNodeName(node, *planner_context); auto function_capture = std::make_shared( - lambda_actions, captured_column_names, lambda_arguments_names_and_types, lambda_node.getExpression()->getResultType(), lambda_expression_node_name); + lambda_actions, captured_column_names, lambda_arguments_names_and_types, lambda_node.getExpression()->getResultType(), lambda_expression_node_name, true); // TODO: Pass IFunctionBase here not FunctionCaptureOverloadResolver. 
const auto * actions_node = actions_stack[level].addFunctionIfNecessary(lambda_node_name, std::move(lambda_children), function_capture); From b70f39879d3740e671d5111854daad2b9397adc1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 00:02:24 +0100 Subject: [PATCH 370/566] Only with analyzer --- .../0_stateless/02961_higher_order_constant_expressions.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02961_higher_order_constant_expressions.sql b/tests/queries/0_stateless/02961_higher_order_constant_expressions.sql index 47480010751..23b0b72f48f 100644 --- a/tests/queries/0_stateless/02961_higher_order_constant_expressions.sql +++ b/tests/queries/0_stateless/02961_higher_order_constant_expressions.sql @@ -1,3 +1,5 @@ +SET enable_analyzer = 1; + SELECT arrayMap(x -> x, [1, 2, 3]) AS x, isConstant(x); SELECT arrayMap(x -> x + 1, [1, 2, 3]) AS x, isConstant(x); SELECT arrayMap(x -> x + x, [1, 2, 3]) AS x, isConstant(x); From c3f42b7bc770e5e8104527011f6bc51d5b8469ff Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 01:25:25 +0100 Subject: [PATCH 371/566] Something --- cmake/linux/default_libs.cmake | 3 +- src/AggregateFunctions/ReservoirSampler.h | 2 +- src/Columns/ColumnUnique.cpp | 1 + src/Columns/ColumnUnique.h | 1 + src/Columns/IColumn.cpp | 1 + src/Common/FieldVisitorConvertToNumber.cpp | 2 +- src/Common/FieldVisitorConvertToNumber.h | 1 + src/DataTypes/DataTypesBinaryEncoding.cpp | 5 + src/DataTypes/DataTypesNumber.cpp | 1 + src/DataTypes/DataTypesNumber.h | 1 + src/Formats/JSONExtractTree.cpp | 4 +- src/Functions/FunctionBinaryArithmetic.h | 1 + src/Functions/FunctionsConversion.cpp | 1 + src/Functions/FunctionsRound.h | 2 +- src/IO/readFloatText.cpp | 9 ++ src/IO/readFloatText.h | 111 ++++++++++++++++-- .../Impl/Parquet/ParquetDataValuesReader.cpp | 2 + .../Impl/Parquet/ParquetLeafColReader.cpp | 1 + 18 files changed, 132 insertions(+), 17 deletions(-) diff --git a/cmake/linux/default_libs.cmake b/cmake/linux/default_libs.cmake index 51620bc9f33..79875e1ed6b 100644 --- a/cmake/linux/default_libs.cmake +++ b/cmake/linux/default_libs.cmake @@ -3,8 +3,7 @@ set (DEFAULT_LIBS "-nodefaultlibs") -# We need builtins from Clang's RT even without libcxx - for ubsan+int128. 
-# See https://bugs.llvm.org/show_bug.cgi?id=16404 +# We need builtins from Clang execute_process (COMMAND ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY diff --git a/src/AggregateFunctions/ReservoirSampler.h b/src/AggregateFunctions/ReservoirSampler.h index 2668e0dc890..870cb429fb7 100644 --- a/src/AggregateFunctions/ReservoirSampler.h +++ b/src/AggregateFunctions/ReservoirSampler.h @@ -276,6 +276,6 @@ private: { if (OnEmpty == ReservoirSamplerOnEmpty::THROW) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSampler"); - return NanLikeValueConstructor>::getValue(); + return NanLikeValueConstructor>::getValue(); } }; diff --git a/src/Columns/ColumnUnique.cpp b/src/Columns/ColumnUnique.cpp index 54f45204c00..773edbfd590 100644 --- a/src/Columns/ColumnUnique.cpp +++ b/src/Columns/ColumnUnique.cpp @@ -16,6 +16,7 @@ template class ColumnUnique; template class ColumnUnique; template class ColumnUnique; template class ColumnUnique; +template class ColumnUnique; template class ColumnUnique; template class ColumnUnique; template class ColumnUnique; diff --git a/src/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h index ffa7c311e9e..ce7bbf0766f 100644 --- a/src/Columns/ColumnUnique.h +++ b/src/Columns/ColumnUnique.h @@ -760,6 +760,7 @@ extern template class ColumnUnique; extern template class ColumnUnique; extern template class ColumnUnique; extern template class ColumnUnique; +extern template class ColumnUnique; extern template class ColumnUnique; extern template class ColumnUnique; extern template class ColumnUnique; diff --git a/src/Columns/IColumn.cpp b/src/Columns/IColumn.cpp index c9a0514af4e..4a3886dddb6 100644 --- a/src/Columns/IColumn.cpp +++ b/src/Columns/IColumn.cpp @@ -443,6 +443,7 @@ template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; +template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; diff --git a/src/Common/FieldVisitorConvertToNumber.cpp b/src/Common/FieldVisitorConvertToNumber.cpp index 75b3fbfe02a..a5963e3d028 100644 --- a/src/Common/FieldVisitorConvertToNumber.cpp +++ b/src/Common/FieldVisitorConvertToNumber.cpp @@ -1,5 +1,4 @@ #include -#include "base/Decimal.h" namespace DB { @@ -17,6 +16,7 @@ template class FieldVisitorConvertToNumber; template class FieldVisitorConvertToNumber; template class FieldVisitorConvertToNumber; template class FieldVisitorConvertToNumber; +//template class FieldVisitorConvertToNumber; template class FieldVisitorConvertToNumber; template class FieldVisitorConvertToNumber; diff --git a/src/Common/FieldVisitorConvertToNumber.h b/src/Common/FieldVisitorConvertToNumber.h index 638b8805b6a..38d5dc473c4 100644 --- a/src/Common/FieldVisitorConvertToNumber.h +++ b/src/Common/FieldVisitorConvertToNumber.h @@ -129,6 +129,7 @@ extern template class FieldVisitorConvertToNumber; extern template class FieldVisitorConvertToNumber; extern template class FieldVisitorConvertToNumber; extern template class FieldVisitorConvertToNumber; +//extern template class FieldVisitorConvertToNumber; extern template class FieldVisitorConvertToNumber; extern template class FieldVisitorConvertToNumber; diff --git 
a/src/DataTypes/DataTypesBinaryEncoding.cpp b/src/DataTypes/DataTypesBinaryEncoding.cpp index dc0f2f3f5aa..c3190b462c3 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.cpp +++ b/src/DataTypes/DataTypesBinaryEncoding.cpp @@ -96,6 +96,7 @@ enum class BinaryTypeIndex : uint8_t SimpleAggregateFunction = 0x2E, Nested = 0x2F, JSON = 0x30, + BFloat16 = 0x31, }; /// In future we can introduce more arguments in the JSON data type definition. @@ -151,6 +152,8 @@ BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type) return BinaryTypeIndex::Int128; case TypeIndex::Int256: return BinaryTypeIndex::Int256; + case TypeIndex::BFloat16: + return BinaryTypeIndex::BFloat16; case TypeIndex::Float32: return BinaryTypeIndex::Float32; case TypeIndex::Float64: @@ -565,6 +568,8 @@ DataTypePtr decodeDataType(ReadBuffer & buf) return std::make_shared(); case BinaryTypeIndex::Int256: return std::make_shared(); + case BinaryTypeIndex::BFloat16: + return std::make_shared(); case BinaryTypeIndex::Float32: return std::make_shared(); case BinaryTypeIndex::Float64: diff --git a/src/DataTypes/DataTypesNumber.cpp b/src/DataTypes/DataTypesNumber.cpp index 5972cebbca1..4c8918521fe 100644 --- a/src/DataTypes/DataTypesNumber.cpp +++ b/src/DataTypes/DataTypesNumber.cpp @@ -112,6 +112,7 @@ template class DataTypeNumber; template class DataTypeNumber; template class DataTypeNumber; template class DataTypeNumber; +template class DataTypeNumber; template class DataTypeNumber; template class DataTypeNumber; diff --git a/src/DataTypes/DataTypesNumber.h b/src/DataTypes/DataTypesNumber.h index 29899847c4b..a9e77e01b13 100644 --- a/src/DataTypes/DataTypesNumber.h +++ b/src/DataTypes/DataTypesNumber.h @@ -63,6 +63,7 @@ extern template class DataTypeNumber; extern template class DataTypeNumber; extern template class DataTypeNumber; extern template class DataTypeNumber; +extern template class DataTypeNumber; extern template class DataTypeNumber; extern template class DataTypeNumber; diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index ae6051823b7..62905a2e630 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -131,7 +131,7 @@ bool tryGetNumericValueFromJSONElement( switch (element.type()) { case ElementType::DOUBLE: - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { /// We permit inaccurate conversion of double to float. /// Example: double 0.1 from JSON is not representable in float. 
@@ -175,7 +175,7 @@ bool tryGetNumericValueFromJSONElement( return false; auto rb = ReadBufferFromMemory{element.getString()}; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { if (!tryReadFloatText(value, rb) || !rb.eof()) { diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index df239b820af..854b40df441 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -110,6 +110,7 @@ template constexpr bool IsIntegralOrExtendedOrDecimal = IsDataTypeDecimal; template constexpr bool IsFloatingPoint = false; +template <> inline constexpr bool IsFloatingPoint = true; template <> inline constexpr bool IsFloatingPoint = true; template <> inline constexpr bool IsFloatingPoint = true; diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 70ec390b576..1c662dd1d9a 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -2930,6 +2930,7 @@ template <> struct FunctionTo { using Type = FunctionToInt32; }; template <> struct FunctionTo { using Type = FunctionToInt64; }; template <> struct FunctionTo { using Type = FunctionToInt128; }; template <> struct FunctionTo { using Type = FunctionToInt256; }; +//template <> struct FunctionTo { using Type = FunctionToBFloat16; }; template <> struct FunctionTo { using Type = FunctionToFloat32; }; template <> struct FunctionTo { using Type = FunctionToFloat64; }; diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 809905c692e..255eca5b406 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -694,7 +694,7 @@ public: if (arguments.size() > 1) { const ColumnWithTypeAndName & scale_column = arguments[1]; - res = Dispatcher::template apply(value_arg.column.get(), scale_column.column.get()); + res = Dispatcher::template apply(value_arg.column.get(), scale_column.column.get()); return true; } res = Dispatcher::template apply(value_arg.column.get()); diff --git a/src/IO/readFloatText.cpp b/src/IO/readFloatText.cpp index 17ccc1b25b7..fb3c86fd7b6 100644 --- a/src/IO/readFloatText.cpp +++ b/src/IO/readFloatText.cpp @@ -47,26 +47,35 @@ void assertNaN(ReadBuffer & buf) } +template void readFloatTextPrecise(BFloat16 &, ReadBuffer &); template void readFloatTextPrecise(Float32 &, ReadBuffer &); template void readFloatTextPrecise(Float64 &, ReadBuffer &); +template bool tryReadFloatTextPrecise(BFloat16 &, ReadBuffer &); template bool tryReadFloatTextPrecise(Float32 &, ReadBuffer &); template bool tryReadFloatTextPrecise(Float64 &, ReadBuffer &); +template void readFloatTextFast(BFloat16 &, ReadBuffer &); template void readFloatTextFast(Float32 &, ReadBuffer &); template void readFloatTextFast(Float64 &, ReadBuffer &); +template bool tryReadFloatTextFast(BFloat16 &, ReadBuffer &); template bool tryReadFloatTextFast(Float32 &, ReadBuffer &); template bool tryReadFloatTextFast(Float64 &, ReadBuffer &); +template void readFloatTextSimple(BFloat16 &, ReadBuffer &); template void readFloatTextSimple(Float32 &, ReadBuffer &); template void readFloatTextSimple(Float64 &, ReadBuffer &); +template bool tryReadFloatTextSimple(BFloat16 &, ReadBuffer &); template bool tryReadFloatTextSimple(Float32 &, ReadBuffer &); template bool tryReadFloatTextSimple(Float64 &, ReadBuffer &); +template void readFloatText(BFloat16 &, ReadBuffer &); template void readFloatText(Float32 &, ReadBuffer &); template void readFloatText(Float64 &, ReadBuffer 
&); +template bool tryReadFloatText(BFloat16 &, ReadBuffer &); template bool tryReadFloatText(Float32 &, ReadBuffer &); template bool tryReadFloatText(Float64 &, ReadBuffer &); +template bool tryReadFloatTextNoExponent(BFloat16 &, ReadBuffer &); template bool tryReadFloatTextNoExponent(Float32 &, ReadBuffer &); template bool tryReadFloatTextNoExponent(Float64 &, ReadBuffer &); diff --git a/src/IO/readFloatText.h b/src/IO/readFloatText.h index c2fec9d4b0b..a7fd6058dd9 100644 --- a/src/IO/readFloatText.h +++ b/src/IO/readFloatText.h @@ -222,7 +222,6 @@ ReturnType readFloatTextPreciseImpl(T & x, ReadBuffer & buf) break; } - char tmp_buf[MAX_LENGTH]; int num_copied_chars = 0; @@ -597,22 +596,85 @@ ReturnType readFloatTextSimpleImpl(T & x, ReadBuffer & buf) return ReturnType(true); } -template void readFloatTextPrecise(T & x, ReadBuffer & in) { readFloatTextPreciseImpl(x, in); } -template bool tryReadFloatTextPrecise(T & x, ReadBuffer & in) { return readFloatTextPreciseImpl(x, in); } +template void readFloatTextPrecise(T & x, ReadBuffer & in) +{ + if constexpr (std::is_same_v) + { + Float32 tmp; + readFloatTextPreciseImpl(tmp, in); + x = BFloat16(tmp); + } + else + readFloatTextPreciseImpl(x, in); +} + +template bool tryReadFloatTextPrecise(T & x, ReadBuffer & in) +{ + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextPreciseImpl(tmp, in); + if (res) + x = BFloat16(tmp); + return res; + } + else + return readFloatTextPreciseImpl(x, in); +} template void readFloatTextFast(T & x, ReadBuffer & in) { bool has_fractional; - readFloatTextFastImpl(x, in, has_fractional); + if constexpr (std::is_same_v) + { + Float32 tmp; + readFloatTextFastImpl(tmp, in, has_fractional); + x = BFloat16(tmp); + } + else + readFloatTextFastImpl(x, in, has_fractional); } + template bool tryReadFloatTextFast(T & x, ReadBuffer & in) { bool has_fractional; - return readFloatTextFastImpl(x, in, has_fractional); + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextFastImpl(tmp, in, has_fractional); + if (res) + x = BFloat16(tmp); + return res; + } + else + return readFloatTextFastImpl(x, in, has_fractional); } -template void readFloatTextSimple(T & x, ReadBuffer & in) { readFloatTextSimpleImpl(x, in); } -template bool tryReadFloatTextSimple(T & x, ReadBuffer & in) { return readFloatTextSimpleImpl(x, in); } +template void readFloatTextSimple(T & x, ReadBuffer & in) +{ + if constexpr (std::is_same_v) + { + Float32 tmp; + readFloatTextSimpleImpl(tmp, in); + x = BFloat16(tmp); + } + else + readFloatTextSimpleImpl(x, in); +} + +template bool tryReadFloatTextSimple(T & x, ReadBuffer & in) +{ + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextSimpleImpl(tmp, in); + if (res) + x = BFloat16(tmp); + return res; + } + else + return readFloatTextSimpleImpl(x, in); +} /// Implementation that is selected as default. 
@@ -624,18 +686,47 @@ template bool tryReadFloatText(T & x, ReadBuffer & in) { return try template bool tryReadFloatTextNoExponent(T & x, ReadBuffer & in) { bool has_fractional; - return readFloatTextFastImpl(x, in, has_fractional); + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextFastImpl(tmp, in, has_fractional); + if (res) + x = BFloat16(tmp); + return res; + + } + else + return readFloatTextFastImpl(x, in, has_fractional); } /// With a @has_fractional flag /// Used for input_format_try_infer_integers template bool tryReadFloatTextExt(T & x, ReadBuffer & in, bool & has_fractional) { - return readFloatTextFastImpl(x, in, has_fractional); + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextFastImpl(tmp, in, has_fractional); + if (res) + x = BFloat16(tmp); + return res; + } + else + return readFloatTextFastImpl(x, in, has_fractional); } + template bool tryReadFloatTextExtNoExponent(T & x, ReadBuffer & in, bool & has_fractional) { - return readFloatTextFastImpl(x, in, has_fractional); + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextFastImpl(tmp, in, has_fractional); + if (res) + x = BFloat16(tmp); + return res; + } + else + return readFloatTextFastImpl(x, in, has_fractional); } } diff --git a/src/Processors/Formats/Impl/Parquet/ParquetDataValuesReader.cpp b/src/Processors/Formats/Impl/Parquet/ParquetDataValuesReader.cpp index b471989076b..4b79be98810 100644 --- a/src/Processors/Formats/Impl/Parquet/ParquetDataValuesReader.cpp +++ b/src/Processors/Formats/Impl/Parquet/ParquetDataValuesReader.cpp @@ -580,6 +580,7 @@ template class ParquetPlainValuesReader; template class ParquetPlainValuesReader; template class ParquetPlainValuesReader; template class ParquetPlainValuesReader; +template class ParquetPlainValuesReader; template class ParquetPlainValuesReader; template class ParquetPlainValuesReader; template class ParquetPlainValuesReader>; @@ -602,6 +603,7 @@ template class ParquetRleDictReader; template class ParquetRleDictReader; template class ParquetRleDictReader; template class ParquetRleDictReader; +template class ParquetRleDictReader; template class ParquetRleDictReader; template class ParquetRleDictReader; template class ParquetRleDictReader>; diff --git a/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp b/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp index c3c7db510ed..328dd37107e 100644 --- a/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp +++ b/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp @@ -644,6 +644,7 @@ template class ParquetLeafColReader; template class ParquetLeafColReader; template class ParquetLeafColReader; template class ParquetLeafColReader; +template class ParquetLeafColReader; template class ParquetLeafColReader; template class ParquetLeafColReader; template class ParquetLeafColReader; From 1da6e1fffa8e5cc40d71fee52d6f2742a59d8f21 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 02:25:29 +0100 Subject: [PATCH 372/566] Conversions --- src/Functions/FunctionsConversion.cpp | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 1c662dd1d9a..f37dff35862 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -7,10 +7,8 @@ #include #include #include -#include #include #include -#include #include #include #include @@ -73,8 +71,10 @@ #include #include + 
namespace DB { + namespace Setting { extern const SettingsBool cast_ipv4_ipv6_default_on_conversion_error; @@ -1862,6 +1862,11 @@ struct ConvertImpl } } + if constexpr ((std::is_same_v || std::is_same_v) + && !(std::is_same_v || std::is_same_v)) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported", + TypeName, TypeName); + if constexpr (std::is_same_v || std::is_same_v) { @@ -2875,6 +2880,7 @@ struct NameToInt32 { static constexpr auto name = "toInt32"; }; struct NameToInt64 { static constexpr auto name = "toInt64"; }; struct NameToInt128 { static constexpr auto name = "toInt128"; }; struct NameToInt256 { static constexpr auto name = "toInt256"; }; +struct NameToBFloat16 { static constexpr auto name = "toBFloat16"; }; struct NameToFloat32 { static constexpr auto name = "toFloat32"; }; struct NameToFloat64 { static constexpr auto name = "toFloat64"; }; struct NameToUUID { static constexpr auto name = "toUUID"; }; @@ -2893,6 +2899,7 @@ using FunctionToInt32 = FunctionConvert>; using FunctionToInt128 = FunctionConvert>; using FunctionToInt256 = FunctionConvert>; +using FunctionToBFloat16 = FunctionConvert>; using FunctionToFloat32 = FunctionConvert>; using FunctionToFloat64 = FunctionConvert>; @@ -2930,7 +2937,7 @@ template <> struct FunctionTo { using Type = FunctionToInt32; }; template <> struct FunctionTo { using Type = FunctionToInt64; }; template <> struct FunctionTo { using Type = FunctionToInt128; }; template <> struct FunctionTo { using Type = FunctionToInt256; }; -//template <> struct FunctionTo { using Type = FunctionToBFloat16; }; +template <> struct FunctionTo { using Type = FunctionToBFloat16; }; template <> struct FunctionTo { using Type = FunctionToFloat32; }; template <> struct FunctionTo { using Type = FunctionToFloat64; }; @@ -2973,6 +2980,7 @@ struct NameToInt32OrZero { static constexpr auto name = "toInt32OrZero"; }; struct NameToInt64OrZero { static constexpr auto name = "toInt64OrZero"; }; struct NameToInt128OrZero { static constexpr auto name = "toInt128OrZero"; }; struct NameToInt256OrZero { static constexpr auto name = "toInt256OrZero"; }; +struct NameToBFloat16OrZero { static constexpr auto name = "toBFloat16OrZero"; }; struct NameToFloat32OrZero { static constexpr auto name = "toFloat32OrZero"; }; struct NameToFloat64OrZero { static constexpr auto name = "toFloat64OrZero"; }; struct NameToDateOrZero { static constexpr auto name = "toDateOrZero"; }; @@ -2999,6 +3007,7 @@ using FunctionToInt32OrZero = FunctionConvertFromString; using FunctionToInt128OrZero = FunctionConvertFromString; using FunctionToInt256OrZero = FunctionConvertFromString; +using FunctionToBFloat16OrZero = FunctionConvertFromString; using FunctionToFloat32OrZero = FunctionConvertFromString; using FunctionToFloat64OrZero = FunctionConvertFromString; using FunctionToDateOrZero = FunctionConvertFromString; @@ -3025,6 +3034,7 @@ struct NameToInt32OrNull { static constexpr auto name = "toInt32OrNull"; }; struct NameToInt64OrNull { static constexpr auto name = "toInt64OrNull"; }; struct NameToInt128OrNull { static constexpr auto name = "toInt128OrNull"; }; struct NameToInt256OrNull { static constexpr auto name = "toInt256OrNull"; }; +struct NameToBFloat16OrNull { static constexpr auto name = "toBFloat16OrNull"; }; struct NameToFloat32OrNull { static constexpr auto name = "toFloat32OrNull"; }; struct NameToFloat64OrNull { static constexpr auto name = "toFloat64OrNull"; }; struct NameToDateOrNull { static constexpr auto name = "toDateOrNull"; }; @@ -3051,6 +3061,7 @@ 
using FunctionToInt32OrNull = FunctionConvertFromString; using FunctionToInt128OrNull = FunctionConvertFromString; using FunctionToInt256OrNull = FunctionConvertFromString; +using FunctionToBFloat16OrNull = FunctionConvertFromString; using FunctionToFloat32OrNull = FunctionConvertFromString; using FunctionToFloat64OrNull = FunctionConvertFromString; using FunctionToDateOrNull = FunctionConvertFromString; @@ -5194,7 +5205,7 @@ private: if constexpr (is_any_of) { @@ -5447,6 +5458,7 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); @@ -5485,6 +5497,7 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); @@ -5513,6 +5526,7 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); From e65bb147d553b3fcd5f361366547b2858a122247 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 02:27:53 +0100 Subject: [PATCH 373/566] Style --- src/Functions/exp.cpp | 6 ++++++ src/Functions/log.cpp | 5 +++++ src/Functions/sigmoid.cpp | 6 ++++++ src/Functions/tanh.cpp | 6 ++++++ 4 files changed, 23 insertions(+) diff --git a/src/Functions/exp.cpp b/src/Functions/exp.cpp index 07c9288e8ab..24f1d313831 100644 --- a/src/Functions/exp.cpp +++ b/src/Functions/exp.cpp @@ -3,6 +3,12 @@ namespace DB { + +namespace ErrorCodes +{ +extern const int NOT_IMPLEMENTED; +} + namespace { diff --git a/src/Functions/log.cpp b/src/Functions/log.cpp index beaa8128b2b..49fc509634b 100644 --- a/src/Functions/log.cpp +++ b/src/Functions/log.cpp @@ -4,6 +4,11 @@ namespace DB { +namespace ErrorCodes +{ +extern const int NOT_IMPLEMENTED; +} + namespace { diff --git a/src/Functions/sigmoid.cpp b/src/Functions/sigmoid.cpp index 1179329845d..bb9710a15fe 100644 --- a/src/Functions/sigmoid.cpp +++ b/src/Functions/sigmoid.cpp @@ -3,6 +3,12 @@ namespace DB { + +namespace ErrorCodes +{ +extern const int NOT_IMPLEMENTED; +} + namespace { diff --git a/src/Functions/tanh.cpp b/src/Functions/tanh.cpp index 293318f9bbb..d0e1440485b 100644 --- a/src/Functions/tanh.cpp +++ b/src/Functions/tanh.cpp @@ -3,6 +3,12 @@ namespace DB { + +namespace ErrorCodes +{ +extern const int NOT_IMPLEMENTED; +} + namespace { From b4acc885f35e4cccae818fca477efffbc9332ded Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 02:37:26 +0100 Subject: [PATCH 374/566] Documentation --- docs/en/sql-reference/data-types/float.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/data-types/float.md b/docs/en/sql-reference/data-types/float.md index 3c789076c1e..7185308bdce 100644 --- a/docs/en/sql-reference/data-types/float.md +++ b/docs/en/sql-reference/data-types/float.md @@ -1,10 +1,10 @@ --- slug: /en/sql-reference/data-types/float sidebar_position: 4 -sidebar_label: Float32, Float64 +sidebar_label: Float32, Float64, BFloat16 --- -# Float32, Float64 +# Float32, Float64, BFloat16 :::note If you need accurate calculations, in particular if you work with financial or business data requiring a high precision, you should consider using [Decimal](../data-types/decimal.md) instead. 
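The FunctionsConversion.cpp hunks above register a `toBFloat16` family (`toBFloat16`, `toBFloat16OrZero`, `toBFloat16OrNull`) alongside the existing `toFloat32`/`toFloat64` conversions. A short usage sketch follows; it assumes a ClickHouse build that already contains these patches, and the expected results are the ones quoted in the documentation strings added later in this series, so treat them as illustrative:

```sql
SELECT toBFloat16(12.3::Float32);       -- 12.3125 (only 7 mantissa bits are kept)
SELECT toBFloat16OrZero('abc');         -- 0, unparsable strings fall back to zero
SELECT toBFloat16OrNull('abc');         -- NULL, the OrNull variant falls back to NULL
SELECT toBFloat16OrZero('12.3456789');  -- 12.375, a silent loss of precision is allowed
```

Because BFloat16 keeps the 8-bit exponent of Float32 but only a 7-bit mantissa, it preserves the full Float32 range while only about two to three significant decimal digits survive the conversion.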
@@ -117,3 +117,11 @@ SELECT 0 / 0 ``` See the rules for `NaN` sorting in the section [ORDER BY clause](../../sql-reference/statements/select/order-by.md). + +## BFloat16 + +`BFloat16` is a 16-bit floating point data type with 8-bit exponent, sign, and 7-bit mantissa. + +It is useful for machine learning and AI applications. + +ClickHouse supports conversions between `Float32` and `BFloat16`. Most of other operations are not supported. From 6cb083621aece140d08d800620b0e5fe7bdc2da0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 02:47:59 +0100 Subject: [PATCH 375/566] Documentation --- ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt | 1 + utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt b/ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt index e2966898be2..7cae8509b83 100644 --- a/ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt +++ b/ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt @@ -3131,3 +3131,4 @@ DistributedCachePoolBehaviourOnLimit SharedJoin ShareSet unacked +BFloat diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index a08143467cd..9765b45c085 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -3154,3 +3154,4 @@ znode znodes zookeeperSessionUptime zstd +BFloat From 55540c2119ca0dbc5d4eb51763155f27883df0b9 Mon Sep 17 00:00:00 2001 From: Eduard Karacharov Date: Sun, 10 Nov 2024 11:18:12 +0200 Subject: [PATCH 376/566] fix: transform set constant only if allowed --- src/Storages/MergeTree/KeyCondition.cpp | 7 +- src/Storages/MergeTree/KeyCondition.h | 1 + .../03269_partition_key_not_in_set.reference | 13 +++ .../03269_partition_key_not_in_set.sql | 81 +++++++++++++++++++ 4 files changed, 100 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03269_partition_key_not_in_set.reference create mode 100644 tests/queries/0_stateless/03269_partition_key_not_in_set.sql diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 17723d341fb..a2783ff4efe 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -1158,6 +1158,7 @@ bool KeyCondition::tryPrepareSetIndex( const RPNBuilderFunctionTreeNode & func, RPNElement & out, size_t & out_key_column_num, + bool & allow_constant_transformation, bool & is_constant_transformed) { const auto & left_arg = func.getArgumentAt(0); @@ -1184,7 +1185,9 @@ bool KeyCondition::tryPrepareSetIndex( set_transforming_chains.push_back(set_transforming_chain); } // For partition index, checking if set can be transformed to prune any partitions - else if (single_point && canSetValuesBeWrappedByFunctions(node, index_mapping.key_index, data_type, set_transforming_chain)) + else if ( + single_point && allow_constant_transformation + && canSetValuesBeWrappedByFunctions(node, index_mapping.key_index, data_type, set_transforming_chain)) { indexes_mapping.push_back(index_mapping); data_types.push_back(data_type); @@ -1954,7 +1957,7 @@ bool KeyCondition::extractAtomFromTree(const RPNBuilderTreeNode & node, RPNEleme if (functionIsInOrGlobalInOperator(func_name)) { - if (tryPrepareSetIndex(func, out, key_column_num, is_constant_transformed)) + if (tryPrepareSetIndex(func, out, key_column_num, allow_constant_transformation, is_constant_transformed)) 
{ key_arg_pos = 0; is_set_const = true; diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 8c946bd3bbd..20b40271dc2 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -312,6 +312,7 @@ private: const RPNBuilderFunctionTreeNode & func, RPNElement & out, size_t & out_key_column_num, + bool & allow_constant_transformation, bool & is_constant_transformed); /// Checks that the index can not be used. diff --git a/tests/queries/0_stateless/03269_partition_key_not_in_set.reference b/tests/queries/0_stateless/03269_partition_key_not_in_set.reference new file mode 100644 index 00000000000..1e34df0c77e --- /dev/null +++ b/tests/queries/0_stateless/03269_partition_key_not_in_set.reference @@ -0,0 +1,13 @@ +-- Monotonic function in partition key +48 +48 +-- Non-monotonic function in partition key +48 +48 +-- Multiple partition columns +50 +50 +96 +96 +98 +98 diff --git a/tests/queries/0_stateless/03269_partition_key_not_in_set.sql b/tests/queries/0_stateless/03269_partition_key_not_in_set.sql new file mode 100644 index 00000000000..562521fb7ee --- /dev/null +++ b/tests/queries/0_stateless/03269_partition_key_not_in_set.sql @@ -0,0 +1,81 @@ +-- Related to https://github.com/ClickHouse/ClickHouse/issues/69829 +-- +-- The main goal of the test is to assert that constant transformation +-- for set constant while partition pruning won't be performed +-- if it's not allowed (NOT IN operator case) + +DROP TABLE IF EXISTS 03269_filters; +CREATE TABLE 03269_filters ( + id Int32, + dt Date +) +engine = MergeTree +order by id; + +INSERT INTO 03269_filters +SELECT 6, '2020-01-01' +UNION ALL +SELECT 38, '2021-01-01'; + +SELECT '-- Monotonic function in partition key'; + +DROP TABLE IF EXISTS 03269_single_monotonic; +CREATE TABLE 03269_single_monotonic( + id Int32 +) +ENGINE = MergeTree +PARTITION BY intDiv(id, 10) +ORDER BY id; + +INSERT INTO 03269_single_monotonic SELECT number FROM numbers(50); + +SELECT count() FROM 03269_single_monotonic WHERE id NOT IN (6, 38); +SELECT count() FROM 03269_single_monotonic WHERE id NOT IN ( + SELECT id FROM 03269_filters +); + +DROP TABLE 03269_single_monotonic; + +SELECT '-- Non-monotonic function in partition key'; + +DROP TABLE IF EXISTS 03269_single_non_monotonic; +CREATE TABLE 03269_single_non_monotonic ( + id Int32 +) +ENGINE = MergeTree +PARTITION BY id % 10 +ORDER BY id; + +INSERT INTO 03269_single_non_monotonic SELECT number FROM numbers(50); + +SELECT count() FROM 03269_single_non_monotonic WHERE id NOT IN (6, 38); +SELECT count() FROM 03269_single_non_monotonic WHERE id NOT IN (SELECT id FROM 03269_filters); + +DROP TABLE 03269_single_non_monotonic; + +SELECT '-- Multiple partition columns'; + +DROP TABLE IF EXISTS 03269_multiple_part_cols; +CREATE TABLE 03269_multiple_part_cols ( + id Int32, + dt Date, +) +ENGINE = MergeTree +PARTITION BY (dt, intDiv(id, 10)) +ORDER BY id; + +INSERT INTO 03269_multiple_part_cols +SELECT number, '2020-01-01' FROM numbers(50) +UNION ALL +SELECT number, '2021-01-01' FROM numbers(50); + +SELECT count() FROM 03269_multiple_part_cols WHERE dt NOT IN ('2020-01-01'); +SELECT count() FROM 03269_multiple_part_cols WHERE dt NOT IN (SELECT dt FROM 03269_filters WHERE dt < '2021-01-01'); + +SELECT count() FROM 03269_multiple_part_cols WHERE id NOT IN (6, 38); +SELECT count() FROM 03269_multiple_part_cols WHERE id NOT IN (SELECT id FROM 03269_filters); + +SELECT count() FROM 03269_multiple_part_cols WHERE (id, dt) NOT IN ((6, '2020-01-01'), (38, 
'2021-01-01')); +SELECT count() FROM 03269_multiple_part_cols WHERE (id, dt) NOT IN (SELECT id, dt FROM 03269_filters); + +DROP TABLE 03269_multiple_part_cols; From bec94da77e8333d64c71b4bf778fbf78d10a8519 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 13:19:08 +0100 Subject: [PATCH 377/566] Progressing --- src/DataTypes/DataTypesDecimal.cpp | 8 +++----- src/DataTypes/DataTypesDecimal.h | 2 -- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/src/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp index fddae052ada..63bd4bf2a59 100644 --- a/src/DataTypes/DataTypesDecimal.cpp +++ b/src/DataTypes/DataTypesDecimal.cpp @@ -262,9 +262,9 @@ FOR_EACH_ARITHMETIC_TYPE(INVOKE); template requires (is_arithmetic_v && IsDataTypeDecimal) -ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & /*value*/, UInt32 /*scale*/, typename ToDataType::FieldType & /*result*/) +ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & value, UInt32 scale, typename ToDataType::FieldType & result) { -/* using FromFieldType = typename FromDataType::FieldType; + using FromFieldType = typename FromDataType::FieldType; using ToFieldType = typename ToDataType::FieldType; using ToNativeType = typename ToFieldType::NativeType; @@ -306,9 +306,7 @@ ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & /*value return ReturnType(convertDecimalsImpl, ToDataType, ReturnType>(static_cast(value), 0, scale, result)); else return ReturnType(convertDecimalsImpl, ToDataType, ReturnType>(static_cast(value), 0, scale, result)); - }*/ - - return ReturnType(); + } } #define DISPATCH(FROM_DATA_TYPE, TO_DATA_TYPE) \ diff --git a/src/DataTypes/DataTypesDecimal.h b/src/DataTypes/DataTypesDecimal.h index e0d49408981..09a25617506 100644 --- a/src/DataTypes/DataTypesDecimal.h +++ b/src/DataTypes/DataTypesDecimal.h @@ -3,9 +3,7 @@ #include #include #include -#include #include -#include #include #include #include From f2d6b1db7fb8b8eee52e2a33ce6f88648fe1c863 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 10 Nov 2024 12:39:10 +0000 Subject: [PATCH 378/566] Better --- contrib/SimSIMD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index bb0bd2e7137..fa60f1b8e35 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit bb0bd2e7137f02c555341d7c93124ed19f3c24fb +Subproject commit fa60f1b8e3582c50978f0ae86c2ebb6c9af957f3 From f0dc1330eb9d830161531819432a611a363fdc6b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 13:53:08 +0100 Subject: [PATCH 379/566] Rounding --- src/Functions/FunctionsRound.h | 42 ++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 255eca5b406..70ad4d17718 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -268,6 +268,19 @@ inline double roundWithMode(double x, RoundingMode mode) std::unreachable(); } +inline BFloat16 roundWithMode(BFloat16 x, RoundingMode mode) +{ + switch (mode) + { + case RoundingMode::Round: return BFloat16(nearbyintf(Float32(x))); + case RoundingMode::Floor: return BFloat16(floorf(Float32(x))); + case RoundingMode::Ceil: return BFloat16(ceilf(Float32(x))); + case RoundingMode::Trunc: return BFloat16(truncf(Float32(x))); + } + + std::unreachable(); +} + template class FloatRoundingComputationBase { @@ -289,6 +302,11 @@ public: } }; +template <> +class 
FloatRoundingComputationBase : public FloatRoundingComputationBase +{ +}; + /** Implementation of low-level round-off functions for floating-point values. */ @@ -688,20 +706,26 @@ public: using Types = std::decay_t; using DataType = typename Types::RightType; - if constexpr ((IsDataTypeNumber || IsDataTypeDecimal) - && !std::is_same_v) + if (arguments.size() > 1) { - if (arguments.size() > 1) + const ColumnWithTypeAndName & scale_column = arguments[1]; + + auto call_scale = [&](const auto & scaleTypes) -> bool { - const ColumnWithTypeAndName & scale_column = arguments[1]; - res = Dispatcher::template apply(value_arg.column.get(), scale_column.column.get()); + using ScaleTypes = std::decay_t; + using ScaleType = typename ScaleTypes::RightType; + + res = Dispatcher::template apply(value_arg.column.get(), scale_column.column.get()); return true; - } - res = Dispatcher::template apply(value_arg.column.get()); + }; + + TypeIndex right_index = scale_column.type->getTypeId(); + if (!callOnBasicType(right_index, call_scale)) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Scale argument for rounding functions must have integer type"); return true; } - else - return false; + res = Dispatcher::template apply(value_arg.column.get()); + return true; }; #if !defined(__SSE4_1__) From db98fb4c79252d6305eabc06a749e2082bb1c489 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 14:39:45 +0100 Subject: [PATCH 380/566] Documentation --- src/Functions/FunctionsConversion.cpp | 64 +++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index f37dff35862..37a4ba30d30 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -5458,7 +5458,17 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); - factory.registerFunction(); + + factory.registerFunction(FunctionDocumentation{.description=R"( +Converts Float32 to BFloat16 with losing the precision. + +Example: +[example:typical] +)", + .examples{ + {"typical", "SELECT toBFloat16(12.3::Float32);", "12.3125"}}, + .categories{"Conversion"}}); + factory.registerFunction(); factory.registerFunction(); @@ -5497,7 +5507,31 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); - factory.registerFunction(); + + factory.registerFunction(FunctionDocumentation{.description=R"( +Converts String to BFloat16. + +If the string does not represent a floating point value, the function returns zero. + +The function allows a silent loss of precision while converting from the string representation. In that case, it will return the truncated result. 
+ +Example of successful conversion: +[example:typical] + +Examples of not successful conversion: +[example:invalid1] +[example:invalid2] + +Example of a loss of precision: +[example:precision] +)", + .examples{ + {"typical", "SELECT toBFloat16OrZero('12.3');", "12.3125"}}, + {"invalid1", "SELECT toBFloat16OrZero('abc');", "0"}}, + {"invalid2", "SELECT toBFloat16OrZero(' 1');", "0"}}, + {"precision", "SELECT toBFloat16OrZero('12.3456789');", "12.375"}}, + .categories{"Conversion"}}); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); @@ -5526,7 +5560,31 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); - factory.registerFunction(); + + factory.registerFunction(FunctionDocumentation{.description=R"( +Converts String to Nullable(BFloat16). + +If the string does not represent a floating point value, the function returns NULL. + +The function allows a silent loss of precision while converting from the string representation. In that case, it will return the truncated result. + +Example of successful conversion: +[example:typical] + +Examples of not successful conversion: +[example:invalid1] +[example:invalid2] + +Example of a loss of precision: +[example:precision] +)", + .examples{ + {"typical", "SELECT toBFloat16OrNull('12.3');", "12.3125"}}, + {"invalid1", "SELECT toBFloat16OrNull('abc');", "NULL"}}, + {"invalid2", "SELECT toBFloat16OrNull(' 1');", "NULL"}}, + {"precision", "SELECT toBFloat16OrNull('12.3456789');", "12.375"}}, + .categories{"Conversion"}}); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); From d1e638da6e65a2f0de4aa72b78fd894c090606de Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Sun, 10 Nov 2024 15:12:21 +0100 Subject: [PATCH 381/566] Let's name cherry-pick branches the same way as backports --- tests/ci/cherry_pick.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index a796f63de6c..9bdc184f661 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -97,7 +97,7 @@ close it. 
self.pr = pr self.repo = repo - self.cherrypick_branch = f"cherrypick/{name}/{pr.merge_commit_sha}" + self.cherrypick_branch = f"cherrypick/{name}/{pr.number}" self.backport_branch = f"backport/{name}/{pr.number}" self.cherrypick_pr = None # type: Optional[PullRequest] self.backport_pr = None # type: Optional[PullRequest] From 1c85a0401fbbddccbd3e310a965ce0eb67079a2b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 15:14:17 +0100 Subject: [PATCH 382/566] Documentation --- src/Functions/FunctionsConversion.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 37a4ba30d30..7f4ccc338cf 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -5526,9 +5526,9 @@ Example of a loss of precision: [example:precision] )", .examples{ - {"typical", "SELECT toBFloat16OrZero('12.3');", "12.3125"}}, - {"invalid1", "SELECT toBFloat16OrZero('abc');", "0"}}, - {"invalid2", "SELECT toBFloat16OrZero(' 1');", "0"}}, + {"typical", "SELECT toBFloat16OrZero('12.3');", "12.3125"}, + {"invalid1", "SELECT toBFloat16OrZero('abc');", "0"}, + {"invalid2", "SELECT toBFloat16OrZero(' 1');", "0"}, {"precision", "SELECT toBFloat16OrZero('12.3456789');", "12.375"}}, .categories{"Conversion"}}); @@ -5579,9 +5579,9 @@ Example of a loss of precision: [example:precision] )", .examples{ - {"typical", "SELECT toBFloat16OrNull('12.3');", "12.3125"}}, - {"invalid1", "SELECT toBFloat16OrNull('abc');", "NULL"}}, - {"invalid2", "SELECT toBFloat16OrNull(' 1');", "NULL"}}, + {"typical", "SELECT toBFloat16OrNull('12.3');", "12.3125"}, + {"invalid1", "SELECT toBFloat16OrNull('abc');", "NULL"}, + {"invalid2", "SELECT toBFloat16OrNull(' 1');", "NULL"}, {"precision", "SELECT toBFloat16OrNull('12.3456789');", "12.375"}}, .categories{"Conversion"}}); From bf8fc60bacbb95e12760b00960115e2a6230c280 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 16:20:44 +0100 Subject: [PATCH 383/566] Arithmetic --- src/Functions/FunctionBinaryArithmetic.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index 854b40df441..43140427170 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -804,7 +804,7 @@ class FunctionBinaryArithmetic : public IFunction DataTypeFixedString, DataTypeString, DataTypeInterval>; - using Floats = TypeList; + using Floats = TypeList; using ValidTypes = std::conditional_t, @@ -2043,7 +2043,15 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A using DecimalResultType = typename BinaryOperationTraits::DecimalResultDataType; if constexpr (std::is_same_v) + { return nullptr; + } + else if constexpr ((std::is_same_v || std::is_same_v) + && (sizeof(typename LeftDataType::FieldType) > 8 || sizeof(typename RightDataType::FieldType) > 8)) + { + /// Big integers and BFloat16 are not supported together. + return nullptr; + } else // we can't avoid the else because otherwise the compiler may assume the ResultDataType may be Invalid // and that would produce the compile error. 
{ @@ -2060,7 +2068,7 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A ColumnPtr left_col = nullptr; ColumnPtr right_col = nullptr; - /// When Decimal op Float32/64, convert both of them into Float64 + /// When Decimal op Float32/64/16, convert both of them into Float64 if constexpr (decimal_with_float) { const auto converted_type = std::make_shared(); @@ -2095,7 +2103,6 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A /// Here we check if we have `intDiv` or `intDivOrZero` and at least one of the arguments is decimal, because in this case originally we had result as decimal, so we need to convert result into integer after calculations else if constexpr (!decimal_with_float && (is_int_div || is_int_div_or_zero) && (IsDataTypeDecimal || IsDataTypeDecimal)) { - if constexpr (!std::is_same_v) { DataTypePtr type_res; From 62c94a784158274e28cf05136cf4023de47f4f01 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 16:40:24 +0100 Subject: [PATCH 384/566] Maybe better --- cmake/cpu_features.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/cpu_features.cmake b/cmake/cpu_features.cmake index 2bb6deb4847..dbc77d835be 100644 --- a/cmake/cpu_features.cmake +++ b/cmake/cpu_features.cmake @@ -85,7 +85,7 @@ elseif (ARCH_AARCH64) # [8] https://developer.arm.com/documentation/102651/a/What-are-dot-product-intructions- # [9] https://developer.arm.com/documentation/dui0801/g/A64-Data-Transfer-Instructions/LDAPR?lang=en # [10] https://github.com/aws/aws-graviton-getting-started/blob/main/README.md - set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc") + set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc+bf16") endif () # Best-effort check: The build generates and executes intermediate binaries, e.g. protoc and llvm-tablegen. 
If we build on ARM for ARM
From 08e6e598f7c140d0be39a64d933521872716ed2c Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sun, 10 Nov 2024 17:41:37 +0100
Subject: [PATCH 385/566] Better code

---
 src/Common/findExtreme.h  | 2 +-
 src/DataTypes/IDataType.h | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/Common/findExtreme.h b/src/Common/findExtreme.h
index c2b31c51e87..68e7360d6e2 100644
--- a/src/Common/findExtreme.h
+++ b/src/Common/findExtreme.h
@@ -11,7 +11,7 @@ namespace DB
 {
 template
-concept has_find_extreme_implementation = (is_any_of);
+concept has_find_extreme_implementation = (is_any_of);
 template
 std::optional findExtremeMin(const T * __restrict ptr, size_t start, size_t end);
diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h
index 4d64b927d83..1e41d6b2eba 100644
--- a/src/DataTypes/IDataType.h
+++ b/src/DataTypes/IDataType.h
@@ -606,7 +606,6 @@ template inline constexpr bool IsDataTypeEnum> = tr
 M(Int16) \
 M(Int32) \
 M(Int64) \
- M(BFloat16) \
 M(Float32) \
 M(Float64)
From b6b850a2f11301272ee28fe2274733c2cdb0c7c6 Mon Sep 17 00:00:00 2001
From: Robert Schulze
Date: Sun, 10 Nov 2024 17:03:35 +0000
Subject: [PATCH 386/566] Docs: Add row and byte sizes of tables

---
 docs/en/getting-started/example-datasets/tpch.md | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/docs/en/getting-started/example-datasets/tpch.md b/docs/en/getting-started/example-datasets/tpch.md
index 5fa0d779ecd..379b92cbe9a 100644
--- a/docs/en/getting-started/example-datasets/tpch.md
+++ b/docs/en/getting-started/example-datasets/tpch.md
@@ -33,6 +33,21 @@ Then, generate the data. Parameter `-s` specifies the scale factor. For example,
 ./dbgen -s 100
 ```
+Detailed table sizes with scale factor 100:
+
+| Table    | size (in rows) | size (compressed in ClickHouse) |
+|----------|----------------|---------------------------------|
+| nation   | 25             | 2 kB                            |
+| region   | 5              | 1 kB                            |
+| part     | 20.000.000     | 895 MB                          |
+| supplier | 1.000.000      | 75 MB                           |
+| partsupp | 80.000.000     | 4.37 GB                         |
+| customer | 15.000.000     | 1.19 GB                         |
+| orders   | 150.000.000    | 6.15 GB                         |
+| lineitem | 600.000.000    | 26.69 GB                        |
+
+(The table sizes in ClickHouse are taken from `system.tables.total_bytes` and are based on the table definitions below.)
+
 Now create tables in ClickHouse.
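To reproduce the row counts and compressed sizes quoted in the table above, a query along these lines works; it assumes the TPC-H tables were created in the `default` database, so adjust the `database` filter if you used a different one:

```sql
SELECT
    name,
    total_rows,
    formatReadableSize(total_bytes) AS compressed_size
FROM system.tables
WHERE database = 'default'
  AND name IN ('nation', 'region', 'part', 'supplier', 'partsupp', 'customer', 'orders', 'lineitem')
ORDER BY total_bytes;
```

Exact byte sizes depend on the ClickHouse version and compression settings, so small deviations from the numbers above are expected.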
We stick as closely as possible to the rules of the TPC-H specification: From 3668a78589821d89f8f7cce92e6c2bc54fff6ea3 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 10 Nov 2024 17:24:00 +0000 Subject: [PATCH 387/566] Fix spelling --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index a08143467cd..a58b5e9ff58 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -186,7 +186,6 @@ ComplexKeyCache ComplexKeyDirect ComplexKeyHashed Composable -composable ConcurrencyControlAcquired ConcurrencyControlSoftLimit Config @@ -405,12 +404,12 @@ ITION Identifiant IdentifierQuotingRule IdentifierQuotingStyle -Incrementing -IndexesAreNeighbors -InfluxDB InJodaSyntax InJodaSyntaxOrNull InJodaSyntaxOrZero +Incrementing +IndexesAreNeighbors +InfluxDB Instana IntN Integrations @@ -1475,6 +1474,7 @@ combinator combinators comparising composable +composable compressability concat concatAssumeInjective @@ -2355,6 +2355,7 @@ parsedatetime parsers partitionID partitionId +partsupp pathFull pclmulqdq pcre From f9fa5ed515daaf6bb30ed13fd882a4c92cb84351 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 10 Nov 2024 20:38:51 +0000 Subject: [PATCH 388/566] Docs: Steps to populate TPC-H tables from S3 --- .../getting-started/example-datasets/tpch.md | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/docs/en/getting-started/example-datasets/tpch.md b/docs/en/getting-started/example-datasets/tpch.md index 379b92cbe9a..c0bf54a5a7c 100644 --- a/docs/en/getting-started/example-datasets/tpch.md +++ b/docs/en/getting-started/example-datasets/tpch.md @@ -166,10 +166,26 @@ clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO orders FORMAT clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO lineitem FORMAT CSV" < lineitem.tbl ``` -The queries are generated by `./qgen -s `. Example queries for `s = 100`: +:::note +Instead of using tpch-kit and generating the tables by yourself, you can alternatively import the data from a public S3 bucket. Make sure +to create empty tables first using above `CREATE` statements. 
+ +```sql +INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +```` +::: ## Queries +The queries are generated by `./qgen -s `. Example queries for `s = 100`: + **Correctness** The result of the queries agrees with the official results unless mentioned otherwise. To verify, generate a TPC-H database with scale From 892d43bd7d57faed05cdcef77e684a09dbad3e36 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 10 Nov 2024 20:50:07 +0000 Subject: [PATCH 389/566] SF 1 vs. 100 --- .../getting-started/example-datasets/tpch.md | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/docs/en/getting-started/example-datasets/tpch.md b/docs/en/getting-started/example-datasets/tpch.md index c0bf54a5a7c..de2c425b402 100644 --- a/docs/en/getting-started/example-datasets/tpch.md +++ b/docs/en/getting-started/example-datasets/tpch.md @@ -171,14 +171,25 @@ Instead of using tpch-kit and generating the tables by yourself, you can alterna to create empty tables first using above `CREATE` statements. 
```sql -INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +-- Scaling factor 1 +INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/nation.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/region.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/part.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/supplier.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/partsupp.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/customer.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/orders.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, 
input_format_csv_empty_as_default = 1; +INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/lineitem.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; + +-- Scaling factor 100 +INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; ```` ::: From 9baa5911f9183e1652593b5d362545377baeea2a Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sun, 10 Nov 2024 20:54:59 +0000 Subject: [PATCH 390/566] Debugging stack with PR queries --- src/Planner/findParallelReplicasQuery.cpp | 57 +++++++++++++++++------ 1 file changed, 42 insertions(+), 15 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 28e2dd8a0ea..fbe2993b7c6 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -23,6 +23,8 @@ #include #include +#include + namespace DB { namespace Setting @@ -38,12 +40,12 @@ namespace ErrorCodes /// Returns a list of (sub)queries (candidates) which may support parallel replicas. /// The rule is : -/// subquery has only LEFT or ALL INNER JOIN (or none), and left part is MergeTree table or subquery candidate as well. +/// subquery has only LEFT / RIGHT / ALL INNER JOIN (or none), and left / right part is MergeTree table or subquery candidate as well. /// /// Additional checks are required, so we return many candidates. The innermost subquery is on top. 
-std::stack getSupportingParallelReplicasQuery(const IQueryTreeNode * query_tree_node) +std::vector getSupportingParallelReplicasQuery(const IQueryTreeNode * query_tree_node) { - std::stack res; + std::vector res; while (query_tree_node) { @@ -75,7 +77,7 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre { const auto & query_node_to_process = query_tree_node->as(); query_tree_node = query_node_to_process.getJoinTree().get(); - res.push(&query_node_to_process); + res.push_back(&query_node_to_process); break; } case QueryTreeNodeType::UNION: @@ -162,14 +164,25 @@ QueryTreeNodePtr replaceTablesWithDummyTables(QueryTreeNodePtr query, const Cont return query->cloneAndReplace(visitor.replacement_map); } +static void dumpStack(const std::vector & stack) +{ + std::ranges::reverse_view rv{stack}; + for (const auto * node : rv) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "{}\n{}", CityHash_v1_0_2::Hash128to64(node->getTreeHash()), node->dumpTree()); +} + /// Find the best candidate for parallel replicas execution by verifying query plan. -/// If query plan has only Expression, Filter of Join steps, we can execute it fully remotely and check the next query. +/// If query plan has only Expression, Filter or Join steps, we can execute it fully remotely and check the next query. /// Otherwise we can execute current query up to WithMergableStage only. const QueryNode * findQueryForParallelReplicas( - std::stack stack, + std::vector stack, const std::unordered_map & mapping, const Settings & settings) { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "{}", StackTrace().toString()); + + dumpStack(stack); + struct Frame { const QueryPlan::Node * node = nullptr; @@ -188,14 +201,20 @@ const QueryNode * findQueryForParallelReplicas( while (!stack.empty()) { - const QueryNode * const subquery_node = stack.top(); - stack.pop(); + const QueryNode * const subquery_node = stack.back(); + stack.pop_back(); auto it = mapping.find(subquery_node); /// This should not happen ideally. if (it == mapping.end()) break; + LOG_DEBUG( + getLogger(__PRETTY_FUNCTION__), + "{} : {}", + CityHash_v1_0_2::Hash128to64(it->first->getTreeHash()), + it->second->step->getName()); + std::stack nodes_to_check; nodes_to_check.push({.node = it->second, .inside_join = false}); bool can_distribute_full_node = true; @@ -208,6 +227,8 @@ const QueryNode * findQueryForParallelReplicas( const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Step {} childrens={}", step->getName(), children.size()); + if (children.empty()) { /// Found a source step. @@ -235,7 +256,7 @@ const QueryNode * findQueryForParallelReplicas( else { const auto * join = typeid_cast(step); - /// We've checked that JOIN is INNER/LEFT in query tree. + /// We've checked that JOIN is INNER/LEFT/RIGHT on query tree level before. /// Don't distribute UNION node. if (!join) return res; @@ -286,7 +307,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr return nullptr; /// We don't have any subquery and storage can process parallel replicas by itself. - if (stack.top() == query_tree_node.get()) + if (stack.back() == query_tree_node.get()) return nullptr; /// This is needed to avoid infinite recursion. 
@@ -309,18 +330,24 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr const auto & mapping = planner.getQueryNodeToPlanStepMapping(); const auto * res = findQueryForParallelReplicas(new_stack, mapping, context->getSettingsRef()); - /// Now, return a query from initial stack. if (res) { + // find query in initial stack while (!new_stack.empty()) { - if (res == new_stack.top()) - return stack.top(); + if (res == new_stack.back()) + { + res = stack.back(); + break; + } - stack.pop(); - new_stack.pop(); + stack.pop_back(); + new_stack.pop_back(); } } + + if (res) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Choosen query: {}", res->dumpTree()); return res; } From a74f491df3c217bf4132b08118e4708b05d3bf60 Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Sun, 10 Nov 2024 22:02:01 +0100 Subject: [PATCH 391/566] Fix typo --- docs/en/sql-reference/functions/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/index.md b/docs/en/sql-reference/functions/index.md index c0256ba4735..04a87c369ab 100644 --- a/docs/en/sql-reference/functions/index.md +++ b/docs/en/sql-reference/functions/index.md @@ -24,7 +24,7 @@ All expressions in a query that have the same AST (the same record or same resul ## Types of Results -All functions return a single return as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function. +All functions return a single value as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function. 
## Constants From 7877d59ff6e7334cde310b2eec626bc6ba7442fe Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 22:13:22 +0100 Subject: [PATCH 392/566] Manual implementation --- base/base/BFloat16.h | 300 +++++++++++++++++- base/base/DecomposedFloat.h | 2 +- base/base/TypeLists.h | 5 +- src/AggregateFunctions/AggregateFunctionAvg.h | 2 +- .../AggregateFunctionDeltaSum.cpp | 6 +- .../AggregateFunctionDeltaSumTimestamp.cpp | 10 +- .../AggregateFunctionMaxIntersections.cpp | 6 +- .../AggregateFunctionSparkbar.cpp | 12 +- src/AggregateFunctions/AggregateFunctionSum.h | 11 +- src/Core/Types_fwd.h | 2 +- src/Functions/FunctionsRound.h | 2 +- src/Functions/PolygonUtils.h | 4 +- src/Functions/divide.cpp | 2 +- src/IO/WriteHelpers.h | 2 +- 14 files changed, 318 insertions(+), 48 deletions(-) diff --git a/base/base/BFloat16.h b/base/base/BFloat16.h index 99eab5c67cb..9c6196d6aab 100644 --- a/base/base/BFloat16.h +++ b/base/base/BFloat16.h @@ -1,22 +1,294 @@ #pragma once -#include +#include +#include -using BFloat16 = __bf16; +//using BFloat16 = __bf16; + +class BFloat16 +{ +private: + UInt16 x = 0; + +public: + constexpr BFloat16() = default; + constexpr BFloat16(const BFloat16 & other) = default; + constexpr BFloat16 & operator=(const BFloat16 & other) = default; + + explicit constexpr BFloat16(const Float32 & other) + { + x = static_cast(std::bit_cast(other) >> 16); + } + + template + explicit constexpr BFloat16(const T & other) + : BFloat16(Float32(other)) + { + } + + template + constexpr BFloat16 & operator=(const T & other) + { + *this = BFloat16(other); + return *this; + } + + explicit constexpr operator Float32() const + { + return std::bit_cast(static_cast(x) << 16); + } + + template + explicit constexpr operator T() const + { + return T(Float32(*this)); + } + + constexpr bool isFinite() const + { + return (x & 0b0111111110000000) != 0b0111111110000000; + } + + constexpr bool isNaN() const + { + return !isFinite() && (x & 0b0000000001111111) != 0b0000000000000000; + } + + constexpr bool signBit() const + { + return x & 0b1000000000000000; + } + + constexpr bool operator==(const BFloat16 & other) const + { + return x == other.x; + } + + constexpr bool operator!=(const BFloat16 & other) const + { + return x != other.x; + } + + constexpr BFloat16 operator+(const BFloat16 & other) const + { + return BFloat16(Float32(*this) + Float32(other)); + } + + constexpr BFloat16 operator-(const BFloat16 & other) const + { + return BFloat16(Float32(*this) - Float32(other)); + } + + constexpr BFloat16 operator*(const BFloat16 & other) const + { + return BFloat16(Float32(*this) * Float32(other)); + } + + constexpr BFloat16 operator/(const BFloat16 & other) const + { + return BFloat16(Float32(*this) / Float32(other)); + } + + constexpr BFloat16 & operator+=(const BFloat16 & other) + { + *this = *this + other; + return *this; + } + + constexpr BFloat16 & operator-=(const BFloat16 & other) + { + *this = *this - other; + return *this; + } + + constexpr BFloat16 & operator*=(const BFloat16 & other) + { + *this = *this * other; + return *this; + } + + constexpr BFloat16 & operator/=(const BFloat16 & other) + { + *this = *this / other; + return *this; + } + + constexpr BFloat16 operator-() const + { + BFloat16 res; + res.x = x ^ 0b1000000000000000; + return res; + } +}; + + +template +requires(!std::is_same_v) +constexpr bool operator==(const BFloat16 & a, const T & b) +{ + return Float32(a) == b; +} + +template +requires(!std::is_same_v) +constexpr bool operator==(const T & a, const BFloat16 & b) 
+{ + return a == Float32(b); +} + +template +requires(!std::is_same_v) +constexpr bool operator!=(const BFloat16 & a, const T & b) +{ + return Float32(a) != b; +} + +template +requires(!std::is_same_v) +constexpr bool operator!=(const T & a, const BFloat16 & b) +{ + return a != Float32(b); +} + +template +requires(!std::is_same_v) +constexpr bool operator<(const BFloat16 & a, const T & b) +{ + return Float32(a) < b; +} + +template +requires(!std::is_same_v) +constexpr bool operator<(const T & a, const BFloat16 & b) +{ + return a < Float32(b); +} + +constexpr inline bool operator<(BFloat16 a, BFloat16 b) +{ + return Float32(a) < Float32(b); +} + +template +requires(!std::is_same_v) +constexpr bool operator>(const BFloat16 & a, const T & b) +{ + return Float32(a) > b; +} + +template +requires(!std::is_same_v) +constexpr bool operator>(const T & a, const BFloat16 & b) +{ + return a > Float32(b); +} + +constexpr inline bool operator>(BFloat16 a, BFloat16 b) +{ + return Float32(a) > Float32(b); +} + + +template +requires(!std::is_same_v) +constexpr bool operator<=(const BFloat16 & a, const T & b) +{ + return Float32(a) <= b; +} + +template +requires(!std::is_same_v) +constexpr bool operator<=(const T & a, const BFloat16 & b) +{ + return a <= Float32(b); +} + +constexpr inline bool operator<=(BFloat16 a, BFloat16 b) +{ + return Float32(a) <= Float32(b); +} + +template +requires(!std::is_same_v) +constexpr bool operator>=(const BFloat16 & a, const T & b) +{ + return Float32(a) >= b; +} + +template +requires(!std::is_same_v) +constexpr bool operator>=(const T & a, const BFloat16 & b) +{ + return a >= Float32(b); +} + +constexpr inline bool operator>=(BFloat16 a, BFloat16 b) +{ + return Float32(a) >= Float32(b); +} + + +template +requires(!std::is_same_v) +constexpr inline auto operator+(T a, BFloat16 b) +{ + return a + Float32(b); +} + +template +requires(!std::is_same_v) +constexpr inline auto operator+(BFloat16 a, T b) +{ + return Float32(a) + b; +} + +template +requires(!std::is_same_v) +constexpr inline auto operator-(T a, BFloat16 b) +{ + return a - Float32(b); +} + +template +requires(!std::is_same_v) +constexpr inline auto operator-(BFloat16 a, T b) +{ + return Float32(a) - b; +} + +template +requires(!std::is_same_v) +constexpr inline auto operator*(T a, BFloat16 b) +{ + return a * Float32(b); +} + +template +requires(!std::is_same_v) +constexpr inline auto operator*(BFloat16 a, T b) +{ + return Float32(a) * b; +} + +template +requires(!std::is_same_v) +constexpr inline auto operator/(T a, BFloat16 b) +{ + return a / Float32(b); +} + +template +requires(!std::is_same_v) +constexpr inline auto operator/(BFloat16 a, T b) +{ + return Float32(a) / b; +} + namespace std { - inline constexpr bool isfinite(BFloat16 x) { return (bit_cast(x) & 0b0111111110000000) != 0b0111111110000000; } - inline constexpr bool signbit(BFloat16 x) { return bit_cast(x) & 0b1000000000000000; } -} - -inline Float32 BFloat16ToFloat32(BFloat16 x) -{ - return bit_cast(static_cast(bit_cast(x)) << 16); -} - -inline BFloat16 Float32ToBFloat16(Float32 x) -{ - return bit_cast(std::bit_cast(x) >> 16); + inline constexpr bool isfinite(BFloat16 x) { return x.isFinite(); } + inline constexpr bool isnan(BFloat16 x) { return x.isNaN(); } + inline constexpr bool signbit(BFloat16 x) { return x.signBit(); } } diff --git a/base/base/DecomposedFloat.h b/base/base/DecomposedFloat.h index 26a929b4997..3bd059cb21c 100644 --- a/base/base/DecomposedFloat.h +++ b/base/base/DecomposedFloat.h @@ -11,7 +11,7 @@ template struct FloatTraits; 
template <> -struct FloatTraits<__bf16> +struct FloatTraits { using UInt = uint16_t; static constexpr size_t bits = 16; diff --git a/base/base/TypeLists.h b/base/base/TypeLists.h index ce3111b1da3..375ea94b5ea 100644 --- a/base/base/TypeLists.h +++ b/base/base/TypeLists.h @@ -9,10 +9,11 @@ namespace DB { using TypeListNativeInt = TypeList; -using TypeListFloat = TypeList; -using TypeListNativeNumber = TypeListConcat; +using TypeListNativeFloat = TypeList; +using TypeListNativeNumber = TypeListConcat; using TypeListWideInt = TypeList; using TypeListInt = TypeListConcat; +using TypeListFloat = TypeListConcat>; using TypeListIntAndFloat = TypeListConcat; using TypeListDecimal = TypeList; using TypeListNumber = TypeListConcat; diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index 6e1e9289565..8d53a081ee0 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -231,7 +231,7 @@ public: void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final { - increment(place, static_cast(*columns[0]).getData()[row_num]); + increment(place, Numerator(static_cast(*columns[0]).getData()[row_num])); ++this->data(place).denominator; } diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSum.cpp b/src/AggregateFunctions/AggregateFunctionDeltaSum.cpp index 42169c34c25..c61b9918a35 100644 --- a/src/AggregateFunctions/AggregateFunctionDeltaSum.cpp +++ b/src/AggregateFunctions/AggregateFunctionDeltaSum.cpp @@ -27,9 +27,9 @@ namespace template struct AggregationFunctionDeltaSumData { - T sum = 0; - T last = 0; - T first = 0; + T sum{}; + T last{}; + T first{}; bool seen = false; }; diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp index 5819c533fd9..dc1adead87c 100644 --- a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp +++ b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp @@ -25,11 +25,11 @@ namespace template struct AggregationFunctionDeltaSumTimestampData { - ValueType sum = 0; - ValueType first = 0; - ValueType last = 0; - TimestampType first_ts = 0; - TimestampType last_ts = 0; + ValueType sum{}; + ValueType first{}; + ValueType last{}; + TimestampType first_ts{}; + TimestampType last_ts{}; bool seen = false; }; diff --git a/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp b/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp index ca91f960dab..f4edec7f528 100644 --- a/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp +++ b/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp @@ -155,9 +155,9 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - Int64 current_intersections = 0; - Int64 max_intersections = 0; - PointType position_of_max_intersections = 0; + Int64 current_intersections{}; + Int64 max_intersections{}; + PointType position_of_max_intersections{}; /// const_cast because we will sort the array auto & array = this->data(place).value; diff --git a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp index 33412d50b21..de2a741e105 100644 --- a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp +++ b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp @@ -45,7 +45,7 @@ struct AggregateFunctionSparkbarData Y insert(const X & x, const Y & y) { if (isNaN(y) || y <= 0) - 
return 0; + return {}; auto [it, inserted] = points.insert({x, y}); if (!inserted) @@ -173,13 +173,13 @@ private: if (from_x >= to_x) { - size_t sz = updateFrame(values, 8); + size_t sz = updateFrame(values, Y{8}); values.push_back('\0'); offsets.push_back(offsets.empty() ? sz + 1 : offsets.back() + sz + 1); return; } - PaddedPODArray histogram(width, 0); + PaddedPODArray histogram(width, Y{0}); PaddedPODArray count_histogram(width, 0); /// The number of points in each bucket for (const auto & point : data.points) @@ -218,10 +218,10 @@ private: for (size_t i = 0; i < histogram.size(); ++i) { if (count_histogram[i] > 0) - histogram[i] /= count_histogram[i]; + histogram[i] = histogram[i] / count_histogram[i]; } - Y y_max = 0; + Y y_max{}; for (auto & y : histogram) { if (isNaN(y) || y <= 0) @@ -245,7 +245,7 @@ private: continue; } - constexpr auto levels_num = static_cast(BAR_LEVELS - 1); + constexpr auto levels_num = Y{BAR_LEVELS - 1}; if constexpr (is_floating_point) { y = y / (y_max / levels_num) + 1; diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index f6c51241a5c..7c7fb6338a2 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -83,7 +83,7 @@ struct AggregateFunctionSumData while (ptr < unrolled_end) { for (size_t i = 0; i < unroll_count; ++i) - Impl::add(partial_sums[i], ptr[i]); + Impl::add(partial_sums[i], T(ptr[i])); ptr += unroll_count; } @@ -95,7 +95,7 @@ struct AggregateFunctionSumData T local_sum{}; while (ptr < end_ptr) { - Impl::add(local_sum, *ptr); + Impl::add(local_sum, T(*ptr)); ++ptr; } Impl::add(sum, local_sum); @@ -227,7 +227,7 @@ struct AggregateFunctionSumData while (ptr < end_ptr) { if (!*condition_map == add_if_zero) - Impl::add(local_sum, *ptr); + Impl::add(local_sum, T(*ptr)); ++ptr; ++condition_map; } @@ -488,10 +488,7 @@ public: void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { const auto & column = assert_cast(*columns[0]); - if constexpr (is_big_int_v) - this->data(place).add(static_cast(column.getData()[row_num])); - else - this->data(place).add(column.getData()[row_num]); + this->data(place).add(static_cast(column.getData()[row_num])); } void addBatchSinglePlace( diff --git a/src/Core/Types_fwd.h b/src/Core/Types_fwd.h index 6d3383ae7ff..b94a29ce72c 100644 --- a/src/Core/Types_fwd.h +++ b/src/Core/Types_fwd.h @@ -21,7 +21,7 @@ using Int128 = wide::integer<128, signed>; using UInt128 = wide::integer<128, unsigned>; using Int256 = wide::integer<256, signed>; using UInt256 = wide::integer<256, unsigned>; -using BFloat16 = __bf16; +class BFloat16; namespace DB { diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 70ad4d17718..6c9cc8a37b3 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -298,7 +298,7 @@ public: static VectorType prepare(size_t scale) { - return load1(scale); + return load1(ScalarType(scale)); } }; diff --git a/src/Functions/PolygonUtils.h b/src/Functions/PolygonUtils.h index bf8241774a6..601ffcb00b4 100644 --- a/src/Functions/PolygonUtils.h +++ b/src/Functions/PolygonUtils.h @@ -583,7 +583,7 @@ struct CallPointInPolygon template static ColumnPtr call(const IColumn & x, const IColumn & y, PointInPolygonImpl && impl) { - using Impl = TypeListChangeRoot; + using Impl = TypeListChangeRoot; if (auto column = typeid_cast *>(&x)) return Impl::template call(*column, y, impl); return CallPointInPolygon::call(x, 
y, impl); @@ -609,7 +609,7 @@ struct CallPointInPolygon<> template NO_INLINE ColumnPtr pointInPolygon(const IColumn & x, const IColumn & y, PointInPolygonImpl && impl) { - using Impl = TypeListChangeRoot; + using Impl = TypeListChangeRoot; return Impl::call(x, y, impl); } diff --git a/src/Functions/divide.cpp b/src/Functions/divide.cpp index 7c67245c382..3947ba2d142 100644 --- a/src/Functions/divide.cpp +++ b/src/Functions/divide.cpp @@ -18,7 +18,7 @@ struct DivideFloatingImpl template static NO_SANITIZE_UNDEFINED Result apply(A a [[maybe_unused]], B b [[maybe_unused]]) { - return static_cast(a) / b; + return static_cast(a) / static_cast(b); } #if USE_EMBEDDED_COMPILER diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index f01e09e3f73..0a32c4c5446 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -174,7 +174,7 @@ inline size_t writeFloatTextFastPath(T x, char * buffer) } else if constexpr (std::is_same_v) { - Float32 f32 = BFloat16ToFloat32(x); + Float32 f32 = Float32(x); if (DecomposedFloat32(f32).isIntegerInRepresentableRange()) result = itoa(Int32(f32), buffer) - buffer; From 16d05bbc6d9a1369b393f836d0ccd8ea64fe2057 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 22:41:40 +0100 Subject: [PATCH 393/566] Comparisons --- base/base/BFloat16.h | 22 +++++++++++++++++++++- src/Functions/FunctionsComparison.h | 7 +++++-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/base/base/BFloat16.h b/base/base/BFloat16.h index 9c6196d6aab..f7491b64eb3 100644 --- a/base/base/BFloat16.h +++ b/base/base/BFloat16.h @@ -4,8 +4,28 @@ #include -//using BFloat16 = __bf16; +/** BFloat16 is a 16-bit floating point type, which has the same number (8) of exponent bits as Float32. + * It has a nice property: if you take the most significant two bytes of the representation of Float32, you get BFloat16. + * It is different from the IEEE Float16 (half precision) data type, which has fewer exponent bits and more mantissa bits. + * + * It is popular in AI applications, such as running quantized models and doing vector search, + * where the range of the data type is more important than its precision. + * + * It has also recently gained good hardware support in GPUs, as well as in x86-64 and AArch64 CPUs, including SIMD instructions, + * but it is still rarely utilized by compilers. + * + * The name means "Brain" Float16, originating from "Google Brain", where its usage became notable. + * It is also known under the name "bf16". You can call it either way, but it is crucial not to confuse it with Float16. + * Here is a manual implementation of this data type. Only the required operations are implemented. + * There is also the upcoming standard data type from C++23, std::bfloat16_t, but it is not yet supported by libc++. + * There is also the compiler's builtin data type, __bf16, but clang does not compile all operations with it, + * sometimes giving an "invalid function call" error (which indicates a sketchy implementation) + * and giving errors during the "instruction select pass" during link-time optimization. + * + * The current approach is to use this manual implementation, and to provide SIMD specializations of certain operations + * in the places where they are needed.
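For illustration, a minimal standalone sketch of the truncation property described above: a BFloat16 value is simply the most significant 16 bits of the IEEE-754 binary32 bit pattern. The helper names below are hypothetical and are not part of the patch; a plain truncating conversion is assumed (round-toward-zero), whereas a production conversion would typically round to nearest-even.

#include <bit>
#include <cstdint>
#include <cstdio>

/// Hypothetical helpers illustrating the property: BFloat16 is the upper half of the Float32 representation.
static uint16_t float32ToBFloat16Bits(float x)
{
    /// Truncating conversion: keep the sign, the 8 exponent bits and the top 7 mantissa bits.
    return static_cast<uint16_t>(std::bit_cast<uint32_t>(x) >> 16);
}

static float bfloat16BitsToFloat32(uint16_t x)
{
    /// Widening is exact: pad the mantissa with zero bits.
    return std::bit_cast<float>(static_cast<uint32_t>(x) << 16);
}

int main()
{
    float x = 1.1f;
    uint16_t bf = float32ToBFloat16Bits(x);
    /// Prints: 1.100000 -> 0x3F8C -> 1.093750, the same value the tests below expect for 1.1::BFloat16.
    std::printf("%f -> 0x%04X -> %f\n", x, static_cast<unsigned>(bf), bfloat16BitsToFloat32(bf));
}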
+ */ class BFloat16 { private: diff --git a/src/Functions/FunctionsComparison.h b/src/Functions/FunctionsComparison.h index be0875581a5..bcb9e0641b8 100644 --- a/src/Functions/FunctionsComparison.h +++ b/src/Functions/FunctionsComparison.h @@ -721,6 +721,7 @@ private: || (res = executeNumRightType(col_left, col_right_untyped)) || (res = executeNumRightType(col_left, col_right_untyped)) || (res = executeNumRightType(col_left, col_right_untyped)) + || (res = executeNumRightType(col_left, col_right_untyped)) || (res = executeNumRightType(col_left, col_right_untyped)) || (res = executeNumRightType(col_left, col_right_untyped))) return res; @@ -741,6 +742,7 @@ private: || (res = executeNumConstRightType(col_left_const, col_right_untyped)) || (res = executeNumConstRightType(col_left_const, col_right_untyped)) || (res = executeNumConstRightType(col_left_const, col_right_untyped)) + || (res = executeNumConstRightType(col_left_const, col_right_untyped)) || (res = executeNumConstRightType(col_left_const, col_right_untyped)) || (res = executeNumConstRightType(col_left_const, col_right_untyped))) return res; @@ -1289,9 +1291,10 @@ public: || (res = executeNumLeftType(col_left_untyped, col_right_untyped)) || (res = executeNumLeftType(col_left_untyped, col_right_untyped)) || (res = executeNumLeftType(col_left_untyped, col_right_untyped)) + || (res = executeNumLeftType(col_left_untyped, col_right_untyped)) || (res = executeNumLeftType(col_left_untyped, col_right_untyped)) || (res = executeNumLeftType(col_left_untyped, col_right_untyped)))) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of the first argument of function {}", col_left_untyped->getName(), getName()); return res; @@ -1339,7 +1342,7 @@ public: getName(), left_type->getName(), right_type->getName()); - /// When Decimal comparing to Float32/64, we convert both of them into Float64. + /// When Decimal comparing to Float32/64/16, we convert both of them into Float64. /// Other systems like MySQL and Spark also do as this. 
if (left_is_float || right_is_float) { From 92e8fa23ba0073f2caa43d66bab5d99475d3c656 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 23:43:10 +0100 Subject: [PATCH 394/566] Remove obsolete setting from tests --- src/Databases/enableAllExperimentalSettings.cpp | 1 - tests/performance/avg_weighted.xml | 1 - tests/performance/reinterpret_as.xml | 1 - tests/queries/0_stateless/01035_avg.sql | 2 -- .../0_stateless/01182_materialized_view_different_structure.sql | 1 - tests/queries/0_stateless/01440_big_int_exotic_casts.sql | 2 -- .../0_stateless/01554_bloom_filter_index_big_integer_uuid.sql | 2 -- tests/queries/0_stateless/01622_byte_size.sql | 2 -- tests/queries/0_stateless/01721_dictionary_decimal_p_s.sql | 2 -- tests/queries/0_stateless/01804_dictionary_decimal256_type.sql | 2 -- .../0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh | 2 -- 11 files changed, 18 deletions(-) diff --git a/src/Databases/enableAllExperimentalSettings.cpp b/src/Databases/enableAllExperimentalSettings.cpp index d51d2671992..bc2dae55f97 100644 --- a/src/Databases/enableAllExperimentalSettings.cpp +++ b/src/Databases/enableAllExperimentalSettings.cpp @@ -24,7 +24,6 @@ void enableAllExperimentalSettings(ContextMutablePtr context) context->setSetting("allow_experimental_dynamic_type", 1); context->setSetting("allow_experimental_json_type", 1); context->setSetting("allow_experimental_vector_similarity_index", 1); - context->setSetting("allow_experimental_bigint_types", 1); context->setSetting("allow_experimental_window_functions", 1); context->setSetting("allow_experimental_geo_types", 1); context->setSetting("allow_experimental_map_type", 1); diff --git a/tests/performance/avg_weighted.xml b/tests/performance/avg_weighted.xml index edf3c19fdfa..ec1b7aae5c2 100644 --- a/tests/performance/avg_weighted.xml +++ b/tests/performance/avg_weighted.xml @@ -1,6 +1,5 @@ - 1 1 8 diff --git a/tests/performance/reinterpret_as.xml b/tests/performance/reinterpret_as.xml index d05ef3bb038..2e0fa0571c3 100644 --- a/tests/performance/reinterpret_as.xml +++ b/tests/performance/reinterpret_as.xml @@ -1,6 +1,5 @@ - 1 15G diff --git a/tests/queries/0_stateless/01035_avg.sql b/tests/queries/0_stateless/01035_avg.sql index a3cb35a80ec..0f7baddaec5 100644 --- a/tests/queries/0_stateless/01035_avg.sql +++ b/tests/queries/0_stateless/01035_avg.sql @@ -1,5 +1,3 @@ -SET allow_experimental_bigint_types=1; - CREATE TABLE IF NOT EXISTS test_01035_avg ( i8 Int8 DEFAULT i64, i16 Int16 DEFAULT i64, diff --git a/tests/queries/0_stateless/01182_materialized_view_different_structure.sql b/tests/queries/0_stateless/01182_materialized_view_different_structure.sql index 485f9985974..7e41172bd0c 100644 --- a/tests/queries/0_stateless/01182_materialized_view_different_structure.sql +++ b/tests/queries/0_stateless/01182_materialized_view_different_structure.sql @@ -20,7 +20,6 @@ SELECT sum(value) FROM (SELECT number, sum(number) AS value FROM (SELECT *, toDe CREATE TABLE src (n UInt64, s FixedString(16)) ENGINE=Memory; CREATE TABLE dst (n UInt8, s String) ENGINE = Memory; CREATE MATERIALIZED VIEW mv TO dst (n String) AS SELECT * FROM src; -SET allow_experimental_bigint_types=1; CREATE TABLE dist (n Int128) ENGINE=Distributed(test_cluster_two_shards, currentDatabase(), mv); INSERT INTO src SELECT number, toString(number) FROM numbers(1000); diff --git a/tests/queries/0_stateless/01440_big_int_exotic_casts.sql b/tests/queries/0_stateless/01440_big_int_exotic_casts.sql index 42fde9da01b..f411af897e8 100644 --- 
a/tests/queries/0_stateless/01440_big_int_exotic_casts.sql +++ b/tests/queries/0_stateless/01440_big_int_exotic_casts.sql @@ -32,8 +32,6 @@ SELECT number y, toInt128(number) - y, toInt256(number) - y, toUInt256(number) - SELECT -number y, toInt128(number) + y, toInt256(number) + y, toUInt256(number) + y FROM numbers_mt(10) ORDER BY number; -SET allow_experimental_bigint_types = 1; - DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64, i256 Int256, u256 UInt256, d256 Decimal256(2)) ENGINE = Memory; diff --git a/tests/queries/0_stateless/01554_bloom_filter_index_big_integer_uuid.sql b/tests/queries/0_stateless/01554_bloom_filter_index_big_integer_uuid.sql index 3472f41092d..f82fe39f439 100644 --- a/tests/queries/0_stateless/01554_bloom_filter_index_big_integer_uuid.sql +++ b/tests/queries/0_stateless/01554_bloom_filter_index_big_integer_uuid.sql @@ -1,5 +1,3 @@ -SET allow_experimental_bigint_types = 1; - CREATE TABLE 01154_test (x Int128, INDEX ix_x x TYPE bloom_filter(0.01) GRANULARITY 1) ENGINE = MergeTree() ORDER BY x SETTINGS index_granularity=8192; INSERT INTO 01154_test VALUES (1), (2), (3); SELECT x FROM 01154_test WHERE x = 1; diff --git a/tests/queries/0_stateless/01622_byte_size.sql b/tests/queries/0_stateless/01622_byte_size.sql index 9f9de4e58e9..f73011f4151 100644 --- a/tests/queries/0_stateless/01622_byte_size.sql +++ b/tests/queries/0_stateless/01622_byte_size.sql @@ -4,8 +4,6 @@ select ''; select '# byteSize'; -set allow_experimental_bigint_types = 1; - -- numbers #0 -- select ''; select 'byteSize for numbers #0'; diff --git a/tests/queries/0_stateless/01721_dictionary_decimal_p_s.sql b/tests/queries/0_stateless/01721_dictionary_decimal_p_s.sql index 272bd2d7104..57483430cc0 100644 --- a/tests/queries/0_stateless/01721_dictionary_decimal_p_s.sql +++ b/tests/queries/0_stateless/01721_dictionary_decimal_p_s.sql @@ -1,6 +1,5 @@ -- Tags: no-parallel -set allow_experimental_bigint_types=1; drop database if exists db_01721; drop table if exists db_01721.table_decimal_dict; drop dictionary if exists db_01721.decimal_dict; @@ -77,4 +76,3 @@ SELECT dictGet('db_01721.decimal_dict', 'Decimal32_', toUInt64(5000)), drop table if exists table_decimal_dict; drop dictionary if exists cache_dict; drop database if exists db_01721; - diff --git a/tests/queries/0_stateless/01804_dictionary_decimal256_type.sql b/tests/queries/0_stateless/01804_dictionary_decimal256_type.sql index 08a8d0feb27..32b029442b9 100644 --- a/tests/queries/0_stateless/01804_dictionary_decimal256_type.sql +++ b/tests/queries/0_stateless/01804_dictionary_decimal256_type.sql @@ -1,7 +1,5 @@ -- Tags: no-parallel -SET allow_experimental_bigint_types = 1; - DROP TABLE IF EXISTS dictionary_decimal_source_table; CREATE TABLE dictionary_decimal_source_table ( diff --git a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh index 1294ba53e82..2a24a931696 100755 --- a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh +++ b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh @@ -6,8 +6,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query=" - SET allow_experimental_bigint_types = 1; - DROP TABLE IF EXISTS dictionary_decimal_source_table; CREATE TABLE dictionary_decimal_source_table ( From 19ab7d484a6d7a2346103c5468bca611df03e3d9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 23:50:31 +0100 Subject: [PATCH 395/566] Add an experimental setting --- src/Core/Settings.cpp | 5 ++++- src/Core/SettingsChangesHistory.cpp | 1 + .../parseColumnsListForTableFunction.cpp | 14 ++++++++++++++ .../parseColumnsListForTableFunction.h | 1 + 4 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 01339226c2d..7c2042ee16d 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -5729,7 +5729,10 @@ Enable experimental functions for natural language processing. Enable experimental hash functions )", EXPERIMENTAL) \ DECLARE(Bool, allow_experimental_object_type, false, R"( -Allow Object and JSON data types +Allow the obsolete Object data type +)", EXPERIMENTAL) \ + DECLARE(Bool, allow_experimental_bfloat16_type, false, R"( +Allow BFloat16 data type (under development). )", EXPERIMENTAL) \ DECLARE(Bool, allow_experimental_time_series_table, false, R"( Allows creation of tables with the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine. diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 0ff9d0a6833..23aeeb47224 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -77,6 +77,7 @@ static std::initializer_list(&data_type)) diff --git a/src/Interpreters/parseColumnsListForTableFunction.h b/src/Interpreters/parseColumnsListForTableFunction.h index 6e00492c0ad..39b9f092d89 100644 --- a/src/Interpreters/parseColumnsListForTableFunction.h +++ b/src/Interpreters/parseColumnsListForTableFunction.h @@ -20,6 +20,7 @@ struct DataTypeValidationSettings bool allow_experimental_object_type = true; bool allow_suspicious_fixed_string_types = true; bool allow_experimental_variant_type = true; + bool allow_experimental_bfloat16_type = true; bool allow_suspicious_variant_types = true; bool validate_nested_types = true; bool allow_experimental_dynamic_type = true; From 1a2ee7929e746395a6f0426b6935887af287fd30 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 00:16:09 +0100 Subject: [PATCH 396/566] More conversions --- src/Functions/FunctionsConversion.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 7f4ccc338cf..effaa6faa6d 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -1862,11 +1862,6 @@ struct ConvertImpl } } - if constexpr ((std::is_same_v || std::is_same_v) - && !(std::is_same_v || std::is_same_v)) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported", - TypeName, TypeName); - if constexpr (std::is_same_v || std::is_same_v) { From f042c921ee84ef583f1b76c9d4587b963bd06f45 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 00:16:28 +0100 Subject: [PATCH 397/566] Distances --- base/base/BFloat16.h | 7 ++ src/Common/CPUID.h | 6 ++ src/Common/TargetSpecific.cpp | 3 + src/Common/TargetSpecific.h | 26 +++++- src/Functions/array/arrayDistance.cpp | 112 +++++++++++++++++++------- 5 files changed, 119 insertions(+), 35 deletions(-) diff --git a/base/base/BFloat16.h b/base/base/BFloat16.h index 
f7491b64eb3..2df84dbc0f2 100644 --- a/base/base/BFloat16.h +++ b/base/base/BFloat16.h @@ -80,6 +80,13 @@ public: return x & 0b1000000000000000; } + constexpr BFloat16 abs() const + { + BFloat16 res; + res.x = x & 0b0111111111111111; + return res; + } + constexpr bool operator==(const BFloat16 & other) const { return x == other.x; diff --git a/src/Common/CPUID.h b/src/Common/CPUID.h index b49f7706904..b5c26e64d1e 100644 --- a/src/Common/CPUID.h +++ b/src/Common/CPUID.h @@ -266,6 +266,11 @@ inline bool haveAVX512VBMI2() noexcept return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ecx >> 6) & 1u); } +inline bool haveAVX512BF16() noexcept +{ + return haveAVX512F() && ((CPUInfo(0x7, 1).registers.eax >> 5) & 1u); +} + inline bool haveRDRAND() noexcept { return CPUInfo(0x0).registers.eax >= 0x7 && ((CPUInfo(0x1).registers.ecx >> 30) & 1u); @@ -326,6 +331,7 @@ inline bool haveAMXINT8() noexcept OP(AVX512VL) \ OP(AVX512VBMI) \ OP(AVX512VBMI2) \ + OP(AVX512BF16) \ OP(PREFETCHWT1) \ OP(SHA) \ OP(ADX) \ diff --git a/src/Common/TargetSpecific.cpp b/src/Common/TargetSpecific.cpp index 8540c9a9986..4400d9a60b3 100644 --- a/src/Common/TargetSpecific.cpp +++ b/src/Common/TargetSpecific.cpp @@ -23,6 +23,8 @@ UInt32 getSupportedArchs() result |= static_cast(TargetArch::AVX512VBMI); if (CPU::CPUFlagsCache::have_AVX512VBMI2) result |= static_cast(TargetArch::AVX512VBMI2); + if (CPU::CPUFlagsCache::have_AVX512BF16) + result |= static_cast(TargetArch::AVX512BF16); if (CPU::CPUFlagsCache::have_AMXBF16) result |= static_cast(TargetArch::AMXBF16); if (CPU::CPUFlagsCache::have_AMXTILE) @@ -50,6 +52,7 @@ String toString(TargetArch arch) case TargetArch::AVX512BW: return "avx512bw"; case TargetArch::AVX512VBMI: return "avx512vbmi"; case TargetArch::AVX512VBMI2: return "avx512vbmi2"; + case TargetArch::AVX512BF16: return "avx512bf16"; case TargetArch::AMXBF16: return "amxbf16"; case TargetArch::AMXTILE: return "amxtile"; case TargetArch::AMXINT8: return "amxint8"; diff --git a/src/Common/TargetSpecific.h b/src/Common/TargetSpecific.h index f9523f667b2..5584bd1f63a 100644 --- a/src/Common/TargetSpecific.h +++ b/src/Common/TargetSpecific.h @@ -83,9 +83,10 @@ enum class TargetArch : UInt32 AVX512BW = (1 << 4), AVX512VBMI = (1 << 5), AVX512VBMI2 = (1 << 6), - AMXBF16 = (1 << 7), - AMXTILE = (1 << 8), - AMXINT8 = (1 << 9), + AVX512BF16 = (1 << 7), + AMXBF16 = (1 << 8), + AMXTILE = (1 << 9), + AMXINT8 = (1 << 10), }; /// Runtime detection.
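For illustration, a standalone sketch of what the new haveAVX512BF16() check above reads from the CPU: AVX512-BF16 support is reported in CPUID leaf 7, sub-leaf 1, bit 5 of EAX (the same bit tested by CPUInfo(0x7, 1).registers.eax >> 5 in the patch). The sketch assumes GCC/Clang on x86-64 and uses the compiler-provided <cpuid.h>; a complete production check would, like the patch, also require AVX512F and verify OS support for the AVX-512 state via XGETBV.

#include <cpuid.h>
#include <cstdio>

/// CPUID.(EAX=7, ECX=1):EAX[bit 5] reports AVX512_BF16.
static bool cpuHasAVX512BF16()
{
    unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
    if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx))
        return false;
    return (eax >> 5) & 1u;
}

int main()
{
    std::printf("AVX512-BF16 reported by CPUID: %d\n", cpuHasAVX512BF16() ? 1 : 0);
}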
@@ -102,6 +103,7 @@ String toString(TargetArch arch); /// NOLINTNEXTLINE #define USE_MULTITARGET_CODE 1 +#define AVX512BF16_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2,avx512bf16"))) #define AVX512VBMI2_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2"))) #define AVX512VBMI_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi"))) #define AVX512BW_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw"))) @@ -111,6 +113,8 @@ String toString(TargetArch arch); #define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt"))) #define DEFAULT_FUNCTION_SPECIFIC_ATTRIBUTE +# define BEGIN_AVX512BF16_SPECIFIC_CODE \ + _Pragma("clang attribute push(__attribute__((target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2,avx512bf16\"))),apply_to=function)") # define BEGIN_AVX512VBMI2_SPECIFIC_CODE \ _Pragma("clang attribute push(__attribute__((target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2\"))),apply_to=function)") # define BEGIN_AVX512VBMI_SPECIFIC_CODE \ @@ -197,6 +201,14 @@ namespace TargetSpecific::AVX512VBMI2 { \ } \ END_TARGET_SPECIFIC_CODE +#define DECLARE_AVX512BF16_SPECIFIC_CODE(...) \ +BEGIN_AVX512BF16_SPECIFIC_CODE \ +namespace TargetSpecific::AVX512BF16 { \ + DUMMY_FUNCTION_DEFINITION \ + using namespace DB::TargetSpecific::AVX512BF16; \ + __VA_ARGS__ \ +} \ +END_TARGET_SPECIFIC_CODE #else @@ -211,6 +223,7 @@ END_TARGET_SPECIFIC_CODE #define DECLARE_AVX512BW_SPECIFIC_CODE(...) #define DECLARE_AVX512VBMI_SPECIFIC_CODE(...) #define DECLARE_AVX512VBMI2_SPECIFIC_CODE(...) +#define DECLARE_AVX512BF16_SPECIFIC_CODE(...) #endif @@ -229,7 +242,8 @@ DECLARE_AVX2_SPECIFIC_CODE (__VA_ARGS__) \ DECLARE_AVX512F_SPECIFIC_CODE(__VA_ARGS__) \ DECLARE_AVX512BW_SPECIFIC_CODE (__VA_ARGS__) \ DECLARE_AVX512VBMI_SPECIFIC_CODE (__VA_ARGS__) \ -DECLARE_AVX512VBMI2_SPECIFIC_CODE (__VA_ARGS__) +DECLARE_AVX512VBMI2_SPECIFIC_CODE (__VA_ARGS__) \ +DECLARE_AVX512BF16_SPECIFIC_CODE (__VA_ARGS__) DECLARE_DEFAULT_CODE( constexpr auto BuildArch = TargetArch::Default; /// NOLINT @@ -263,6 +277,10 @@ DECLARE_AVX512VBMI2_SPECIFIC_CODE( constexpr auto BuildArch = TargetArch::AVX512VBMI2; /// NOLINT ) // DECLARE_AVX512VBMI2_SPECIFIC_CODE +DECLARE_AVX512BF16_SPECIFIC_CODE( + constexpr auto BuildArch = TargetArch::AVX512BF16; /// NOLINT +) // DECLARE_AVX512BF16_SPECIFIC_CODE + /** Runtime Dispatch helpers for class members. 
* * Example of usage: diff --git a/src/Functions/array/arrayDistance.cpp b/src/Functions/array/arrayDistance.cpp index a1f48747eb6..da49359c422 100644 --- a/src/Functions/array/arrayDistance.cpp +++ b/src/Functions/array/arrayDistance.cpp @@ -14,6 +14,31 @@ #include #endif + +namespace +{ + inline BFloat16 fabs(BFloat16 x) + { + return x.abs(); + } + + inline BFloat16 sqrt(BFloat16 x) + { + return BFloat16(::sqrtf(Float32(x))); + } + + template + inline BFloat16 pow(BFloat16 x, T p) + { + return BFloat16(::powf(Float32(x), Float32(p))); + } + + inline BFloat16 fmax(BFloat16 x, BFloat16 y) + { + return BFloat16(::fmaxf(Float32(x), Float32(y))); + } +} + namespace DB { namespace ErrorCodes @@ -34,7 +59,7 @@ struct L1Distance template struct State { - FloatType sum = 0; + FloatType sum{}; }; template @@ -65,7 +90,7 @@ struct L2Distance template struct State { - FloatType sum = 0; + FloatType sum{}; }; template @@ -82,7 +107,7 @@ struct L2Distance #if USE_MULTITARGET_CODE template - AVX512_FUNCTION_SPECIFIC_ATTRIBUTE static void accumulateCombine( + AVX512BF16_FUNCTION_SPECIFIC_ATTRIBUTE static void accumulateCombine( const ResultType * __restrict data_x, const ResultType * __restrict data_y, size_t i_max, @@ -90,19 +115,29 @@ struct L2Distance size_t & i_y, State & state) { - static constexpr bool is_float32 = std::is_same_v; - __m512 sums; - if constexpr (is_float32) + if constexpr (sizeof(ResultType) <= 4) sums = _mm512_setzero_ps(); else sums = _mm512_setzero_pd(); - constexpr size_t n = is_float32 ? 16 : 8; + constexpr size_t n = sizeof(__m512) / sizeof(ResultType); for (; i_x + n < i_max; i_x += n, i_y += n) { - if constexpr (is_float32) + if constexpr (sizeof(ResultType) == 2) + { + __m512 x_1 = _mm512_cvtpbh_ps(_mm256_loadu_ps(reinterpret_cast(data_x + i_x))); + __m512 x_2 = _mm512_cvtpbh_ps(_mm256_loadu_ps(reinterpret_cast(data_x + i_x + n / 2))); + __m512 y_1 = _mm512_cvtpbh_ps(_mm256_loadu_ps(reinterpret_cast(data_y + i_y))); + __m512 y_2 = _mm512_cvtpbh_ps(_mm256_loadu_ps(reinterpret_cast(data_y + i_y + n / 2))); + + __m512 differences_1 = _mm512_sub_ps(x_1, y_1); + __m512 differences_2 = _mm512_sub_ps(x_2, y_2); + sums = _mm512_fmadd_ps(differences_1, differences_1, sums); + sums = _mm512_fmadd_ps(differences_2, differences_2, sums); + } + else if constexpr (sizeof(ResultType) == 4) { __m512 x = _mm512_loadu_ps(data_x + i_x); __m512 y = _mm512_loadu_ps(data_y + i_y); @@ -118,7 +153,7 @@ struct L2Distance } } - if constexpr (is_float32) + if constexpr (sizeof(ResultType) <= 4) state.sum = _mm512_reduce_add_ps(sums); else state.sum = _mm512_reduce_add_pd(sums); @@ -128,7 +163,7 @@ struct L2Distance template static ResultType finalize(const State & state, const ConstParams &) { - return sqrt(state.sum); + return sqrt(ResultType(state.sum)); } }; @@ -156,13 +191,13 @@ struct LpDistance template struct State { - FloatType sum = 0; + FloatType sum{}; }; template static void accumulate(State & state, ResultType x, ResultType y, const ConstParams & params) { - state.sum += static_cast(std::pow(fabs(x - y), params.power)); + state.sum += static_cast(pow(fabs(x - y), params.power)); } template @@ -174,7 +209,7 @@ struct LpDistance template static ResultType finalize(const State & state, const ConstParams & params) { - return static_cast(std::pow(state.sum, params.inverted_power)); + return static_cast(pow(state.sum, params.inverted_power)); } }; @@ -187,7 +222,7 @@ struct LinfDistance template struct State { - FloatType dist = 0; + FloatType dist{}; }; template @@ -218,9 +253,9 @@ struct 
CosineDistance template struct State { - FloatType dot_prod = 0; - FloatType x_squared = 0; - FloatType y_squared = 0; + FloatType dot_prod{}; + FloatType x_squared{}; + FloatType y_squared{}; }; template @@ -241,7 +276,7 @@ struct CosineDistance #if USE_MULTITARGET_CODE template - AVX512_FUNCTION_SPECIFIC_ATTRIBUTE static void accumulateCombine( + AVX512BF16_FUNCTION_SPECIFIC_ATTRIBUTE static void accumulateCombine( const ResultType * __restrict data_x, const ResultType * __restrict data_y, size_t i_max, @@ -249,13 +284,11 @@ struct CosineDistance size_t & i_y, State & state) { - static constexpr bool is_float32 = std::is_same_v; - __m512 dot_products; __m512 x_squareds; __m512 y_squareds; - if constexpr (is_float32) + if constexpr (sizeof(ResultType) <= 4) { dot_products = _mm512_setzero_ps(); x_squareds = _mm512_setzero_ps(); @@ -268,11 +301,19 @@ struct CosineDistance y_squareds = _mm512_setzero_pd(); } - constexpr size_t n = is_float32 ? 16 : 8; + constexpr size_t n = sizeof(__m512) / sizeof(ResultType); for (; i_x + n < i_max; i_x += n, i_y += n) { - if constexpr (is_float32) + if constexpr (sizeof(ResultType) == 2) + { + __m512 x = _mm512_loadu_ps(data_x + i_x); + __m512 y = _mm512_loadu_ps(data_y + i_y); + dot_products = _mm512_dpbf16_ps(dot_products, x, y); + x_squareds = _mm512_dpbf16_ps(x_squareds, x, x); + y_squareds = _mm512_dpbf16_ps(y_squareds, y, y); + } + if constexpr (sizeof(ResultType) == 4) { __m512 x = _mm512_loadu_ps(data_x + i_x); __m512 y = _mm512_loadu_ps(data_y + i_y); @@ -290,7 +331,7 @@ struct CosineDistance } } - if constexpr (is_float32) + if constexpr (sizeof(ResultType) == 2 || sizeof(ResultType) == 4) { state.dot_prod = _mm512_reduce_add_ps(dot_products); state.x_squared = _mm512_reduce_add_ps(x_squareds); @@ -308,7 +349,7 @@ struct CosineDistance template static ResultType finalize(const State & state, const ConstParams &) { - return 1 - state.dot_prod / sqrt(state.x_squared * state.y_squared); + return ResultType(1) - state.dot_prod / sqrt(state.x_squared * state.y_squared); } }; @@ -353,11 +394,13 @@ public: return std::make_shared(); case TypeIndex::Float32: return std::make_shared(); + case TypeIndex::BFloat16: + return std::make_shared(); default: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Arguments of function {} has nested type {}. " - "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.", + "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, BFloat16, Float32, Float64.", getName(), common_type->getName()); } @@ -367,6 +410,9 @@ public: { switch (result_type->getTypeId()) { + case TypeIndex::BFloat16: + return executeWithResultType(arguments, input_rows_count); + break; case TypeIndex::Float32: return executeWithResultType(arguments, input_rows_count); break; @@ -388,6 +434,7 @@ public: ACTION(Int16) \ ACTION(Int32) \ ACTION(Int64) \ + ACTION(BFloat16) \ ACTION(Float32) \ ACTION(Float64) @@ -412,7 +459,7 @@ private: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Arguments of function {} has nested type {}. " - "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.", + "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, BFloat16, Float32, Float64.", getName(), type_x->getName()); } @@ -437,7 +484,7 @@ private: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Arguments of function {} has nested type {}. 
" - "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.", + "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, BFloat16, Float32, Float64.", getName(), type_y->getName()); } @@ -548,13 +595,15 @@ private: /// SIMD optimization: process multiple elements in both input arrays at once. /// To avoid combinatorial explosion of SIMD kernels, focus on - /// - the two most common input/output types (Float32 x Float32) --> Float32 and (Float64 x Float64) --> Float64 instead of 10 x - /// 10 input types x 2 output types, + /// - the three most common input/output types (BFloat16 x BFloat16) --> BFloat16, + /// (Float32 x Float32) --> Float32 and (Float64 x Float64) --> Float64 + /// instead of 10 x 10 input types x 2 output types, /// - const/non-const inputs instead of non-const/non-const inputs /// - the two most common metrics L2 and cosine distance, /// - the most powerful SIMD instruction set (AVX-512F). #if USE_MULTITARGET_CODE - if constexpr (std::is_same_v && std::is_same_v) /// ResultType is Float32 or Float64 + /// ResultType is BFloat16, Float32 or Float64 + if constexpr (std::is_same_v && std::is_same_v) { if constexpr (std::is_same_v || std::is_same_v) @@ -638,4 +687,5 @@ FunctionPtr createFunctionArrayL2SquaredDistance(ContextPtr context_) { return F FunctionPtr createFunctionArrayLpDistance(ContextPtr context_) { return FunctionArrayDistance::create(context_); } FunctionPtr createFunctionArrayLinfDistance(ContextPtr context_) { return FunctionArrayDistance::create(context_); } FunctionPtr createFunctionArrayCosineDistance(ContextPtr context_) { return FunctionArrayDistance::create(context_); } + } From 6dee7e42766177e30712f7c1c341663b3fba2f91 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 00:24:57 +0100 Subject: [PATCH 398/566] Fix style --- src/Databases/enableAllExperimentalSettings.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Databases/enableAllExperimentalSettings.cpp b/src/Databases/enableAllExperimentalSettings.cpp index bc2dae55f97..1be54664bc9 100644 --- a/src/Databases/enableAllExperimentalSettings.cpp +++ b/src/Databases/enableAllExperimentalSettings.cpp @@ -27,6 +27,8 @@ void enableAllExperimentalSettings(ContextMutablePtr context) context->setSetting("allow_experimental_window_functions", 1); context->setSetting("allow_experimental_geo_types", 1); context->setSetting("allow_experimental_map_type", 1); + context->setSetting("allow_experimental_bigint_types", 1); + context->setSetting("allow_experimental_bfloat16_type", 1); context->setSetting("allow_deprecated_error_prone_window_functions", 1); context->setSetting("allow_suspicious_low_cardinality_types", 1); From 89b015cecfad9a6a8f44039efa556d209ea50239 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 00:25:11 +0100 Subject: [PATCH 399/566] Do not compile BFloat16 --- src/DataTypes/IDataType.h | 3 ++- src/DataTypes/Native.cpp | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 1e41d6b2eba..8f06526ddbb 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -411,7 +411,8 @@ struct WhichDataType constexpr bool isBFloat16() const { return idx == TypeIndex::BFloat16; } constexpr bool isFloat32() const { return idx == TypeIndex::Float32; } constexpr bool isFloat64() const { return idx == TypeIndex::Float64; } - constexpr bool isFloat() const { return isBFloat16() || isFloat32() || isFloat64(); } + constexpr 
bool isNativeFloat() const { return isFloat32() || isFloat64(); } + constexpr bool isFloat() const { return isNativeFloat() || isBFloat16(); } constexpr bool isNativeNumber() const { return isNativeInteger() || isFloat(); } constexpr bool isNumber() const { return isInteger() || isFloat() || isDecimal(); } diff --git a/src/DataTypes/Native.cpp b/src/DataTypes/Native.cpp index 5dc490b0bd5..53354d7c6e0 100644 --- a/src/DataTypes/Native.cpp +++ b/src/DataTypes/Native.cpp @@ -37,7 +37,7 @@ bool canBeNativeType(const IDataType & type) return canBeNativeType(*data_type_nullable.getNestedType()); } - return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate() + return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isNativeFloat() || data_type.isDate() || data_type.isDate32() || data_type.isDateTime() || data_type.isEnum(); } From 968a559917577a63464bbaf87d3e724912cb7d5a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 00:59:37 +0100 Subject: [PATCH 400/566] Add a test --- .../queries/0_stateless/03269_bf16.reference | 45 ++++++++++ tests/queries/0_stateless/03269_bf16.sql | 88 +++++++++++++++++++ 2 files changed, 133 insertions(+) create mode 100644 tests/queries/0_stateless/03269_bf16.reference create mode 100644 tests/queries/0_stateless/03269_bf16.sql diff --git a/tests/queries/0_stateless/03269_bf16.reference b/tests/queries/0_stateless/03269_bf16.reference new file mode 100644 index 00000000000..daa26cb252f --- /dev/null +++ b/tests/queries/0_stateless/03269_bf16.reference @@ -0,0 +1,45 @@ +1 -1 1.09375 -1.09375 1 -1 1.09375 -1.09375 18446744000000000000 -0 inf -inf nan +1.09375 1.09375 1.09375 1 +1 1 0 1 1 +0 2.1875 1.1962891 1 Float32 Float32 Float32 Float64 +-0.006250000000000089 2.19375 1.203125 1.0057142857142858 Float64 Float64 Float64 Float64 +0 0 1 0 +1000 1000 1 0 +2000 2000 1 0 +3000 2992 0 8 +4000 4000 1 0 +5000 4992 0 8 +6000 5984 0 16 +7000 6976 0 24 +8000 8000 1 0 +9000 8960 0 40 +49995000 49855104 4999.5 4985.5104 0 0 9999 9984 10000 925 10000 925 +0 0 1 0 +1000 1000 1 0 +2000 2000 1 0 +3000 2992 0 8 +4000 4000 1 0 +5000 4992 0 8 +6000 5984 0 16 +7000 6976 0 24 +8000 8000 1 0 +9000 8960 0 40 +49995000 49855104 4999.5 4985.5104 0 0 9999 9984 10000 925 10000 925 +Row 1: +────── +a32: 
[0,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,20,20.5,21,21.5,22,22.5,23,23.5,24,24.5,25,25.5,26,26.5,27,27.5,28,28.5,29,29.5,30,30.5,31,31.5,32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49,49.5,50,50.5,51,51.5,52,52.5,53,53.5,54,54.5,55,55.5,56,56.5,57,57.5,58,58.5,59,59.5,60,60.5,61,61.5,62,62.5,63,63.5,64,64.5,65,65.5,66,66.5,67,67.5,68,68.5,69,69.5,70,70.5,71,71.5,72,72.5,73,73.5,74,74.5,75,75.5,76,76.5,77,77.5,78,78.5,79,79.5,80,80.5,81,81.5,82,82.5,83,83.5,84,84.5,85,85.5,86,86.5,87,87.5,88,88.5,89,89.5,90,90.5,91,91.5,92,92.5,93,93.5,94,94.5,95,95.5,96,96.5,97,97.5,98,98.5,99,99.5,100,100.5,101,101.5,102,102.5,103,103.5,104,104.5,105,105.5,106,106.5,107,107.5,108,108.5,109,109.5,110,110.5,111,111.5,112,112.5,113,113.5,114,114.5,115,115.5,116,116.5,117,117.5,118,118.5,119,119.5,120,120.5,121,121.5,122,122.5,123,123.5,124,124.5,125,125.5,126,126.5,127,127.5,128,128.5,129,129.5,130,130.5,131,131.5,132,132.5,133,133.5,134,134.5,135,135.5,136,136.5,137,137.5,138,138.5,139,139.5,140,140.5,141,141.5,142,142.5,143,143.5,144,144.5,145,145.5,146,146.5,147,147.5,148,148.5,149,149.5,150,150.5,151,151.5,152,152.5,153,153.5,154,154.5,155,155.5,156,156.5,157,157.5,158,158.5,159,159.5,160,160.5,161,161.5,162,162.5,163,163.5,164,164.5,165,165.5,166,166.5,167,167.5,168,168.5,169,169.5,170,170.5,171,171.5,172,172.5,173,173.5,174,174.5,175,175.5,176,176.5,177,177.5,178,178.5,179,179.5,180,180.5,181,181.5,182,182.5,183,183.5,184,184.5,185,185.5,186,186.5,187,187.5,188,188.5,189,189.5,190,190.5,191,191.5] +a16: [0,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,20,20.5,21,21.5,22,22.5,23,23.5,24,24.5,25,25.5,26,26.5,27,27.5,28,28.5,29,29.5,30,30.5,31,31.5,32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49,49.5,50,50.5,51,51.5,52,52.5,53,53.5,54,54.5,55,55.5,56,56.5,57,57.5,58,58.5,59,59.5,60,60.5,61,61.5,62,62.5,63,63.5,64,64.5,65,65.5,66,66.5,67,67.5,68,68.5,69,69.5,70,70.5,71,71.5,72,72.5,73,73.5,74,74.5,75,75.5,76,76.5,77,77.5,78,78.5,79,79.5,80,80.5,81,81.5,82,82.5,83,83.5,84,84.5,85,85.5,86,86.5,87,87.5,88,88.5,89,89.5,90,90.5,91,91.5,92,92.5,93,93.5,94,94.5,95,95.5,96,96.5,97,97.5,98,98.5,99,99.5,100,100.5,101,101.5,102,102.5,103,103.5,104,104.5,105,105.5,106,106.5,107,107.5,108,108.5,109,109.5,110,110.5,111,111.5,112,112.5,113,113.5,114,114.5,115,115.5,116,116.5,117,117.5,118,118.5,119,119.5,120,120.5,121,121.5,122,122.5,123,123.5,124,124.5,125,125.5,126,126.5,127,127.5,128,128,129,129,130,130,131,131,132,132,133,133,134,134,135,135,136,136,137,137,138,138,139,139,140,140,141,141,142,142,143,143,144,144,145,145,146,146,147,147,148,148,149,149,150,150,151,151,152,152,153,153,154,154,155,155,156,156,157,157,158,158,159,159,160,160,161,161,162,162,163,163,164,164,165,165,166,166,167,167,168,168,169,169,170,170,171,171,172,172,173,173,174,174,175,175,176,176,177,177,178,178,179,179,180,180,181,181,182,182,183,183,184,184,185,185,186,186,187,187,188,188,189,189,190,190,191,191] +a32_1: 
[1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,20,20.5,21,21.5,22,22.5,23,23.5,24,24.5,25,25.5,26,26.5,27,27.5,28,28.5,29,29.5,30,30.5,31,31.5,32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49,49.5,50,50.5,51,51.5,52,52.5,53,53.5,54,54.5,55,55.5,56,56.5,57,57.5,58,58.5,59,59.5,60,60.5,61,61.5,62,62.5,63,63.5,64,64.5,65,65.5,66,66.5,67,67.5,68,68.5,69,69.5,70,70.5,71,71.5,72,72.5,73,73.5,74,74.5,75,75.5,76,76.5,77,77.5,78,78.5,79,79.5,80,80.5,81,81.5,82,82.5,83,83.5,84,84.5,85,85.5,86,86.5,87,87.5,88,88.5,89,89.5,90,90.5,91,91.5,92,92.5,93,93.5,94,94.5,95,95.5,96,96.5,97,97.5,98,98.5,99,99.5,100,100.5,101,101.5,102,102.5,103,103.5,104,104.5,105,105.5,106,106.5,107,107.5,108,108.5,109,109.5,110,110.5,111,111.5,112,112.5,113,113.5,114,114.5,115,115.5,116,116.5,117,117.5,118,118.5,119,119.5,120,120.5,121,121.5,122,122.5,123,123.5,124,124.5,125,125.5,126,126.5,127,127.5,128,128.5,129,129.5,130,130.5,131,131.5,132,132.5,133,133.5,134,134.5,135,135.5,136,136.5,137,137.5,138,138.5,139,139.5,140,140.5,141,141.5,142,142.5,143,143.5,144,144.5,145,145.5,146,146.5,147,147.5,148,148.5,149,149.5,150,150.5,151,151.5,152,152.5,153,153.5,154,154.5,155,155.5,156,156.5,157,157.5,158,158.5,159,159.5,160,160.5,161,161.5,162,162.5,163,163.5,164,164.5,165,165.5,166,166.5,167,167.5,168,168.5,169,169.5,170,170.5,171,171.5,172,172.5,173,173.5,174,174.5,175,175.5,176,176.5,177,177.5,178,178.5,179,179.5,180,180.5,181,181.5,182,182.5,183,183.5,184,184.5,185,185.5,186,186.5,187,187.5,188,188.5,189,189.5,190,190.5,191,191.5,192,192.5] +a16_1: [1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,20,20.5,21,21.5,22,22.5,23,23.5,24,24.5,25,25.5,26,26.5,27,27.5,28,28.5,29,29.5,30,30.5,31,31.5,32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49,49.5,50,50.5,51,51.5,52,52.5,53,53.5,54,54.5,55,55.5,56,56.5,57,57.5,58,58.5,59,59.5,60,60.5,61,61.5,62,62.5,63,63.5,64,64.5,65,65.5,66,66.5,67,67.5,68,68.5,69,69.5,70,70.5,71,71.5,72,72.5,73,73.5,74,74.5,75,75.5,76,76.5,77,77.5,78,78.5,79,79.5,80,80.5,81,81.5,82,82.5,83,83.5,84,84.5,85,85.5,86,86.5,87,87.5,88,88.5,89,89.5,90,90.5,91,91.5,92,92.5,93,93.5,94,94.5,95,95.5,96,96.5,97,97.5,98,98.5,99,99.5,100,100.5,101,101.5,102,102.5,103,103.5,104,104.5,105,105.5,106,106.5,107,107.5,108,108.5,109,109.5,110,110.5,111,111.5,112,112.5,113,113.5,114,114.5,115,115.5,116,116.5,117,117.5,118,118.5,119,119.5,120,120.5,121,121.5,122,122.5,123,123.5,124,124.5,125,125.5,126,126.5,127,127.5,128,128.5,129,129,130,130,131,131,132,132,133,133,134,134,135,135,136,136,137,137,138,138,139,139,140,140,141,141,142,142,143,143,144,144,145,145,146,146,147,147,148,148,149,149,150,150,151,151,152,152,153,153,154,154,155,155,156,156,157,157,158,158,159,159,160,160,161,161,162,162,163,163,164,164,165,165,166,166,167,167,168,168,169,169,170,170,171,171,172,172,173,173,174,174,175,175,176,176,177,177,178,178,179,179,180,180,181,181,182,182,183,183,184,184,185,185,186,186,187,187,188,188,189,189,190,190,191,191,192,192] +dotProduct(a32, a32_1): 4736944 -- 4.74 million +dotProduct(a16, a16_1): 4726688 -- 4.73 million +cosineDistance(a32, a32_1): 0.000010093636084174129 +cosineDistance(a16, a16_1): 0.00001010226319664298 +L2Distance(a32, a32_1): 19.595917942265423 
+L2Distance(a16, a16_1): 19.595917942265423 +L1Distance(a32, a32_1): 384 +L1Distance(a16, a16_1): 384 +LinfDistance(a32, a32_1): 1 +LinfDistance(a16, a16_1): 1 +LpDistance(a32, a32_1, 5): 3.2875036590344515 +LpDistance(a16, a16_1, 5): 3.2875036590344515 diff --git a/tests/queries/0_stateless/03269_bf16.sql b/tests/queries/0_stateless/03269_bf16.sql new file mode 100644 index 00000000000..375cca73b62 --- /dev/null +++ b/tests/queries/0_stateless/03269_bf16.sql @@ -0,0 +1,88 @@ +SET allow_experimental_bfloat16_type = 1; + +-- This is a smoke test, non exhaustive. + +-- Conversions + +SELECT + 1::BFloat16, + -1::BFloat16, + 1.1::BFloat16, + -1.1::BFloat16, + CAST(1 AS BFloat16), + CAST(-1 AS BFloat16), + CAST(1.1 AS BFloat16), + CAST(-1.1 AS BFloat16), + CAST(0xFFFFFFFFFFFFFFFF AS BFloat16), + CAST(-0.0 AS BFloat16), + CAST(inf AS BFloat16), + CAST(-inf AS BFloat16), + CAST(nan AS BFloat16); + +-- Conversions back + +SELECT + CAST(1.1::BFloat16 AS BFloat16), + CAST(1.1::BFloat16 AS Float32), + CAST(1.1::BFloat16 AS Float64), + CAST(1.1::BFloat16 AS Int8); + +-- Comparisons + +SELECT + 1.1::BFloat16 = 1.1::BFloat16, + 1.1::BFloat16 < 1.1, + 1.1::BFloat16 > 1.1, + 1.1::BFloat16 > 1, + 1.1::BFloat16 = 1.09375; + +-- Arithmetic + +SELECT + 1.1::BFloat16 - 1.1::BFloat16 AS a, + 1.1::BFloat16 + 1.1::BFloat16 AS b, + 1.1::BFloat16 * 1.1::BFloat16 AS c, + 1.1::BFloat16 / 1.1::BFloat16 AS d, + toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d); + +SELECT + 1.1::BFloat16 - 1.1 AS a, + 1.1 + 1.1::BFloat16 AS b, + 1.1::BFloat16 * 1.1 AS c, + 1.1 / 1.1::BFloat16 AS d, + toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d); + +-- Tables + +DROP TABLE IF EXISTS t; +CREATE TEMPORARY TABLE t (n UInt64, x BFloat16); +INSERT INTO t SELECT number, number FROM numbers(10000); +SELECT *, n = x, n - x FROM t WHERE n % 1000 = 0 ORDER BY n; + +-- Aggregate functions + +SELECT sum(n), sum(x), avg(n), avg(x), min(n), min(x), max(n), max(x), uniq(n), uniq(x), uniqExact(n), uniqExact(x) FROM t; + +-- MergeTree + +DROP TABLE t; +CREATE TABLE t (n UInt64, x BFloat16) ENGINE = MergeTree ORDER BY n; +INSERT INTO t SELECT number, number FROM numbers(10000); +SELECT *, n = x, n - x FROM t WHERE n % 1000 = 0 ORDER BY n; +SELECT sum(n), sum(x), avg(n), avg(x), min(n), min(x), max(n), max(x), uniq(n), uniq(x), uniqExact(n), uniqExact(x) FROM t; + +-- Distances + +WITH + arrayMap(x -> toFloat32(x) / 2, range(384)) AS a32, + arrayMap(x -> toBFloat16(x) / 2, range(384)) AS a16, + arrayMap(x -> x + 1, a32) AS a32_1, + arrayMap(x -> x + 1, a16) AS a16_1 +SELECT a32, a16, a32_1, a16_1, + dotProduct(a32, a32_1), dotProduct(a16, a16_1), + cosineDistance(a32, a32_1), cosineDistance(a16, a16_1), + L2Distance(a32, a32_1), L2Distance(a16, a16_1), + L1Distance(a32, a32_1), L1Distance(a16, a16_1), + LinfDistance(a32, a32_1), LinfDistance(a16, a16_1), + LpDistance(a32, a32_1, 5), LpDistance(a16, a16_1, 5) +FORMAT Vertical; From bfeefa2c8a5ce71dd0cc90d68d831d694aef3418 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 01:02:10 +0100 Subject: [PATCH 401/566] Introspection --- src/Functions/FunctionsBinaryRepresentation.cpp | 1 + tests/queries/0_stateless/03269_bf16.reference | 1 + tests/queries/0_stateless/03269_bf16.sql | 7 +++++++ 3 files changed, 9 insertions(+) diff --git a/src/Functions/FunctionsBinaryRepresentation.cpp b/src/Functions/FunctionsBinaryRepresentation.cpp index c8e8f167e4c..50a3c0862f4 100644 --- a/src/Functions/FunctionsBinaryRepresentation.cpp +++ 
b/src/Functions/FunctionsBinaryRepresentation.cpp @@ -296,6 +296,7 @@ public: tryExecuteUIntOrInt(column, res_column) || tryExecuteString(column, res_column) || tryExecuteFixedString(column, res_column) || + tryExecuteFloat(column, res_column) || tryExecuteFloat(column, res_column) || tryExecuteFloat(column, res_column) || tryExecuteDecimal(column, res_column) || diff --git a/tests/queries/0_stateless/03269_bf16.reference b/tests/queries/0_stateless/03269_bf16.reference index daa26cb252f..31395d92e2b 100644 --- a/tests/queries/0_stateless/03269_bf16.reference +++ b/tests/queries/0_stateless/03269_bf16.reference @@ -43,3 +43,4 @@ LinfDistance(a32, a32_1): 1 LinfDistance(a16, a16_1): 1 LpDistance(a32, a32_1, 5): 3.2875036590344515 LpDistance(a16, a16_1, 5): 3.2875036590344515 +1.09375 8C3F 1000110000111111 2 16268 8C3F diff --git a/tests/queries/0_stateless/03269_bf16.sql b/tests/queries/0_stateless/03269_bf16.sql index 375cca73b62..de4e2f6da47 100644 --- a/tests/queries/0_stateless/03269_bf16.sql +++ b/tests/queries/0_stateless/03269_bf16.sql @@ -86,3 +86,10 @@ SELECT a32, a16, a32_1, a16_1, LinfDistance(a32, a32_1), LinfDistance(a16, a16_1), LpDistance(a32, a32_1, 5), LpDistance(a16, a16_1, 5) FORMAT Vertical; + +-- Introspection + +SELECT 1.1::BFloat16 AS x, + hex(x), bin(x), + byteSize(x), + reinterpretAsUInt16(x), hex(reinterpretAsString(x)); From 3e50cf94fe858e8440ffd69040334356326b97db Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 01:04:55 +0100 Subject: [PATCH 402/566] Rounding --- tests/queries/0_stateless/03269_bf16.reference | 1 + tests/queries/0_stateless/03269_bf16.sql | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/tests/queries/0_stateless/03269_bf16.reference b/tests/queries/0_stateless/03269_bf16.reference index 31395d92e2b..896cc307623 100644 --- a/tests/queries/0_stateless/03269_bf16.reference +++ b/tests/queries/0_stateless/03269_bf16.reference @@ -44,3 +44,4 @@ LinfDistance(a16, a16_1): 1 LpDistance(a32, a32_1, 5): 3.2875036590344515 LpDistance(a16, a16_1, 5): 3.2875036590344515 1.09375 8C3F 1000110000111111 2 16268 8C3F +1.09375 1 1.09375 1.0859375 0 diff --git a/tests/queries/0_stateless/03269_bf16.sql b/tests/queries/0_stateless/03269_bf16.sql index de4e2f6da47..b332a6e3119 100644 --- a/tests/queries/0_stateless/03269_bf16.sql +++ b/tests/queries/0_stateless/03269_bf16.sql @@ -93,3 +93,8 @@ SELECT 1.1::BFloat16 AS x, hex(x), bin(x), byteSize(x), reinterpretAsUInt16(x), hex(reinterpretAsString(x)); + +-- Rounding (this could be not towards the nearest) + +SELECT 1.1::BFloat16 AS x, + round(x), round(x, 1), round(x, 2), round(x, -1); From 866e4daeecb301030e3f89eb56395c1156cb840d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 01:10:49 +0100 Subject: [PATCH 403/566] Update index.md --- .../aggregate-functions/reference/index.md | 174 +++++++++--------- 1 file changed, 85 insertions(+), 89 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md index 2dce0afe2e1..d7b287f764b 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/index.md +++ b/docs/en/sql-reference/aggregate-functions/reference/index.md @@ -7,119 +7,115 @@ toc_hidden: true # List of Aggregate Functions -Standard aggregate functions: - -- [count](../reference/count.md) -- [min](../reference/min.md) -- [max](../reference/max.md) -- [sum](../reference/sum.md) -- [avg](../reference/avg.md) -- [any](../reference/any.md) -- 
[stddevPop](../reference/stddevpop.md) -- [stddevPopStable](../reference/stddevpopstable.md) -- [stddevSamp](../reference/stddevsamp.md) -- [stddevSampStable](../reference/stddevsampstable.md) -- [varPop](../reference/varpop.md) -- [varSamp](../reference/varsamp.md) -- [corr](../reference/corr.md) -- [corr](../reference/corrstable.md) -- [corrMatrix](../reference/corrmatrix.md) -- [covarPop](../reference/covarpop.md) -- [covarStable](../reference/covarpopstable.md) -- [covarPopMatrix](../reference/covarpopmatrix.md) -- [covarSamp](../reference/covarsamp.md) -- [covarSampStable](../reference/covarsampstable.md) -- [covarSampMatrix](../reference/covarsampmatrix.md) -- [entropy](../reference/entropy.md) -- [exponentialMovingAverage](../reference/exponentialmovingaverage.md) -- [intervalLengthSum](../reference/intervalLengthSum.md) -- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md) -- [mannwhitneyutest](../reference/mannwhitneyutest.md) -- [median](../reference/median.md) -- [rankCorr](../reference/rankCorr.md) -- [sumKahan](../reference/sumkahan.md) -- [studentTTest](../reference/studentttest.md) -- [welchTTest](../reference/welchttest.md) - -ClickHouse-specific aggregate functions: +ClickHouse supports all standard SQL functions (sum, avg, min, max, count) and a wide range of aggregate functions for various applications: - [aggThrow](../reference/aggthrow.md) - [analysisOfVariance](../reference/analysis_of_variance.md) -- [any](../reference/any.md) - [anyHeavy](../reference/anyheavy.md) - [anyLast](../reference/anylast.md) -- [boundingRatio](../reference/boundrat.md) -- [first_value](../reference/first_value.md) -- [last_value](../reference/last_value.md) -- [argMin](../reference/argmin.md) +- [any](../reference/any.md) - [argMax](../reference/argmax.md) +- [argMin](../reference/argmin.md) - [avgWeighted](../reference/avgweighted.md) -- [topK](../reference/topk.md) -- [topKWeighted](../reference/topkweighted.md) -- [deltaSum](../reference/deltasum.md) +- [avg](../reference/avg.md) +- [boundingRatio](../reference/boundrat.md) +- [categoricalInformationValue](../reference/categoricalinformationvalue.md) +- [contingency](../reference/contingency.md) +- [corrMatrix](../reference/corrmatrix.md) +- [corr](../reference/corr.md) +- [corr](../reference/corrstable.md) +- [count](../reference/count.md) +- [covarPopMatrix](../reference/covarpopmatrix.md) +- [covarPop](../reference/covarpop.md) +- [covarSampMatrix](../reference/covarsampmatrix.md) +- [covarSampStable](../reference/covarsampstable.md) +- [covarSamp](../reference/covarsamp.md) +- [covarStable](../reference/covarpopstable.md) +- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md) +- [cramersV](../reference/cramersv.md) - [deltaSumTimestamp](../reference/deltasumtimestamp.md) +- [deltaSum](../reference/deltasum.md) +- [entropy](../reference/entropy.md) +- [exponentialMovingAverage](../reference/exponentialmovingaverage.md) +- [first_value](../reference/first_value.md) - [flameGraph](../reference/flame_graph.md) -- [groupArray](../reference/grouparray.md) -- [groupArrayLast](../reference/grouparraylast.md) -- [groupUniqArray](../reference/groupuniqarray.md) - [groupArrayInsertAt](../reference/grouparrayinsertat.md) +- [groupArrayIntersect](../reference/grouparrayintersect.md) +- [groupArrayLast](../reference/grouparraylast.md) - [groupArrayMovingAvg](../reference/grouparraymovingavg.md) - [groupArrayMovingSum](../reference/grouparraymovingsum.md) - [groupArraySample](../reference/grouparraysample.md) - 
[groupArraySorted](../reference/grouparraysorted.md) -- [groupArrayIntersect](../reference/grouparrayintersect.md) +- [groupArray](../reference/grouparray.md) - [groupBitAnd](../reference/groupbitand.md) - [groupBitOr](../reference/groupbitor.md) - [groupBitXor](../reference/groupbitxor.md) -- [groupBitmap](../reference/groupbitmap.md) - [groupBitmapAnd](../reference/groupbitmapand.md) - [groupBitmapOr](../reference/groupbitmapor.md) - [groupBitmapXor](../reference/groupbitmapxor.md) -- [sumWithOverflow](../reference/sumwithoverflow.md) -- [sumMap](../reference/summap.md) -- [sumMapWithOverflow](../reference/summapwithoverflow.md) -- [sumMapFiltered](../parametric-functions.md/#summapfiltered) -- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow) -- [minMap](../reference/minmap.md) -- [maxMap](../reference/maxmap.md) -- [skewSamp](../reference/skewsamp.md) -- [skewPop](../reference/skewpop.md) -- [kurtSamp](../reference/kurtsamp.md) +- [groupBitmap](../reference/groupbitmap.md) +- [groupUniqArray](../reference/groupuniqarray.md) +- [intervalLengthSum](../reference/intervalLengthSum.md) +- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md) - [kurtPop](../reference/kurtpop.md) -- [uniq](../reference/uniq.md) -- [uniqExact](../reference/uniqexact.md) -- [uniqCombined](../reference/uniqcombined.md) -- [uniqCombined64](../reference/uniqcombined64.md) -- [uniqHLL12](../reference/uniqhll12.md) -- [uniqTheta](../reference/uniqthetasketch.md) -- [quantile](../reference/quantile.md) -- [quantiles](../reference/quantiles.md) -- [quantileExact](../reference/quantileexact.md) -- [quantileExactLow](../reference/quantileexact.md#quantileexactlow) -- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh) -- [quantileExactWeighted](../reference/quantileexactweighted.md) -- [quantileTiming](../reference/quantiletiming.md) -- [quantileTimingWeighted](../reference/quantiletimingweighted.md) -- [quantileDeterministic](../reference/quantiledeterministic.md) -- [quantileTDigest](../reference/quantiletdigest.md) -- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md) -- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16) -- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted) -- [quantileDD](../reference/quantileddsketch.md#quantileddsketch) -- [simpleLinearRegression](../reference/simplelinearregression.md) -- [singleValueOrNull](../reference/singlevalueornull.md) -- [stochasticLinearRegression](../reference/stochasticlinearregression.md) -- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md) -- [categoricalInformationValue](../reference/categoricalinformationvalue.md) -- [contingency](../reference/contingency.md) -- [cramersV](../reference/cramersv.md) -- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md) -- [theilsU](../reference/theilsu.md) -- [maxIntersections](../reference/maxintersections.md) +- [kurtSamp](../reference/kurtsamp.md) +- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md) +- [last_value](../reference/last_value.md) +- [mannwhitneyutest](../reference/mannwhitneyutest.md) - [maxIntersectionsPosition](../reference/maxintersectionsposition.md) +- [maxIntersections](../reference/maxintersections.md) +- [maxMap](../reference/maxmap.md) +- [max](../reference/max.md) - [meanZTest](../reference/meanztest.md) +- [median](../reference/median.md) +- [minMap](../reference/minmap.md) +- [min](../reference/min.md) +- 
[quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted) +- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16) +- [quantileDD](../reference/quantileddsketch.md#quantileddsketch) +- [quantileDeterministic](../reference/quantiledeterministic.md) +- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh) +- [quantileExactLow](../reference/quantileexact.md#quantileexactlow) +- [quantileExactWeighted](../reference/quantileexactweighted.md) +- [quantileExact](../reference/quantileexact.md) - [quantileGK](../reference/quantileGK.md) - [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md) +- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md) +- [quantileTDigest](../reference/quantiletdigest.md) +- [quantileTimingWeighted](../reference/quantiletimingweighted.md) +- [quantileTiming](../reference/quantiletiming.md) +- [quantile](../reference/quantile.md) +- [quantiles](../reference/quantiles.md) +- [rankCorr](../reference/rankCorr.md) +- [simpleLinearRegression](../reference/simplelinearregression.md) +- [singleValueOrNull](../reference/singlevalueornull.md) +- [skewPop](../reference/skewpop.md) +- [skewSamp](../reference/skewsamp.md) - [sparkBar](../reference/sparkbar.md) +- [stddevPopStable](../reference/stddevpopstable.md) +- [stddevPop](../reference/stddevpop.md) +- [stddevSampStable](../reference/stddevsampstable.md) +- [stddevSamp](../reference/stddevsamp.md) +- [stochasticLinearRegression](../reference/stochasticlinearregression.md) +- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md) +- [studentTTest](../reference/studentttest.md) - [sumCount](../reference/sumcount.md) -- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md) +- [sumKahan](../reference/sumkahan.md) +- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow) +- [sumMapFiltered](../parametric-functions.md/#summapfiltered) +- [sumMapWithOverflow](../reference/summapwithoverflow.md) +- [sumMap](../reference/summap.md) +- [sumWithOverflow](../reference/sumwithoverflow.md) +- [sum](../reference/sum.md) +- [theilsU](../reference/theilsu.md) +- [topKWeighted](../reference/topkweighted.md) +- [topK](../reference/topk.md) +- [uniqCombined64](../reference/uniqcombined64.md) +- [uniqCombined](../reference/uniqcombined.md) +- [uniqExact](../reference/uniqexact.md) +- [uniqHLL12](../reference/uniqhll12.md) +- [uniqTheta](../reference/uniqthetasketch.md) +- [uniq](../reference/uniq.md) +- [varPop](../reference/varpop.md) +- [varSamp](../reference/varsamp.md) +- [welchTTest](../reference/welchttest.md) From 2b20c2d2f22f9a399f4f43f0920f6b0df978c1a9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 01:46:00 +0100 Subject: [PATCH 404/566] Fix a race --- src/Databases/DatabaseAtomic.cpp | 14 +++++++++----- src/Databases/DatabaseAtomic.h | 2 +- src/Databases/DatabaseOnDisk.cpp | 8 ++++++-- src/Databases/DatabaseOnDisk.h | 2 +- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 88727d0389e..bd077ccd7b5 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -19,6 +18,7 @@ #include #include + namespace fs = std::filesystem; namespace DB @@ -69,9 +69,13 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, C void 
DatabaseAtomic::createDirectories() { - if (database_atomic_directories_created.test_and_set()) - return; - DatabaseOnDisk::createDirectories(); + std::lock_guard lock(mutex); + createDirectoriesUnlocked(); +} + +void DatabaseAtomic::createDirectoriesUnlocked() +{ + DatabaseOnDisk::createDirectoriesUnlocked(); fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); fs::create_directories(path_to_table_symlinks); tryCreateMetadataSymlink(); @@ -113,9 +117,9 @@ void DatabaseAtomic::drop(ContextPtr) void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name, const StoragePtr & table, const String & relative_table_path) { assert(relative_table_path != data_path && !relative_table_path.empty()); - createDirectories(); DetachedTables not_in_use; std::lock_guard lock(mutex); + createDirectoriesUnlocked(); not_in_use = cleanupDetachedTables(); auto table_id = table->getStorageID(); assertDetachedTableNotInUse(table_id.uuid); diff --git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h index 3d0b74e31a0..7e909128635 100644 --- a/src/Databases/DatabaseAtomic.h +++ b/src/Databases/DatabaseAtomic.h @@ -76,8 +76,8 @@ protected: using DetachedTables = std::unordered_map; [[nodiscard]] DetachedTables cleanupDetachedTables() TSA_REQUIRES(mutex); - std::atomic_flag database_atomic_directories_created = ATOMIC_FLAG_INIT; void createDirectories(); + void createDirectoriesUnlocked() TSA_REQUIRES(mutex); void tryCreateMetadataSymlink(); diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 2f4280fe485..93ecf9cf11c 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -185,8 +185,12 @@ DatabaseOnDisk::DatabaseOnDisk( void DatabaseOnDisk::createDirectories() { - if (directories_created.test_and_set()) - return; + std::lock_guard lock(mutex); + createDirectoriesUnlocked(); +} + +void DatabaseOnDisk::createDirectoriesUnlocked() +{ fs::create_directories(std::filesystem::path(getContext()->getPath()) / data_path); fs::create_directories(metadata_path); } diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 0c0ecf76a26..1e11d21cc87 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -99,8 +99,8 @@ protected: virtual void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach); virtual void setDetachedTableNotInUseForce(const UUID & /*uuid*/) {} - std::atomic_flag directories_created = ATOMIC_FLAG_INIT; void createDirectories(); + void createDirectoriesUnlocked() TSA_REQUIRES(mutex); const String metadata_path; const String data_path; From 3a855f501cd5d16ff97e9dde8b6fcb2d3b7ae497 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 02:15:31 +0100 Subject: [PATCH 405/566] Cleanups --- base/base/DecomposedFloat.h | 2 +- base/base/wide_integer.h | 1 - base/base/wide_integer_impl.h | 8 +------- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/base/base/DecomposedFloat.h b/base/base/DecomposedFloat.h index 3bd059cb21c..fef91adefb0 100644 --- a/base/base/DecomposedFloat.h +++ b/base/base/DecomposedFloat.h @@ -230,4 +230,4 @@ struct DecomposedFloat using DecomposedFloat64 = DecomposedFloat; using DecomposedFloat32 = DecomposedFloat; -using DecomposedFloat16 = DecomposedFloat<__bf16>; +using DecomposedFloat16 = DecomposedFloat; diff --git a/base/base/wide_integer.h b/base/base/wide_integer.h index baf6e490ada..f3a4dc9e6d5 100644 --- 
a/base/base/wide_integer.h +++ b/base/base/wide_integer.h @@ -118,7 +118,6 @@ public: constexpr operator long double() const noexcept; constexpr operator double() const noexcept; constexpr operator float() const noexcept; - constexpr operator __bf16() const noexcept; struct _impl; diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h index d0bbd7df9d4..3787971a20e 100644 --- a/base/base/wide_integer_impl.h +++ b/base/base/wide_integer_impl.h @@ -154,7 +154,7 @@ struct common_type, Arithmetic> static_assert(wide::ArithmeticConcept()); using type = std::conditional_t< - std::is_floating_point_v || std::is_same_v, + std::is_floating_point_v, Arithmetic, std::conditional_t< sizeof(Arithmetic) * 8 < Bits, @@ -1300,12 +1300,6 @@ constexpr integer::operator float() const noexcept return static_cast(static_cast(*this)); } -template -constexpr integer::operator __bf16() const noexcept -{ - return static_cast<__bf16>(static_cast(*this)); -} - // Unary operators template constexpr integer operator~(const integer & lhs) noexcept From ab79efe40f8785a7bd947cd5919feafedfb88259 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Mon, 11 Nov 2024 14:07:19 +0800 Subject: [PATCH 406/566] make scale argument not optional --- .../functions/type-conversion-functions.md | 4 +- src/Functions/parseDateTime.cpp | 130 +++++++----------- .../03252_parse_datetime64.reference | 13 +- .../0_stateless/03252_parse_datetime64.sql | 38 ++--- ..._parse_datetime64_in_joda_syntax.reference | 5 - .../03252_parse_datetime64_in_joda_syntax.sql | 12 +- 6 files changed, 69 insertions(+), 133 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 8043b21744a..72e6fda03f7 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -6880,7 +6880,7 @@ parseDateTime64(str[, [scale, [format[, timezone]]]]) **Arguments** - `str` — The String to be parsed -- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default 6 if not specified. +- `scale` - The scale of [DateTime64](../data-types/datetime64.md). - `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. @@ -6907,7 +6907,7 @@ parseDateTime64InJodaSyntax(str[, [scale, [format[, timezone]]]]) **Arguments** - `str` — The String to be parsed -- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default 0 if not specified. +- `scale` - The scale of [DateTime64](../data-types/datetime64.md). - `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. 
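For illustration, a minimal usage sketch of the changed `parseDateTime64` signature, based on the documentation above and on the test queries added later in this patch (the sample timestamp and the `Etc/GMT-7` timezone come from those tests):

``` sql
-- The scale is now a mandatory second argument; for the MySQL-style syntax it must be 6.
SELECT parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7');

-- Calling the function without a scale is rejected (NUMBER_OF_ARGUMENTS_DOESNT_MATCH).
SELECT parseDateTime64('2024-10-09 10:30:10.123456');
```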
diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 7190c1ad6f8..72e3ba661ca 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -607,87 +607,71 @@ namespace DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { - FunctionArgumentDescriptors mandatory_args{ - {"time", static_cast(&isString), nullptr, "String"} - }; - + FunctionArgumentDescriptors mandatory_args; FunctionArgumentDescriptors optional_args; if constexpr (return_type == ReturnType::DateTime64) - optional_args = {{"scale/format", static_cast( - [](const IDataType & data_type) -> bool { return isUInt(data_type) || isString(data_type); } - ), nullptr, "UInt or String"}, - {"format", static_cast(&isString), nullptr, "String"}, - {"timezone", static_cast(&isString), &isColumnConst, "const String"} + { + mandatory_args = { + {"time", static_cast(&isString), nullptr, "String"}, + {"scale", static_cast(&isUInt8), nullptr, "UInt8"} }; - else optional_args = { {"format", static_cast(&isString), nullptr, "String"}, {"timezone", static_cast(&isString), &isColumnConst, "const String"} }; + } + else + { + mandatory_args = {{"time", static_cast(&isString), nullptr, "String"}}; + optional_args = { + {"format", static_cast(&isString), nullptr, "String"}, + {"timezone", static_cast(&isString), &isColumnConst, "const String"} + }; + } validateFunctionArguments(*this, arguments, mandatory_args, optional_args); String time_zone_name = getTimeZone(arguments).getTimeZone(); DataTypePtr data_type; if constexpr (return_type == ReturnType::DateTime64) { - UInt32 scale = 0; - if (arguments.size() == 1) + UInt8 scale = 0; + if (isUInt8(arguments[1].type)) { - /// In MySQL parse syntax, the scale of microseond is 6. - if constexpr (parse_syntax == ParseSyntax::MySQL) - scale = 6; - } - else - { - if (isUInt(arguments[1].type)) - { - const auto * col_scale = checkAndGetColumnConst(arguments[1].column.get()); - if (col_scale) - scale = col_scale->getValue(); - else - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The input scale value may exceed the max scale value of `DateTime64`: {}.", - maxScaleOfDateTime64); - } + const auto * col_scale = checkAndGetColumnConst(arguments[1].column.get()); + if (col_scale) + scale = col_scale->getValue(); else - { - if constexpr (parse_syntax == ParseSyntax::MySQL) - scale = 6; - } + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale argument is not Const(UInt8) type."); + } + if (parse_syntax == ParseSyntax::MySQL && scale != 6) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale value {} of MySQL parse syntax is not 6.", std::to_string(scale)); + if (scale > maxScaleOfDateTime64) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The scale argument's value {} exceed the max scale value {}.", std::to_string(scale), std::to_string(maxScaleOfDateTime64)); - /// Construct the return type `DataTypDateTime64` with scale and time zone name. The scale value can be specified or be extracted - /// from the format string by counting how many 'S' characters are contained in the format's microsceond fragment. - String format = getFormat(arguments, scale); - std::vector instructions = parseFormat(format); - for (const auto & instruction : instructions) + String format = getFormat(arguments, scale); + std::vector instructions = parseFormat(format); + for (const auto & instruction : instructions) + { + /// Check the scale by counting how many 'S' characters exist in the format string.
+ const String & fragment = instruction.getFragment(); + UInt32 s_cnt = 0; + for (char ch : fragment) { - const String & fragment = instruction.getFragment(); - UInt32 val = 0; - for (char ch : fragment) + if (ch != 'S') { - if (ch != 'S') - { - val = 0; - break; - } - else - val++; + s_cnt = 0; + break; } - /// If the scale is already specified by the second argument, but it not equals the value that extract from the format string, - /// then we should throw an exception; If the scale is not specified, then we should set its value as the extracted one. - if (val != 0 && scale != 0 && val != scale) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The scale of input format string {} not equals the given scale value {}.", - format, - scale); - else if (scale == 0 && val != 0) - scale = val; + else + s_cnt++; } - if (scale > maxScaleOfDateTime64) + /// If the number of 'S' characters in the format string does not equal the scale, then throw an exception to report the error. + if (s_cnt != 0 && s_cnt != scale) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The scale of the input format string {} exceed the max scale value {}.", + "The scale of the input format string {} does not equal the given scale value {}.", format, - maxScaleOfDateTime64); + std::to_string(scale)); } data_type = std::make_shared(scale, time_zone_name); } @@ -2267,18 +2251,7 @@ namespace { size_t format_arg_index = 1; if constexpr (return_type == ReturnType::DateTime64) - { - /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22.22.123', 3), then the format is treated - /// as default value `yyyy-MM-dd HH:mm:ss`. - /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 'yyyy-MM-dd HH:mm:ss.SSS')`, - /// then the second argument is the format. - /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS')`, - /// then the third argument is the format. - if (arguments.size() > 1 && isString(removeNullable(arguments[1].type))) - format_arg_index = 1; - else - format_arg_index = 2; - } + format_arg_index = 2; if (arguments.size() <= format_arg_index) { @@ -2311,18 +2284,11 @@ namespace const DateLUTImpl & getTimeZone(const ColumnsWithTypeAndName & arguments) const { - if (arguments.size() < 3) + if (return_type == ReturnType::DateTime && arguments.size() < 3) return DateLUT::instance(); - else if constexpr (return_type == ReturnType::DateTime64) - { - /// If the return type is DateTime64, and the second argument is UInt type for scale, then it has 2 reasonable situations: - /// the first like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT+8') - /// the second like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f'). And for the - /// first one, we should return the last argument as its timezone, and for the second one, we should return the default time zone as - /// `DateLUT::instance()`.
- if (isUInt(arguments[1].type) && arguments.size() < 4) - return DateLUT::instance(); - } + else if (return_type == ReturnType::DateTime64 && arguments.size() < 4) + return DateLUT::instance(); + size_t timezone_arg_index = arguments.size() - 1; const auto * col = checkAndGetColumnConst(arguments[timezone_arg_index].column.get()); if (!col) diff --git a/tests/queries/0_stateless/03252_parse_datetime64.reference b/tests/queries/0_stateless/03252_parse_datetime64.reference index 27dcef6bf68..263c9b5d8ea 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64.reference +++ b/tests/queries/0_stateless/03252_parse_datetime64.reference @@ -1,17 +1,8 @@ -2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 1970-01-01 08:00:00.000000 -1970-01-01 08:00:00.000 -1970-01-01 08:00:00.000 2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 -2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 -1970-01-01 08:00:00.000 2024-10-09 10:30:10.123456 -\N -\N -\N -2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 -2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 -\N +1970-01-01 08:00:00.000000 diff --git a/tests/queries/0_stateless/03252_parse_datetime64.sql b/tests/queries/0_stateless/03252_parse_datetime64.sql index d28b6e586f7..2a6ef254887 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64.sql +++ b/tests/queries/0_stateless/03252_parse_datetime64.sql @@ -1,32 +1,22 @@ set session_timezone = 'Asia/Shanghai'; -select parseDateTime64('2024-10-09 10:30:10.123456'); -select parseDateTime64('2024-10-09 10:30:10.123'); -- { serverError NOT_ENOUGH_SPACE } -select parseDateTime64('2024-10-09 10:30:10', 3); -- { serverError NOT_ENOUGH_SPACE } -select parseDateTime64('2024-10-09 10:30:10.', 3); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('2024-10-09 10:30:10.123456'); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select parseDateTime64('2024-10-09 10:30:10', 3); -- { serverError BAD_ARGUMENTS } select parseDateTime64('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } -select parseDateTime64('2024-10-09 10:30:10.123456', 6), parseDateTime64('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); -select parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); -select parseDateTime64('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('2024-10-09 10:30:10.123456', 6), parseDateTime64('2024-10-09 10:30:10.123456', 6,'%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); select parseDateTime64('2024-10-09 10:30:10.123', 6, '%Y-%m-%d %H:%i:%s.%f'); -- { serverError NOT_ENOUGH_SPACE } -select parseDateTime64OrZero('2024-10-09 10:30:10.123456'); -select parseDateTime64OrZero('2024-10-09 10:30:10.123'); -select parseDateTime64OrZero('2024-10-09 10:30:10', 3); -select parseDateTime64OrZero('2024-10-09 10:30:10.', 3); +select parseDateTime64OrZero('2024-10-09 10:30:10.123456'); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select parseDateTime64OrZero('2024-10-09 10:30:10', 3); -- { serverError BAD_ARGUMENTS } select parseDateTime64OrZero('2024-10-09 10:30:10', -3); -- {serverError 
ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64OrZero('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } -select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); -select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); -select parseDateTime64OrZero('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%fffff'); -select parseDateTime64OrNull('2024-10-09 10:30:10.123456'); -select parseDateTime64OrNull('2024-10-09 10:30:10.123'); -select parseDateTime64OrNull('2024-10-09 10:30:10', 3); -select parseDateTime64OrNull('2024-10-09 10:30:10.', 3); +select parseDateTime64OrNull('2024-10-09 10:30:10.123456'); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select parseDateTime64OrNull('2024-10-09 10:30:10', 3); -- { serverError BAD_ARGUMENTS } select parseDateTime64OrNull('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64OrNull('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } -select parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); -select parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7');; -select parseDateTime64OrNull('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); \ No newline at end of file +select parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6,'%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7');; +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%fffff'); \ No newline at end of file diff --git a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference index 0b4a28c4b38..99252ce55ca 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference +++ b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference @@ -1,12 +1,9 @@ 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 -2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-10 02:42:10.123456 2024-10-10 01:30:10.123456 2024-10-10 01:30:10.123456 1970-01-01 08:00:00.000 -1970-01-01 08:00:00.000 -2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-10 02:42:10.123456 @@ -15,8 +12,6 @@ 2024-10-10 01:30:10.123456 1970-01-01 08:00:00.000000 \N -\N -2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-10 02:42:10.123456 diff --git a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql index 
8482677e9c9..bcb0fb5a362 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql +++ b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql @@ -1,11 +1,9 @@ set session_timezone = 'Asia/Shanghai'; select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', 3); -- { serverError NOT_ENOUGH_SPACE } -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.', 3); -- { serverError CANNOT_PARSE_DATETIME } select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 6); -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); @@ -17,11 +15,9 @@ select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-8000', 6, 'yyyy-M select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456ABCD', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS } select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', 3); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.', 3); select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 6); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); @@ -33,11 +29,9 @@ select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-8000', 6, ' select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456ABCD', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS } select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', 3); -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.', 3); select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', 9); -- { serverError 
BAD_ARGUMENTS } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 6); -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); From 0768e0b265dc3a7d83d3a4c3ea9ba8625fe70994 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Mon, 11 Nov 2024 14:26:58 +0800 Subject: [PATCH 407/566] update doc & comments --- .../functions/type-conversion-functions.md | 10 ++++----- src/Functions/parseDateTime.cpp | 22 ++++++------------- 2 files changed, 12 insertions(+), 20 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 72e6fda03f7..a92d7055fd5 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -6874,14 +6874,14 @@ Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datet **Syntax** ``` sql -parseDateTime64(str[, [scale, [format[, timezone]]]]) +parseDateTime64(str, scale, [format[, timezone]]) ``` **Arguments** -- `str` — The String to be parsed +- `str` — The String to be parsed. - `scale` - The scale of [DateTime64](../data-types/datetime64.md). -- `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s` if not specified. +- `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s.%f` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. **Returned value(s)** @@ -6901,12 +6901,12 @@ Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datet **Syntax** ``` sql -parseDateTime64InJodaSyntax(str[, [scale, [format[, timezone]]]]) +parseDateTime64InJodaSyntax(str, scale, [format[, timezone]]) ``` **Arguments** -- `str` — The String to be parsed +- `str` — The String to be parsed. - `scale` - The scale of [DateTime64](../data-types/datetime64.md). - `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. 
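As an illustration of the documented Joda-syntax signature, mirroring the test queries from the previous commit (the scale has to match the number of 'S' characters in the format, otherwise the call fails with BAD_ARGUMENTS):

``` sql
-- Scale 3 matches the three 'S' characters of the fractional part.
SELECT parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS');

-- Scale 3 with four 'S' characters is rejected with BAD_ARGUMENTS.
SELECT parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS');
```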
diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 72e3ba661ca..9fea8a4f130 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -608,26 +608,18 @@ namespace DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { FunctionArgumentDescriptors mandatory_args; - FunctionArgumentDescriptors optional_args; if constexpr (return_type == ReturnType::DateTime64) - { mandatory_args = { {"time", static_cast(&isString), nullptr, "String"}, - {"scale", static_cast(&isUInt8), nullptr, "UInt8"} + {"scale", static_cast(&isUInt8), &isColumnConst, "UInt8"} }; - optional_args = { - {"format", static_cast(&isString), nullptr, "String"}, - {"timezone", static_cast(&isString), &isColumnConst, "const String"} - }; - } else - { mandatory_args = {{"time", static_cast(&isString), nullptr, "String"}}; - optional_args = { - {"format", static_cast(&isString), nullptr, "String"}, - {"timezone", static_cast(&isString), &isColumnConst, "const String"} - }; - } + + FunctionArgumentDescriptors optional_args{ + {"format", static_cast(&isString), nullptr, "String"}, + {"timezone", static_cast(&isString), &isColumnConst, "const String"} + }; validateFunctionArguments(*this, arguments, mandatory_args, optional_args); String time_zone_name = getTimeZone(arguments).getTimeZone(); @@ -644,7 +636,7 @@ namespace throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale argument is not Const(UInt8) type."); } if (parse_syntax == ParseSyntax::MySQL && scale != 6) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale value {} of MySQL parse syntax is not 6.", std::to_string(scale)); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale argument's value {} of MySQL parse syntax is not 6.", std::to_string(scale)); if (scale > maxScaleOfDateTime64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale argument's value {} exceed the max scale value {}.", std::to_string(scale), std::to_string(maxScaleOfDateTime64)); From f57bf2ee15fe93377b858efec767627321a69887 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 11 Nov 2024 09:38:26 +0000 Subject: [PATCH 408/566] Fix trash in the docs, pt. II --- .../getting-started/example-datasets/tpch.md | 2 +- .../aggregate-functions/reference/index.md | 113 +----------------- .../data-types/aggregatefunction.md | 4 +- docs/en/sql-reference/data-types/index.md | 29 +---- docs/en/sql-reference/data-types/json.md | 2 +- .../data-types/simpleaggregatefunction.md | 4 +- docs/en/sql-reference/functions/geo/index.md | 68 +---------- .../sql-reference/statements/create/index.md | 14 +-- docs/en/sql-reference/statements/index.md | 25 +--- 9 files changed, 16 insertions(+), 245 deletions(-) diff --git a/docs/en/getting-started/example-datasets/tpch.md b/docs/en/getting-started/example-datasets/tpch.md index de2c425b402..3ea4bffec38 100644 --- a/docs/en/getting-started/example-datasets/tpch.md +++ b/docs/en/getting-started/example-datasets/tpch.md @@ -46,7 +46,7 @@ Detailed table sizes with scale factor 100: | orders | 150.000.000 | 6.15 GB | | lineitem | 600.00.00 | 26.69 GB | -(The table sizes in ClickHouse are taken from `system.tables.total_bytes` and based on below table definitions. +(Compressed sizes in ClickHouse are taken from `system.tables.total_bytes` and based on below table definitions.) Now create tables in ClickHouse. 
diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md index d7b287f764b..ee8f0d5882e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/index.md +++ b/docs/en/sql-reference/aggregate-functions/reference/index.md @@ -7,115 +7,4 @@ toc_hidden: true # List of Aggregate Functions -ClickHouse supports all standard SQL functions (sum, avg, min, max, count) and a wide range of aggregate functions for various applications: - -- [aggThrow](../reference/aggthrow.md) -- [analysisOfVariance](../reference/analysis_of_variance.md) -- [anyHeavy](../reference/anyheavy.md) -- [anyLast](../reference/anylast.md) -- [any](../reference/any.md) -- [argMax](../reference/argmax.md) -- [argMin](../reference/argmin.md) -- [avgWeighted](../reference/avgweighted.md) -- [avg](../reference/avg.md) -- [boundingRatio](../reference/boundrat.md) -- [categoricalInformationValue](../reference/categoricalinformationvalue.md) -- [contingency](../reference/contingency.md) -- [corrMatrix](../reference/corrmatrix.md) -- [corr](../reference/corr.md) -- [corr](../reference/corrstable.md) -- [count](../reference/count.md) -- [covarPopMatrix](../reference/covarpopmatrix.md) -- [covarPop](../reference/covarpop.md) -- [covarSampMatrix](../reference/covarsampmatrix.md) -- [covarSampStable](../reference/covarsampstable.md) -- [covarSamp](../reference/covarsamp.md) -- [covarStable](../reference/covarpopstable.md) -- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md) -- [cramersV](../reference/cramersv.md) -- [deltaSumTimestamp](../reference/deltasumtimestamp.md) -- [deltaSum](../reference/deltasum.md) -- [entropy](../reference/entropy.md) -- [exponentialMovingAverage](../reference/exponentialmovingaverage.md) -- [first_value](../reference/first_value.md) -- [flameGraph](../reference/flame_graph.md) -- [groupArrayInsertAt](../reference/grouparrayinsertat.md) -- [groupArrayIntersect](../reference/grouparrayintersect.md) -- [groupArrayLast](../reference/grouparraylast.md) -- [groupArrayMovingAvg](../reference/grouparraymovingavg.md) -- [groupArrayMovingSum](../reference/grouparraymovingsum.md) -- [groupArraySample](../reference/grouparraysample.md) -- [groupArraySorted](../reference/grouparraysorted.md) -- [groupArray](../reference/grouparray.md) -- [groupBitAnd](../reference/groupbitand.md) -- [groupBitOr](../reference/groupbitor.md) -- [groupBitXor](../reference/groupbitxor.md) -- [groupBitmapAnd](../reference/groupbitmapand.md) -- [groupBitmapOr](../reference/groupbitmapor.md) -- [groupBitmapXor](../reference/groupbitmapxor.md) -- [groupBitmap](../reference/groupbitmap.md) -- [groupUniqArray](../reference/groupuniqarray.md) -- [intervalLengthSum](../reference/intervalLengthSum.md) -- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md) -- [kurtPop](../reference/kurtpop.md) -- [kurtSamp](../reference/kurtsamp.md) -- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md) -- [last_value](../reference/last_value.md) -- [mannwhitneyutest](../reference/mannwhitneyutest.md) -- [maxIntersectionsPosition](../reference/maxintersectionsposition.md) -- [maxIntersections](../reference/maxintersections.md) -- [maxMap](../reference/maxmap.md) -- [max](../reference/max.md) -- [meanZTest](../reference/meanztest.md) -- [median](../reference/median.md) -- [minMap](../reference/minmap.md) -- [min](../reference/min.md) -- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted) -- 
[quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16) -- [quantileDD](../reference/quantileddsketch.md#quantileddsketch) -- [quantileDeterministic](../reference/quantiledeterministic.md) -- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh) -- [quantileExactLow](../reference/quantileexact.md#quantileexactlow) -- [quantileExactWeighted](../reference/quantileexactweighted.md) -- [quantileExact](../reference/quantileexact.md) -- [quantileGK](../reference/quantileGK.md) -- [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md) -- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md) -- [quantileTDigest](../reference/quantiletdigest.md) -- [quantileTimingWeighted](../reference/quantiletimingweighted.md) -- [quantileTiming](../reference/quantiletiming.md) -- [quantile](../reference/quantile.md) -- [quantiles](../reference/quantiles.md) -- [rankCorr](../reference/rankCorr.md) -- [simpleLinearRegression](../reference/simplelinearregression.md) -- [singleValueOrNull](../reference/singlevalueornull.md) -- [skewPop](../reference/skewpop.md) -- [skewSamp](../reference/skewsamp.md) -- [sparkBar](../reference/sparkbar.md) -- [stddevPopStable](../reference/stddevpopstable.md) -- [stddevPop](../reference/stddevpop.md) -- [stddevSampStable](../reference/stddevsampstable.md) -- [stddevSamp](../reference/stddevsamp.md) -- [stochasticLinearRegression](../reference/stochasticlinearregression.md) -- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md) -- [studentTTest](../reference/studentttest.md) -- [sumCount](../reference/sumcount.md) -- [sumKahan](../reference/sumkahan.md) -- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow) -- [sumMapFiltered](../parametric-functions.md/#summapfiltered) -- [sumMapWithOverflow](../reference/summapwithoverflow.md) -- [sumMap](../reference/summap.md) -- [sumWithOverflow](../reference/sumwithoverflow.md) -- [sum](../reference/sum.md) -- [theilsU](../reference/theilsu.md) -- [topKWeighted](../reference/topkweighted.md) -- [topK](../reference/topk.md) -- [uniqCombined64](../reference/uniqcombined64.md) -- [uniqCombined](../reference/uniqcombined.md) -- [uniqExact](../reference/uniqexact.md) -- [uniqHLL12](../reference/uniqhll12.md) -- [uniqTheta](../reference/uniqthetasketch.md) -- [uniq](../reference/uniq.md) -- [varPop](../reference/varpop.md) -- [varSamp](../reference/varsamp.md) -- [welchTTest](../reference/welchttest.md) +ClickHouse supports all standard SQL aggregate functions ([sum](../reference/sum.md), [avg](../reference/avg.md), [min](../reference/min.md), [max](../reference/max.md), [count](../reference/count.md)), as well as a wide range of other aggregate functions. diff --git a/docs/en/sql-reference/data-types/aggregatefunction.md b/docs/en/sql-reference/data-types/aggregatefunction.md index 37f0d0e50ae..4cad27db68b 100644 --- a/docs/en/sql-reference/data-types/aggregatefunction.md +++ b/docs/en/sql-reference/data-types/aggregatefunction.md @@ -6,7 +6,9 @@ sidebar_label: AggregateFunction # AggregateFunction -Aggregate functions can have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. 
To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge`suffix. +Aggregate functions have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). +The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. +To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge`suffix. `AggregateFunction(name, types_of_arguments...)` — parametric data type. diff --git a/docs/en/sql-reference/data-types/index.md b/docs/en/sql-reference/data-types/index.md index 2b89dd145e6..134678f71bb 100644 --- a/docs/en/sql-reference/data-types/index.md +++ b/docs/en/sql-reference/data-types/index.md @@ -6,29 +6,8 @@ sidebar_position: 1 # Data Types in ClickHouse -ClickHouse can store various kinds of data in table cells. This section describes the supported data types and special considerations for using and/or implementing them if any. +This section describes the data types supported by ClickHouse, for example [integers](int-uint.md), [floats](float.md) and [strings](string.md). -:::note -You can check whether a data type name is case-sensitive in the [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) table. -::: - -ClickHouse data types include: - -- **Integer types**: [signed and unsigned integers](./int-uint.md) (`UInt8`, `UInt16`, `UInt32`, `UInt64`, `UInt128`, `UInt256`, `Int8`, `Int16`, `Int32`, `Int64`, `Int128`, `Int256`) -- **Floating-point numbers**: [floats](./float.md)(`Float32` and `Float64`) and [`Decimal` values](./decimal.md) -- **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md) -- **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md) -- **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time -- **Object**: the [`Object`](./json.md) stores a JSON document in a single column (deprecated) -- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column -- **UUID**: a performant option for storing [`UUID` values](./uuid.md) -- **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column -- **Arrays**: any column can be defined as an [`Array` of values](./array.md) -- **Maps**: use [`Map`](./map.md) for storing key/value pairs -- **Aggregation function types**: use [`SimpleAggregateFunction`](./simpleaggregatefunction.md) and [`AggregateFunction`](./aggregatefunction.md) for storing the intermediate status of aggregate function results -- **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell -- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type. 
-- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column settings its default value for the data type) -- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses -- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon` -- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md) +System table [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) provides an +overview of all available data types. +It also shows whether a data type is an alias to another data type and its name is case-sensitive (e.g. `bool` vs. `BOOL`). diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index e48b308a620..ce69f15f0fa 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -7,7 +7,7 @@ keywords: [object, data type] # Object Data Type (deprecated) -**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864). +**This feature is not production-ready and deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
diff --git a/docs/en/sql-reference/data-types/simpleaggregatefunction.md b/docs/en/sql-reference/data-types/simpleaggregatefunction.md index 4fb74ac30e4..8edd8b5b8ff 100644 --- a/docs/en/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/en/sql-reference/data-types/simpleaggregatefunction.md @@ -5,7 +5,9 @@ sidebar_label: SimpleAggregateFunction --- # SimpleAggregateFunction -`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data. +`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value (intermediate state) of the aggregate function, but not its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. +This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. +This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data. The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix. 
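A minimal sketch of how such a column is typically declared; the table and column names are illustrative and not taken from the patch:

``` sql
-- The column keeps only the current value of `sum`, not a full aggregate state,
-- so partial results from different parts can simply be summed again on merge.
CREATE TABLE simple_agg_example
(
    key UInt64,
    total SimpleAggregateFunction(sum, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY key;
```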
diff --git a/docs/en/sql-reference/functions/geo/index.md b/docs/en/sql-reference/functions/geo/index.md index d46e60281e2..51b6868611a 100644 --- a/docs/en/sql-reference/functions/geo/index.md +++ b/docs/en/sql-reference/functions/geo/index.md @@ -5,70 +5,4 @@ sidebar_position: 62 title: "Geo Functions" --- - -## Geographical Coordinates Functions - -- [greatCircleDistance](./coordinates.md#greatcircledistance) -- [geoDistance](./coordinates.md#geodistance) -- [greatCircleAngle](./coordinates.md#greatcircleangle) -- [pointInEllipses](./coordinates.md#pointinellipses) -- [pointInPolygon](./coordinates.md#pointinpolygon) - -## Geohash Functions -- [geohashEncode](./geohash.md#geohashencode) -- [geohashDecode](./geohash.md#geohashdecode) -- [geohashesInBox](./geohash.md#geohashesinbox) - -## H3 Indexes Functions - -- [h3IsValid](./h3.md#h3isvalid) -- [h3GetResolution](./h3.md#h3getresolution) -- [h3EdgeAngle](./h3.md#h3edgeangle) -- [h3EdgeLengthM](./h3.md#h3edgelengthm) -- [h3EdgeLengthKm](./h3.md#h3edgelengthkm) -- [geoToH3](./h3.md#geotoh3) -- [h3ToGeo](./h3.md#h3togeo) -- [h3ToGeoBoundary](./h3.md#h3togeoboundary) -- [h3kRing](./h3.md#h3kring) -- [h3GetBaseCell](./h3.md#h3getbasecell) -- [h3HexAreaM2](./h3.md#h3hexaream2) -- [h3HexAreaKm2](./h3.md#h3hexareakm2) -- [h3IndexesAreNeighbors](./h3.md#h3indexesareneighbors) -- [h3ToChildren](./h3.md#h3tochildren) -- [h3ToParent](./h3.md#h3toparent) -- [h3ToString](./h3.md#h3tostring) -- [stringToH3](./h3.md#stringtoh3) -- [h3GetResolution](./h3.md#h3getresolution) -- [h3IsResClassIII](./h3.md#h3isresclassiii) -- [h3IsPentagon](./h3.md#h3ispentagon) -- [h3GetFaces](./h3.md#h3getfaces) -- [h3CellAreaM2](./h3.md#h3cellaream2) -- [h3CellAreaRads2](./h3.md#h3cellarearads2) -- [h3ToCenterChild](./h3.md#h3tocenterchild) -- [h3ExactEdgeLengthM](./h3.md#h3exactedgelengthm) -- [h3ExactEdgeLengthKm](./h3.md#h3exactedgelengthkm) -- [h3ExactEdgeLengthRads](./h3.md#h3exactedgelengthrads) -- [h3NumHexagons](./h3.md#h3numhexagons) -- [h3Line](./h3.md#h3line) -- [h3Distance](./h3.md#h3distance) -- [h3HexRing](./h3.md#h3hexring) -- [h3GetUnidirectionalEdge](./h3.md#h3getunidirectionaledge) -- [h3UnidirectionalEdgeIsValid](./h3.md#h3unidirectionaledgeisvalid) -- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3getoriginindexfromunidirectionaledge) -- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3getdestinationindexfromunidirectionaledge) -- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3getindexesfromunidirectionaledge) -- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3getunidirectionaledgesfromhexagon) -- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3getunidirectionaledgeboundary) - -## S2 Index Functions - -- [geoToS2](./s2.md#geotos2) -- [s2ToGeo](./s2.md#s2togeo) -- [s2GetNeighbors](./s2.md#s2getneighbors) -- [s2CellsIntersect](./s2.md#s2cellsintersect) -- [s2CapContains](./s2.md#s2capcontains) -- [s2CapUnion](./s2.md#s2capunion) -- [s2RectAdd](./s2.md#s2rectadd) -- [s2RectContains](./s2.md#s2rectcontains) -- [s2RectUnion](./s2.md#s2rectunion) -- [s2RectIntersection](./s2.md#s2rectintersection) +Functions for working with geometric objects, for example [to calculate distances between points on a sphere](./coordinates.md), [compute geohashes](./geohash.md), and work with [h3 indexes](./h3.md). 
diff --git a/docs/en/sql-reference/statements/create/index.md b/docs/en/sql-reference/statements/create/index.md index fa39526a53e..5854d7cf9d2 100644 --- a/docs/en/sql-reference/statements/create/index.md +++ b/docs/en/sql-reference/statements/create/index.md @@ -6,16 +6,4 @@ sidebar_label: CREATE # CREATE Queries -Create queries make a new entity of one of the following kinds: - -- [DATABASE](/docs/en/sql-reference/statements/create/database.md) -- [TABLE](/docs/en/sql-reference/statements/create/table.md) -- [VIEW](/docs/en/sql-reference/statements/create/view.md) -- [DICTIONARY](/docs/en/sql-reference/statements/create/dictionary.md) -- [FUNCTION](/docs/en/sql-reference/statements/create/function.md) -- [USER](/docs/en/sql-reference/statements/create/user.md) -- [ROLE](/docs/en/sql-reference/statements/create/role.md) -- [ROW POLICY](/docs/en/sql-reference/statements/create/row-policy.md) -- [QUOTA](/docs/en/sql-reference/statements/create/quota.md) -- [SETTINGS PROFILE](/docs/en/sql-reference/statements/create/settings-profile.md) -- [NAMED COLLECTION](/docs/en/sql-reference/statements/create/named-collection.md) +CREATE queries create (for example) new [databases](/docs/en/sql-reference/statements/create/database.md), [tables](/docs/en/sql-reference/statements/create/table.md) and [views](/docs/en/sql-reference/statements/create/view.md). diff --git a/docs/en/sql-reference/statements/index.md b/docs/en/sql-reference/statements/index.md index 5aa61cf8d21..f288b30b27b 100644 --- a/docs/en/sql-reference/statements/index.md +++ b/docs/en/sql-reference/statements/index.md @@ -6,27 +6,4 @@ sidebar_label: List of statements # ClickHouse SQL Statements -Statements represent various kinds of action you can perform using SQL queries. Each kind of statement has it’s own syntax and usage details that are described separately: - -- [SELECT](/docs/en/sql-reference/statements/select/index.md) -- [INSERT INTO](/docs/en/sql-reference/statements/insert-into.md) -- [CREATE](/docs/en/sql-reference/statements/create/index.md) -- [ALTER](/docs/en/sql-reference/statements/alter/index.md) -- [SYSTEM](/docs/en/sql-reference/statements/system.md) -- [SHOW](/docs/en/sql-reference/statements/show.md) -- [GRANT](/docs/en/sql-reference/statements/grant.md) -- [REVOKE](/docs/en/sql-reference/statements/revoke.md) -- [ATTACH](/docs/en/sql-reference/statements/attach.md) -- [CHECK TABLE](/docs/en/sql-reference/statements/check-table.md) -- [DESCRIBE TABLE](/docs/en/sql-reference/statements/describe-table.md) -- [DETACH](/docs/en/sql-reference/statements/detach.md) -- [DROP](/docs/en/sql-reference/statements/drop.md) -- [EXISTS](/docs/en/sql-reference/statements/exists.md) -- [KILL](/docs/en/sql-reference/statements/kill.md) -- [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md) -- [RENAME](/docs/en/sql-reference/statements/rename.md) -- [SET](/docs/en/sql-reference/statements/set.md) -- [SET ROLE](/docs/en/sql-reference/statements/set-role.md) -- [TRUNCATE](/docs/en/sql-reference/statements/truncate.md) -- [USE](/docs/en/sql-reference/statements/use.md) -- [EXPLAIN](/docs/en/sql-reference/statements/explain.md) +Users interact with ClickHouse using SQL statements. ClickHouse supports common SQL statements like [SELECT](select/index.md) and [CREATE](create/index.md), but it also provides specialized statements like [KILL](kill.md) and [OPTIMIZE](optimize.md). 
From 5aa9e64070cda74b65fa6cb639e2c83cd1abee67 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 11 Nov 2024 10:11:23 +0000 Subject: [PATCH 409/566] Fix spelling --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index a58b5e9ff58..a0d4d1d349e 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1811,6 +1811,7 @@ geocode geohash geohashDecode geohashEncode +geohashes geohashesInBox geoip geospatial From 206bd174c37a7e6ea47eda9c228c2aa6a5f2fff3 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sat, 2 Nov 2024 19:30:03 +0100 Subject: [PATCH 410/566] Corrections after reworking backup/restore synchronization. --- src/Backups/BackupConcurrencyCheck.cpp | 16 +- src/Backups/BackupConcurrencyCheck.h | 11 +- src/Backups/BackupCoordinationCleaner.cpp | 36 +- src/Backups/BackupCoordinationCleaner.h | 17 +- src/Backups/BackupCoordinationLocal.cpp | 3 +- src/Backups/BackupCoordinationLocal.h | 13 +- src/Backups/BackupCoordinationOnCluster.cpp | 95 +- src/Backups/BackupCoordinationOnCluster.h | 20 +- src/Backups/BackupCoordinationStageSync.cpp | 895 ++++++++++++------- src/Backups/BackupCoordinationStageSync.h | 97 +- src/Backups/BackupsWorker.cpp | 108 ++- src/Backups/BackupsWorker.h | 2 - src/Backups/IBackupCoordination.h | 28 +- src/Backups/IRestoreCoordination.h | 28 +- src/Backups/RestoreCoordinationLocal.cpp | 4 +- src/Backups/RestoreCoordinationLocal.h | 14 +- src/Backups/RestoreCoordinationOnCluster.cpp | 95 +- src/Backups/RestoreCoordinationOnCluster.h | 20 +- 18 files changed, 887 insertions(+), 615 deletions(-) diff --git a/src/Backups/BackupConcurrencyCheck.cpp b/src/Backups/BackupConcurrencyCheck.cpp index 8b29ae41b53..a67d241845d 100644 --- a/src/Backups/BackupConcurrencyCheck.cpp +++ b/src/Backups/BackupConcurrencyCheck.cpp @@ -14,12 +14,12 @@ namespace ErrorCodes BackupConcurrencyCheck::BackupConcurrencyCheck( - const UUID & backup_or_restore_uuid_, bool is_restore_, bool on_cluster_, + const String & zookeeper_path_, bool allow_concurrency_, BackupConcurrencyCounters & counters_) - : is_restore(is_restore_), backup_or_restore_uuid(backup_or_restore_uuid_), on_cluster(on_cluster_), counters(counters_) + : is_restore(is_restore_), on_cluster(on_cluster_), zookeeper_path(zookeeper_path_), counters(counters_) { std::lock_guard lock{counters.mutex}; @@ -32,7 +32,7 @@ BackupConcurrencyCheck::BackupConcurrencyCheck( size_t num_on_cluster_restores = counters.on_cluster_restores.size(); if (on_cluster) { - if (!counters.on_cluster_restores.contains(backup_or_restore_uuid)) + if (!counters.on_cluster_restores.contains(zookeeper_path)) ++num_on_cluster_restores; } else @@ -47,7 +47,7 @@ BackupConcurrencyCheck::BackupConcurrencyCheck( size_t num_on_cluster_backups = counters.on_cluster_backups.size(); if (on_cluster) { - if (!counters.on_cluster_backups.contains(backup_or_restore_uuid)) + if (!counters.on_cluster_backups.contains(zookeeper_path)) ++num_on_cluster_backups; } else @@ -64,9 +64,9 @@ BackupConcurrencyCheck::BackupConcurrencyCheck( if (on_cluster) { if (is_restore) - ++counters.on_cluster_restores[backup_or_restore_uuid]; + ++counters.on_cluster_restores[zookeeper_path]; else - ++counters.on_cluster_backups[backup_or_restore_uuid]; + ++counters.on_cluster_backups[zookeeper_path]; } else { @@ -86,7 +86,7 @@ 
BackupConcurrencyCheck::~BackupConcurrencyCheck() { if (is_restore) { - auto it = counters.on_cluster_restores.find(backup_or_restore_uuid); + auto it = counters.on_cluster_restores.find(zookeeper_path); if (it != counters.on_cluster_restores.end()) { if (!--it->second) @@ -95,7 +95,7 @@ BackupConcurrencyCheck::~BackupConcurrencyCheck() } else { - auto it = counters.on_cluster_backups.find(backup_or_restore_uuid); + auto it = counters.on_cluster_backups.find(zookeeper_path); if (it != counters.on_cluster_backups.end()) { if (!--it->second) diff --git a/src/Backups/BackupConcurrencyCheck.h b/src/Backups/BackupConcurrencyCheck.h index 048a23a716a..a1baeff5464 100644 --- a/src/Backups/BackupConcurrencyCheck.h +++ b/src/Backups/BackupConcurrencyCheck.h @@ -1,7 +1,8 @@ #pragma once -#include +#include #include +#include #include #include @@ -19,9 +20,9 @@ public: /// Checks concurrency of a BACKUP operation or a RESTORE operation. /// Keep a constructed instance of BackupConcurrencyCheck until the operation is done. BackupConcurrencyCheck( - const UUID & backup_or_restore_uuid_, bool is_restore_, bool on_cluster_, + const String & zookeeper_path_, bool allow_concurrency_, BackupConcurrencyCounters & counters_); @@ -31,8 +32,8 @@ public: private: const bool is_restore; - const UUID backup_or_restore_uuid; const bool on_cluster; + const String zookeeper_path; BackupConcurrencyCounters & counters; }; @@ -47,8 +48,8 @@ private: friend class BackupConcurrencyCheck; size_t local_backups TSA_GUARDED_BY(mutex) = 0; size_t local_restores TSA_GUARDED_BY(mutex) = 0; - std::unordered_map on_cluster_backups TSA_GUARDED_BY(mutex); - std::unordered_map on_cluster_restores TSA_GUARDED_BY(mutex); + std::unordered_map on_cluster_backups TSA_GUARDED_BY(mutex); + std::unordered_map on_cluster_restores TSA_GUARDED_BY(mutex); std::mutex mutex; }; diff --git a/src/Backups/BackupCoordinationCleaner.cpp b/src/Backups/BackupCoordinationCleaner.cpp index 1f5068a94de..47095f27eb3 100644 --- a/src/Backups/BackupCoordinationCleaner.cpp +++ b/src/Backups/BackupCoordinationCleaner.cpp @@ -4,31 +4,29 @@ namespace DB { -BackupCoordinationCleaner::BackupCoordinationCleaner(const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_) - : zookeeper_path(zookeeper_path_), with_retries(with_retries_), log(log_) +BackupCoordinationCleaner::BackupCoordinationCleaner(bool is_restore_, const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_) + : is_restore(is_restore_), zookeeper_path(zookeeper_path_), with_retries(with_retries_), log(log_) { } -void BackupCoordinationCleaner::cleanup() +bool BackupCoordinationCleaner::cleanup(bool throw_if_error) { - tryRemoveAllNodes(/* throw_if_error = */ true, /* retries_kind = */ WithRetries::kNormal); + WithRetries::Kind retries_kind = throw_if_error ? 
WithRetries::kNormal : WithRetries::kErrorHandling; + return cleanupImpl(throw_if_error, retries_kind); } -bool BackupCoordinationCleaner::tryCleanupAfterError() noexcept -{ - return tryRemoveAllNodes(/* throw_if_error = */ false, /* retries_kind = */ WithRetries::kNormal); -} - -bool BackupCoordinationCleaner::tryRemoveAllNodes(bool throw_if_error, WithRetries::Kind retries_kind) +bool BackupCoordinationCleaner::cleanupImpl(bool throw_if_error, WithRetries::Kind retries_kind) { { std::lock_guard lock{mutex}; - if (cleanup_result.succeeded) - return true; - if (cleanup_result.exception) + if (succeeded) { - if (throw_if_error) - std::rethrow_exception(cleanup_result.exception); + LOG_TRACE(log, "Nodes from ZooKeeper are already removed"); + return true; + } + if (tried) + { + LOG_INFO(log, "Skipped removing nodes from ZooKeeper because because earlier we failed to do that"); return false; } } @@ -44,16 +42,18 @@ bool BackupCoordinationCleaner::tryRemoveAllNodes(bool throw_if_error, WithRetri }); std::lock_guard lock{mutex}; - cleanup_result.succeeded = true; + tried = true; + succeeded = true; return true; } catch (...) { - LOG_TRACE(log, "Caught exception while removing nodes from ZooKeeper for this restore: {}", + LOG_TRACE(log, "Caught exception while removing nodes from ZooKeeper for this {}: {}", + is_restore ? "restore" : "backup", getCurrentExceptionMessage(/* with_stacktrace= */ false, /* check_embedded_stacktrace= */ true)); std::lock_guard lock{mutex}; - cleanup_result.exception = std::current_exception(); + tried = true; if (throw_if_error) throw; diff --git a/src/Backups/BackupCoordinationCleaner.h b/src/Backups/BackupCoordinationCleaner.h index 43e095d9f33..c760a3611f9 100644 --- a/src/Backups/BackupCoordinationCleaner.h +++ b/src/Backups/BackupCoordinationCleaner.h @@ -12,14 +12,14 @@ namespace DB class BackupCoordinationCleaner { public: - BackupCoordinationCleaner(const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_); + BackupCoordinationCleaner(bool is_restore_, const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_); - void cleanup(); - bool tryCleanupAfterError() noexcept; + bool cleanup(bool throw_if_error); private: - bool tryRemoveAllNodes(bool throw_if_error, WithRetries::Kind retries_kind); + bool cleanupImpl(bool throw_if_error, WithRetries::Kind retries_kind); + const bool is_restore; const String zookeeper_path; /// A reference to a field of the parent object which is either BackupCoordinationOnCluster or RestoreCoordinationOnCluster. 
@@ -27,13 +27,8 @@ private: const LoggerPtr log; - struct CleanupResult - { - bool succeeded = false; - std::exception_ptr exception; - }; - CleanupResult cleanup_result TSA_GUARDED_BY(mutex); - + bool tried TSA_GUARDED_BY(mutex) = false; + bool succeeded TSA_GUARDED_BY(mutex) = false; std::mutex mutex; }; diff --git a/src/Backups/BackupCoordinationLocal.cpp b/src/Backups/BackupCoordinationLocal.cpp index 8bd6b4d327d..402e789eacb 100644 --- a/src/Backups/BackupCoordinationLocal.cpp +++ b/src/Backups/BackupCoordinationLocal.cpp @@ -11,12 +11,11 @@ namespace DB { BackupCoordinationLocal::BackupCoordinationLocal( - const UUID & backup_uuid_, bool is_plain_backup_, bool allow_concurrent_backup_, BackupConcurrencyCounters & concurrency_counters_) : log(getLogger("BackupCoordinationLocal")) - , concurrency_check(backup_uuid_, /* is_restore = */ false, /* on_cluster = */ false, allow_concurrent_backup_, concurrency_counters_) + , concurrency_check(/* is_restore = */ false, /* on_cluster = */ false, /* zookeeper_path = */ "", allow_concurrent_backup_, concurrency_counters_) , file_infos(is_plain_backup_) { } diff --git a/src/Backups/BackupCoordinationLocal.h b/src/Backups/BackupCoordinationLocal.h index 09991c0d301..e63fcde981a 100644 --- a/src/Backups/BackupCoordinationLocal.h +++ b/src/Backups/BackupCoordinationLocal.h @@ -23,20 +23,19 @@ class BackupCoordinationLocal : public IBackupCoordination { public: explicit BackupCoordinationLocal( - const UUID & backup_uuid_, bool is_plain_backup_, bool allow_concurrent_backup_, BackupConcurrencyCounters & concurrency_counters_); ~BackupCoordinationLocal() override; + void setBackupQueryIsSentToOtherHosts() override {} + bool isBackupQuerySentToOtherHosts() const override { return false; } Strings setStage(const String &, const String &, bool) override { return {}; } - void setBackupQueryWasSentToOtherHosts() override {} - bool trySetError(std::exception_ptr) override { return true; } - void finish() override {} - bool tryFinishAfterError() noexcept override { return true; } - void waitForOtherHostsToFinish() override {} - bool tryWaitForOtherHostsToFinishAfterError() noexcept override { return true; } + bool setError(std::exception_ptr, bool) override { return true; } + bool waitOtherHostsFinish(bool) const override { return true; } + bool finish(bool) override { return true; } + bool cleanup(bool) override { return true; } void addReplicatedPartNames(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name, const std::vector & part_names_and_checksums) override; diff --git a/src/Backups/BackupCoordinationOnCluster.cpp b/src/Backups/BackupCoordinationOnCluster.cpp index dc34939f805..1b14f226eff 100644 --- a/src/Backups/BackupCoordinationOnCluster.cpp +++ b/src/Backups/BackupCoordinationOnCluster.cpp @@ -184,17 +184,21 @@ BackupCoordinationOnCluster::BackupCoordinationOnCluster( , plain_backup(is_plain_backup_) , log(getLogger("BackupCoordinationOnCluster")) , with_retries(log, get_zookeeper_, keeper_settings, process_list_element_, [root_zookeeper_path_](Coordination::ZooKeeperWithFaultInjection::Ptr zk) { zk->sync(root_zookeeper_path_); }) - , concurrency_check(backup_uuid_, /* is_restore = */ false, /* on_cluster = */ true, allow_concurrent_backup_, concurrency_counters_) - , stage_sync(/* is_restore = */ false, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_backup_, with_retries, schedule_, process_list_element_, log) - , cleaner(zookeeper_path, with_retries, log) + , cleaner(/* 
is_restore = */ false, zookeeper_path, with_retries, log) + , stage_sync(/* is_restore = */ false, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_backup_, concurrency_counters_, with_retries, schedule_, process_list_element_, log) { - createRootNodes(); + try + { + createRootNodes(); + } + catch (...) + { + stage_sync.setError(std::current_exception(), /* throw_if_error = */ false); + throw; + } } -BackupCoordinationOnCluster::~BackupCoordinationOnCluster() -{ - tryFinishImpl(); -} +BackupCoordinationOnCluster::~BackupCoordinationOnCluster() = default; void BackupCoordinationOnCluster::createRootNodes() { @@ -217,69 +221,52 @@ void BackupCoordinationOnCluster::createRootNodes() }); } +void BackupCoordinationOnCluster::setBackupQueryIsSentToOtherHosts() +{ + stage_sync.setQueryIsSentToOtherHosts(); +} + +bool BackupCoordinationOnCluster::isBackupQuerySentToOtherHosts() const +{ + return stage_sync.isQuerySentToOtherHosts(); +} + Strings BackupCoordinationOnCluster::setStage(const String & new_stage, const String & message, bool sync) { stage_sync.setStage(new_stage, message); - - if (!sync) - return {}; - - return stage_sync.waitForHostsToReachStage(new_stage, all_hosts_without_initiator); + if (sync) + return stage_sync.waitHostsReachStage(all_hosts_without_initiator, new_stage); + return {}; } -void BackupCoordinationOnCluster::setBackupQueryWasSentToOtherHosts() +bool BackupCoordinationOnCluster::setError(std::exception_ptr exception, bool throw_if_error) { - backup_query_was_sent_to_other_hosts = true; + return stage_sync.setError(exception, throw_if_error); } -bool BackupCoordinationOnCluster::trySetError(std::exception_ptr exception) +bool BackupCoordinationOnCluster::waitOtherHostsFinish(bool throw_if_error) const { - return stage_sync.trySetError(exception); + return stage_sync.waitOtherHostsFinish(throw_if_error); } -void BackupCoordinationOnCluster::finish() +bool BackupCoordinationOnCluster::finish(bool throw_if_error) { - bool other_hosts_also_finished = false; - stage_sync.finish(other_hosts_also_finished); - - if ((current_host == kInitiator) && (other_hosts_also_finished || !backup_query_was_sent_to_other_hosts)) - cleaner.cleanup(); + return stage_sync.finish(throw_if_error); } -bool BackupCoordinationOnCluster::tryFinishAfterError() noexcept +bool BackupCoordinationOnCluster::cleanup(bool throw_if_error) { - return tryFinishImpl(); -} - -bool BackupCoordinationOnCluster::tryFinishImpl() noexcept -{ - bool other_hosts_also_finished = false; - if (!stage_sync.tryFinishAfterError(other_hosts_also_finished)) - return false; - - if ((current_host == kInitiator) && (other_hosts_also_finished || !backup_query_was_sent_to_other_hosts)) + /// All the hosts must finish before we remove the coordination nodes. + bool expect_other_hosts_finished = stage_sync.isQuerySentToOtherHosts() || !stage_sync.isErrorSet(); + bool all_hosts_finished = stage_sync.finished() && (stage_sync.otherHostsFinished() || !expect_other_hosts_finished); + if (!all_hosts_finished) { - if (!cleaner.tryCleanupAfterError()) - return false; - } - - return true; -} - -void BackupCoordinationOnCluster::waitForOtherHostsToFinish() -{ - if ((current_host != kInitiator) || !backup_query_was_sent_to_other_hosts) - return; - stage_sync.waitForOtherHostsToFinish(); -} - -bool BackupCoordinationOnCluster::tryWaitForOtherHostsToFinishAfterError() noexcept -{ - if (current_host != kInitiator) + auto unfinished_hosts = expect_other_hosts_finished ? 
stage_sync.getUnfinishedHosts() : Strings{current_host}; + LOG_INFO(log, "Skipping removing nodes from ZooKeeper because hosts {} didn't finish", + BackupCoordinationStageSync::getHostsDesc(unfinished_hosts)); return false; - if (!backup_query_was_sent_to_other_hosts) - return true; - return stage_sync.tryWaitForOtherHostsToFinishAfterError(); + } + return cleaner.cleanup(throw_if_error); } ZooKeeperRetriesInfo BackupCoordinationOnCluster::getOnClusterInitializationKeeperRetriesInfo() const diff --git a/src/Backups/BackupCoordinationOnCluster.h b/src/Backups/BackupCoordinationOnCluster.h index 7369c2cc746..b439ab619d8 100644 --- a/src/Backups/BackupCoordinationOnCluster.h +++ b/src/Backups/BackupCoordinationOnCluster.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include #include @@ -20,7 +19,7 @@ class BackupCoordinationOnCluster : public IBackupCoordination { public: /// Empty string as the current host is used to mark the initiator of a BACKUP ON CLUSTER query. - static const constexpr std::string_view kInitiator; + static const constexpr std::string_view kInitiator = BackupCoordinationStageSync::kInitiator; BackupCoordinationOnCluster( const UUID & backup_uuid_, @@ -37,13 +36,13 @@ public: ~BackupCoordinationOnCluster() override; + void setBackupQueryIsSentToOtherHosts() override; + bool isBackupQuerySentToOtherHosts() const override; Strings setStage(const String & new_stage, const String & message, bool sync) override; - void setBackupQueryWasSentToOtherHosts() override; - bool trySetError(std::exception_ptr exception) override; - void finish() override; - bool tryFinishAfterError() noexcept override; - void waitForOtherHostsToFinish() override; - bool tryWaitForOtherHostsToFinishAfterError() noexcept override; + bool setError(std::exception_ptr exception, bool throw_if_error) override; + bool waitOtherHostsFinish(bool throw_if_error) const override; + bool finish(bool throw_if_error) override; + bool cleanup(bool throw_if_error) override; void addReplicatedPartNames( const String & table_zk_path, @@ -110,11 +109,10 @@ private: const bool plain_backup; LoggerPtr const log; + /// The order is important: `stage_sync` must be initialized after `with_retries` and `cleaner`. const WithRetries with_retries; - BackupConcurrencyCheck concurrency_check; - BackupCoordinationStageSync stage_sync; BackupCoordinationCleaner cleaner; - std::atomic backup_query_was_sent_to_other_hosts = false; + BackupCoordinationStageSync stage_sync; mutable std::optional replicated_tables TSA_GUARDED_BY(replicated_tables_mutex); mutable std::optional replicated_access TSA_GUARDED_BY(replicated_access_mutex); diff --git a/src/Backups/BackupCoordinationStageSync.cpp b/src/Backups/BackupCoordinationStageSync.cpp index 9a05f9490c2..fcf09d7c315 100644 --- a/src/Backups/BackupCoordinationStageSync.cpp +++ b/src/Backups/BackupCoordinationStageSync.cpp @@ -42,9 +42,6 @@ namespace kCurrentVersion = 2, }; - - /// Empty string as the current host is used to mark the initiator of a BACKUP ON CLUSTER or RESTORE ON CLUSTER query. 
- const constexpr std::string_view kInitiator; } bool BackupCoordinationStageSync::HostInfo::operator ==(const HostInfo & other) const @@ -63,12 +60,32 @@ bool BackupCoordinationStageSync::State::operator ==(const State & other) const bool BackupCoordinationStageSync::State::operator !=(const State & other) const = default; +void BackupCoordinationStageSync::State::merge(const State & other) +{ + if (other.host_with_error && !host_with_error) + { + const String & host = *other.host_with_error; + host_with_error = host; + hosts.at(host).exception = other.hosts.at(host).exception; + } + + for (const auto & [host, other_host_info] : other.hosts) + { + auto & host_info = hosts.at(host); + host_info.stages.insert(other_host_info.stages.begin(), other_host_info.stages.end()); + if (other_host_info.finished) + host_info.finished = true; + } +} + + BackupCoordinationStageSync::BackupCoordinationStageSync( bool is_restore_, const String & zookeeper_path_, const String & current_host_, const Strings & all_hosts_, bool allow_concurrency_, + BackupConcurrencyCounters & concurrency_counters_, const WithRetries & with_retries_, ThreadPoolCallbackRunnerUnsafe schedule_, QueryStatusPtr process_list_element_, @@ -89,35 +106,29 @@ BackupCoordinationStageSync::BackupCoordinationStageSync( , max_attempts_after_bad_version(with_retries.getKeeperSettings().max_attempts_after_bad_version) , zookeeper_path(zookeeper_path_) , root_zookeeper_path(zookeeper_path.parent_path().parent_path()) - , operation_node_path(zookeeper_path.parent_path()) + , operation_zookeeper_path(zookeeper_path.parent_path()) , operation_node_name(zookeeper_path.parent_path().filename()) - , stage_node_path(zookeeper_path) , start_node_path(zookeeper_path / ("started|" + current_host)) , finish_node_path(zookeeper_path / ("finished|" + current_host)) , num_hosts_node_path(zookeeper_path / "num_hosts") + , error_node_path(zookeeper_path / "error") , alive_node_path(zookeeper_path / ("alive|" + current_host)) , alive_tracker_node_path(fs::path{root_zookeeper_path} / "alive_tracker") - , error_node_path(zookeeper_path / "error") , zk_nodes_changed(std::make_shared()) { - if ((zookeeper_path.filename() != "stage") || !operation_node_name.starts_with(is_restore ? "restore-" : "backup-") - || (root_zookeeper_path == operation_node_path)) - { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected path in ZooKeeper specified: {}", zookeeper_path); - } - initializeState(); createRootNodes(); try { + concurrency_check.emplace(is_restore, /* on_cluster = */ true, zookeeper_path, allow_concurrency, concurrency_counters_); createStartAndAliveNodes(); startWatchingThread(); } catch (...) { - trySetError(std::current_exception()); - tryFinishImpl(); + if (setError(std::current_exception(), /* throw_if_error = */ false)) + finish(/* throw_if_error = */ false); throw; } } @@ -125,7 +136,26 @@ BackupCoordinationStageSync::BackupCoordinationStageSync( BackupCoordinationStageSync::~BackupCoordinationStageSync() { - tryFinishImpl(); + /// Normally either finish() or setError() must be called. + if (!tried_to_finish) + { + if (state.host_with_error) + { + /// setError() was called and succeeded. + finish(/* throw_if_error = */ false); + } + else if (!tried_to_set_error) + { + /// Neither finish() nor setError() were called, it's a bug. 
+ chassert(false, "~BackupCoordinationStageSync() is called without finish() or setError()"); + LOG_ERROR(log, "~BackupCoordinationStageSync() is called without finish() or setError()"); + } + } + + /// Normally the watching thread should be stopped already because the finish() function stops it. + /// However if an error happened then the watching thread can be still running, + /// so here in the destructor we have to ensure that it's stopped. + stopWatchingThread(); } @@ -137,6 +167,12 @@ void BackupCoordinationStageSync::initializeState() for (const String & host : all_hosts) state.hosts.emplace(host, HostInfo{.host = host, .last_connection_time = now, .last_connection_time_monotonic = monotonic_now}); + + if (!state.hosts.contains(current_host)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "List of hosts must contain the current host"); + + if (!state.hosts.contains(String{kInitiator})) + throw Exception(ErrorCodes::LOGICAL_ERROR, "List of hosts must contain the initiator"); } @@ -179,6 +215,12 @@ String BackupCoordinationStageSync::getHostsDesc(const Strings & hosts) void BackupCoordinationStageSync::createRootNodes() { + if ((zookeeper_path.filename() != "stage") || !operation_node_name.starts_with(is_restore ? "restore-" : "backup-") + || (root_zookeeper_path == operation_zookeeper_path)) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected path in ZooKeeper specified: {}", zookeeper_path); + } + auto holder = with_retries.createRetriesControlHolder("BackupStageSync::createRootNodes", WithRetries::kInitialization); holder.retries_ctl.retryLoop( [&, &zookeeper = holder.faulty_zookeeper]() @@ -252,27 +294,27 @@ void BackupCoordinationStageSync::createStartAndAliveNodes(Coordination::ZooKeep Coordination::Requests requests; requests.reserve(6); - size_t operation_node_path_pos = static_cast(-1); - if (!zookeeper->exists(operation_node_path)) + size_t operation_node_pos = static_cast(-1); + if (!zookeeper->exists(operation_zookeeper_path)) { - operation_node_path_pos = requests.size(); - requests.emplace_back(zkutil::makeCreateRequest(operation_node_path, "", zkutil::CreateMode::Persistent)); + operation_node_pos = requests.size(); + requests.emplace_back(zkutil::makeCreateRequest(operation_zookeeper_path, "", zkutil::CreateMode::Persistent)); } - size_t stage_node_path_pos = static_cast(-1); - if (!zookeeper->exists(stage_node_path)) + size_t zookeeper_path_pos = static_cast(-1); + if (!zookeeper->exists(zookeeper_path)) { - stage_node_path_pos = requests.size(); - requests.emplace_back(zkutil::makeCreateRequest(stage_node_path, "", zkutil::CreateMode::Persistent)); + zookeeper_path_pos = requests.size(); + requests.emplace_back(zkutil::makeCreateRequest(zookeeper_path, "", zkutil::CreateMode::Persistent)); } - size_t num_hosts_node_path_pos = requests.size(); + size_t num_hosts_node_pos = requests.size(); if (num_hosts) requests.emplace_back(zkutil::makeSetRequest(num_hosts_node_path, toString(*num_hosts + 1), num_hosts_version)); else requests.emplace_back(zkutil::makeCreateRequest(num_hosts_node_path, "1", zkutil::CreateMode::Persistent)); - size_t alive_tracker_node_path_pos = requests.size(); + size_t alive_tracker_node_pos = requests.size(); requests.emplace_back(zkutil::makeSetRequest(alive_tracker_node_path, "", alive_tracker_version)); requests.emplace_back(zkutil::makeCreateRequest(start_node_path, std::to_string(kCurrentVersion), zkutil::CreateMode::Persistent)); @@ -284,7 +326,7 @@ void BackupCoordinationStageSync::createStartAndAliveNodes(Coordination::ZooKeep if 
(code == Coordination::Error::ZOK) { LOG_INFO(log, "Created start node #{} in ZooKeeper for {} (coordination version: {})", - num_hosts.value_or(0) + 1, current_host_desc, kCurrentVersion); + num_hosts.value_or(0) + 1, current_host_desc, static_cast(kCurrentVersion)); return; } @@ -294,40 +336,34 @@ void BackupCoordinationStageSync::createStartAndAliveNodes(Coordination::ZooKeep LOG_TRACE(log, "{} (attempt #{}){}", message, attempt_no, will_try_again ? ", will try again" : ""); }; - if ((responses.size() > operation_node_path_pos) && - (responses[operation_node_path_pos]->error == Coordination::Error::ZNODEEXISTS)) + if ((operation_node_pos < responses.size()) && + (responses[operation_node_pos]->error == Coordination::Error::ZNODEEXISTS)) { - show_error_before_next_attempt(fmt::format("Node {} in ZooKeeper already exists", operation_node_path)); + show_error_before_next_attempt(fmt::format("Node {} already exists", operation_zookeeper_path)); /// needs another attempt } - else if ((responses.size() > stage_node_path_pos) && - (responses[stage_node_path_pos]->error == Coordination::Error::ZNODEEXISTS)) + else if ((zookeeper_path_pos < responses.size()) && + (responses[zookeeper_path_pos]->error == Coordination::Error::ZNODEEXISTS)) { - show_error_before_next_attempt(fmt::format("Node {} in ZooKeeper already exists", stage_node_path)); + show_error_before_next_attempt(fmt::format("Node {} already exists", zookeeper_path)); /// needs another attempt } - else if ((responses.size() > num_hosts_node_path_pos) && num_hosts && - (responses[num_hosts_node_path_pos]->error == Coordination::Error::ZBADVERSION)) + else if ((num_hosts_node_pos < responses.size()) && !num_hosts && + (responses[num_hosts_node_pos]->error == Coordination::Error::ZNODEEXISTS)) { - show_error_before_next_attempt("Other host changed the 'num_hosts' node in ZooKeeper"); + show_error_before_next_attempt(fmt::format("Node {} already exists", num_hosts_node_path)); + /// needs another attempt + } + else if ((num_hosts_node_pos < responses.size()) && num_hosts && + (responses[num_hosts_node_pos]->error == Coordination::Error::ZBADVERSION)) + { + show_error_before_next_attempt(fmt::format("The version of node {} changed", num_hosts_node_path)); num_hosts.reset(); /// needs to reread 'num_hosts' again } - else if ((responses.size() > num_hosts_node_path_pos) && num_hosts && - (responses[num_hosts_node_path_pos]->error == Coordination::Error::ZNONODE)) + else if ((alive_tracker_node_pos < responses.size()) && + (responses[alive_tracker_node_pos]->error == Coordination::Error::ZBADVERSION)) { - show_error_before_next_attempt("Other host removed the 'num_hosts' node in ZooKeeper"); - num_hosts.reset(); /// needs to reread 'num_hosts' again - } - else if ((responses.size() > num_hosts_node_path_pos) && !num_hosts && - (responses[num_hosts_node_path_pos]->error == Coordination::Error::ZNODEEXISTS)) - { - show_error_before_next_attempt("Other host created the 'num_hosts' node in ZooKeeper"); - /// needs another attempt - } - else if ((responses.size() > alive_tracker_node_path_pos) && - (responses[alive_tracker_node_path_pos]->error == Coordination::Error::ZBADVERSION)) - { - show_error_before_next_attempt("Concurrent backup or restore changed some 'alive' nodes in ZooKeeper"); + show_error_before_next_attempt(fmt::format("The version of node {} changed", alive_tracker_node_path)); check_concurrency = true; /// needs to recheck for concurrency again } else @@ -337,8 +373,7 @@ void 
BackupCoordinationStageSync::createStartAndAliveNodes(Coordination::ZooKeep } throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, - "Couldn't create the 'start' node in ZooKeeper for {} after {} attempts", - current_host_desc, max_attempts_after_bad_version); + "Couldn't create node {} in ZooKeeper after {} attempts", start_node_path, max_attempts_after_bad_version); } @@ -387,36 +422,53 @@ void BackupCoordinationStageSync::startWatchingThread() void BackupCoordinationStageSync::stopWatchingThread() { - should_stop_watching_thread = true; + { + std::lock_guard lock{mutex}; + if (should_stop_watching_thread) + return; + should_stop_watching_thread = true; - /// Wake up waiting threads. - if (zk_nodes_changed) - zk_nodes_changed->set(); - state_changed.notify_all(); + /// Wake up waiting threads. + if (zk_nodes_changed) + zk_nodes_changed->set(); + state_changed.notify_all(); + } if (watching_thread_future.valid()) watching_thread_future.wait(); + + LOG_TRACE(log, "Stopped the watching thread"); } void BackupCoordinationStageSync::watchingThread() { - while (!should_stop_watching_thread) + auto should_stop = [&] + { + std::lock_guard lock{mutex}; + return should_stop_watching_thread; + }; + + while (!should_stop()) { try { /// Check if the current BACKUP or RESTORE command is already cancelled. checkIfQueryCancelled(); + } + catch (...) + { + tryLogCurrentException(log, "Caugth exception while watching"); + } - /// Reset the `connected` flag for each host, we'll set them to true again after we find the 'alive' nodes. - resetConnectedFlag(); - + try + { /// Recreate the 'alive' node if necessary and read a new state from ZooKeeper. auto holder = with_retries.createRetriesControlHolder("BackupStageSync::watchingThread"); auto & zookeeper = holder.faulty_zookeeper; with_retries.renewZooKeeper(zookeeper); - if (should_stop_watching_thread) + if (should_stop()) return; /// Recreate the 'alive' node if it was removed. @@ -427,7 +479,10 @@ void BackupCoordinationStageSync::watchingThread() } catch (...) { - tryLogCurrentException(log, "Caugth exception while watching"); + tryLogCurrentException(log, "Caught exception while watching"); + + /// Reset the `connected` flag for each host, we'll set them to true again after we find the 'alive' nodes. + resetConnectedFlag(); } try @@ -438,7 +493,7 @@ void BackupCoordinationStageSync::watchingThread() } catch (...) { - tryLogCurrentException(log, "Caugth exception while checking if the query should be cancelled"); + tryLogCurrentException(log, "Caught exception while watching"); } zk_nodes_changed->tryWait(sync_period_ms.count()); @@ -473,7 +528,7 @@ void BackupCoordinationStageSync::readCurrentState(Coordination::ZooKeeperWithFa zk_nodes_changed->reset(); /// Get zk nodes and subscribe on their changes. - Strings new_zk_nodes = zookeeper->getChildren(stage_node_path, nullptr, zk_nodes_changed); + Strings new_zk_nodes = zookeeper->getChildren(zookeeper_path, nullptr, zk_nodes_changed); std::sort(new_zk_nodes.begin(), new_zk_nodes.end()); /// Sorting is necessary because we compare the list of zk nodes with its previous versions. 
State new_state; @@ -492,6 +547,8 @@ void BackupCoordinationStageSync::readCurrentState(Coordination::ZooKeeperWithFa zk_nodes = new_zk_nodes; new_state = state; + for (auto & [_, host_info] : new_state.hosts) + host_info.connected = false; } auto get_host_info = [&](const String & host) -> HostInfo * @@ -514,7 +571,8 @@ void BackupCoordinationStageSync::readCurrentState(Coordination::ZooKeeperWithFa { String serialized_error = zookeeper->get(error_node_path); auto [exception, host] = parseErrorNode(serialized_error); - if (auto * host_info = get_host_info(host)) + auto * host_info = get_host_info(host); + if (exception && host_info) { host_info->exception = exception; new_state.host_with_error = host; @@ -576,6 +634,9 @@ void BackupCoordinationStageSync::readCurrentState(Coordination::ZooKeeperWithFa { std::lock_guard lock{mutex}; + /// We were reading `new_state` from ZooKeeper with `mutex` unlocked, so `state` could get more information during that reading, + /// we don't want to lose that information, that's why we use merge() here. + new_state.merge(state); was_state_changed = (new_state != state); state = std::move(new_state); } @@ -604,26 +665,10 @@ int BackupCoordinationStageSync::parseStartNode(const String & start_node_conten } -std::pair BackupCoordinationStageSync::parseErrorNode(const String & error_node_contents) -{ - ReadBufferFromOwnString buf{error_node_contents}; - String host; - readStringBinary(host, buf); - auto exception = std::make_exception_ptr(readException(buf, fmt::format("Got error from {}", getHostDesc(host)))); - return {exception, host}; -} - - void BackupCoordinationStageSync::checkIfQueryCancelled() { if (process_list_element->checkTimeLimitSoft()) return; /// Not cancelled. - - std::lock_guard lock{mutex}; - if (state.cancelled) - return; /// Already marked as cancelled. - - state.cancelled = true; state_changed.notify_all(); } @@ -634,13 +679,13 @@ void BackupCoordinationStageSync::cancelQueryIfError() { std::lock_guard lock{mutex}; - if (state.cancelled || !state.host_with_error) + if (!state.host_with_error) return; - state.cancelled = true; exception = state.hosts.at(*state.host_with_error).exception; } + chassert(exception); process_list_element->cancelQuery(false, exception); state_changed.notify_all(); } @@ -652,7 +697,7 @@ void BackupCoordinationStageSync::cancelQueryIfDisconnectedTooLong() { std::lock_guard lock{mutex}; - if (state.cancelled || state.host_with_error || ((failure_after_host_disconnected_for_seconds.count() == 0))) + if (state.host_with_error || ((failure_after_host_disconnected_for_seconds.count() == 0))) return; auto monotonic_now = std::chrono::steady_clock::now(); @@ -685,27 +730,92 @@ void BackupCoordinationStageSync::cancelQueryIfDisconnectedTooLong() } } } - - if (!exception) - return; - - state.cancelled = true; } + if (!exception) + return; + process_list_element->cancelQuery(false, exception); state_changed.notify_all(); } +void BackupCoordinationStageSync::setQueryIsSentToOtherHosts() +{ + std::lock_guard lock{mutex}; + query_is_sent_to_other_hosts = true; +} + +bool BackupCoordinationStageSync::isQuerySentToOtherHosts() const +{ + std::lock_guard lock{mutex}; + return query_is_sent_to_other_hosts; +} + + void BackupCoordinationStageSync::setStage(const String & stage, const String & stage_result) { LOG_INFO(log, "{} reached stage {}", current_host_desc, stage); + + { + std::lock_guard lock{mutex}; + if (state.hosts.at(current_host).stages.contains(stage)) + return; /// Already set. 
+ } + + if ((getInitiatorVersion() == kVersionWithoutFinishNode) && (stage == BackupCoordinationStage::COMPLETED)) + { + LOG_TRACE(log, "Stopping the watching thread because the initiator uses outdated version {}", getInitiatorVersion()); + stopWatchingThread(); + } + auto holder = with_retries.createRetriesControlHolder("BackupStageSync::setStage"); holder.retries_ctl.retryLoop([&, &zookeeper = holder.faulty_zookeeper]() { with_retries.renewZooKeeper(zookeeper); - zookeeper->createIfNotExists(getStageNodePath(stage), stage_result); + createStageNode(stage, stage_result, zookeeper); }); + + /// If the initiator of the query has that old version then it doesn't expect us to create the 'finish' node and moreover + /// the initiator can start removing all the nodes immediately after all hosts report about reaching the "completed" status. + /// So to avoid weird errors in the logs we won't create the 'finish' node if the initiator of the query has that old version. + if ((getInitiatorVersion() == kVersionWithoutFinishNode) && (stage == BackupCoordinationStage::COMPLETED)) + { + LOG_INFO(log, "Skipped creating the 'finish' node because the initiator uses outdated version {}", getInitiatorVersion()); + std::lock_guard lock{mutex}; + tried_to_finish = true; + state.hosts.at(current_host).finished = true; + } +} + + +void BackupCoordinationStageSync::createStageNode(const String & stage, const String & stage_result, Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper) +{ + String serialized_error; + if (zookeeper->tryGet(error_node_path, serialized_error)) + { + auto [exception, host] = parseErrorNode(serialized_error); + if (exception) + std::rethrow_exception(exception); + } + + auto code = zookeeper->tryCreate(getStageNodePath(stage), stage_result, zkutil::CreateMode::Persistent); + if (code == Coordination::Error::ZOK) + { + std::lock_guard lock{mutex}; + state.hosts.at(current_host).stages[stage] = stage_result; + return; + } + + if (code == Coordination::Error::ZNODEEXISTS) + { + String another_result = zookeeper->get(getStageNodePath(stage)); + std::lock_guard lock{mutex}; + state.hosts.at(current_host).stages[stage] = another_result; + return; + } + + throw zkutil::KeeperException::fromPath(code, getStageNodePath(stage)); } @@ -715,71 +825,7 @@ String BackupCoordinationStageSync::getStageNodePath(const String & stage) const } -bool BackupCoordinationStageSync::trySetError(std::exception_ptr exception) noexcept -{ - try - { - std::rethrow_exception(exception); - } - catch (const Exception & e) - { - return trySetError(e); - } - catch (...) - { - return trySetError(Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode())); - } -} - - -bool BackupCoordinationStageSync::trySetError(const Exception & exception) -{ - try - { - setError(exception); - return true; - } - catch (...) - { - return false; - } -} - - -void BackupCoordinationStageSync::setError(const Exception & exception) -{ - /// Most likely this exception has been already logged so here we're logging it without stacktrace. 
- String exception_message = getExceptionMessage(exception, /* with_stacktrace= */ false, /* check_embedded_stacktrace= */ true); - LOG_INFO(log, "Sending exception from {} to other hosts: {}", current_host_desc, exception_message); - - auto holder = with_retries.createRetriesControlHolder("BackupStageSync::setError", WithRetries::kErrorHandling); - - holder.retries_ctl.retryLoop([&, &zookeeper = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zookeeper); - - WriteBufferFromOwnString buf; - writeStringBinary(current_host, buf); - writeException(exception, buf, true); - auto code = zookeeper->tryCreate(error_node_path, buf.str(), zkutil::CreateMode::Persistent); - - if (code == Coordination::Error::ZOK) - { - LOG_TRACE(log, "Sent exception from {} to other hosts", current_host_desc); - } - else if (code == Coordination::Error::ZNODEEXISTS) - { - LOG_INFO(log, "An error has been already assigned for this {}", operation_name); - } - else - { - throw zkutil::KeeperException::fromPath(code, error_node_path); - } - }); -} - - -Strings BackupCoordinationStageSync::waitForHostsToReachStage(const String & stage_to_wait, const Strings & hosts, std::optional timeout) const +Strings BackupCoordinationStageSync::waitHostsReachStage(const Strings & hosts, const String & stage_to_wait) const { Strings results; results.resize(hosts.size()); @@ -787,44 +833,28 @@ Strings BackupCoordinationStageSync::waitForHostsToReachStage(const String & sta std::unique_lock lock{mutex}; /// TSA_NO_THREAD_SAFETY_ANALYSIS is here because Clang Thread Safety Analysis doesn't understand std::unique_lock. - auto check_if_hosts_ready = [&](bool time_is_out) TSA_NO_THREAD_SAFETY_ANALYSIS + auto check_if_hosts_reach_stage = [&]() TSA_NO_THREAD_SAFETY_ANALYSIS { - return checkIfHostsReachStage(hosts, stage_to_wait, time_is_out, timeout, results); + return checkIfHostsReachStage(hosts, stage_to_wait, results); }; - if (timeout) - { - if (!state_changed.wait_for(lock, *timeout, [&] { return check_if_hosts_ready(/* time_is_out = */ false); })) - check_if_hosts_ready(/* time_is_out = */ true); - } - else - { - state_changed.wait(lock, [&] { return check_if_hosts_ready(/* time_is_out = */ false); }); - } + state_changed.wait(lock, check_if_hosts_reach_stage); return results; } -bool BackupCoordinationStageSync::checkIfHostsReachStage( - const Strings & hosts, - const String & stage_to_wait, - bool time_is_out, - std::optional timeout, - Strings & results) const +bool BackupCoordinationStageSync::checkIfHostsReachStage(const Strings & hosts, const String & stage_to_wait, Strings & results) const { - if (should_stop_watching_thread) - throw Exception(ErrorCodes::LOGICAL_ERROR, "finish() was called while waiting for a stage"); - process_list_element->checkTimeLimit(); for (size_t i = 0; i != hosts.size(); ++i) { const String & host = hosts[i]; auto it = state.hosts.find(host); - if (it == state.hosts.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "waitForHostsToReachStage() was called for unexpected {}, all hosts are {}", getHostDesc(host), getHostsDesc(all_hosts)); + throw Exception(ErrorCodes::LOGICAL_ERROR, + "waitHostsReachStage() was called for unexpected {}, all hosts are {}", getHostDesc(host), getHostsDesc(all_hosts)); const HostInfo & host_info = it->second; auto stage_it = host_info.stages.find(stage_to_wait); @@ -835,10 +865,11 @@ bool BackupCoordinationStageSync::checkIfHostsReachStage( } if (host_info.finished) - { throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "{} finished without coming to 
stage {}", getHostDesc(host), stage_to_wait); - } + + if (should_stop_watching_thread) + throw Exception(ErrorCodes::LOGICAL_ERROR, "waitHostsReachStage() can't wait for stage {} after the watching thread stopped", stage_to_wait); String host_status; if (!host_info.started) @@ -846,85 +877,73 @@ bool BackupCoordinationStageSync::checkIfHostsReachStage( else if (!host_info.connected) host_status = fmt::format(": the host is currently disconnected, last connection was at {}", host_info.last_connection_time); - if (!time_is_out) - { - LOG_TRACE(log, "Waiting for {} to reach stage {}{}", getHostDesc(host), stage_to_wait, host_status); - return false; - } - else - { - throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, - "Waited longer than timeout {} for {} to reach stage {}{}", - *timeout, getHostDesc(host), stage_to_wait, host_status); - } + LOG_TRACE(log, "Waiting for {} to reach stage {}{}", getHostDesc(host), stage_to_wait, host_status); + return false; /// wait for next change of `state_changed` } LOG_INFO(log, "Hosts {} reached stage {}", getHostsDesc(hosts), stage_to_wait); - return true; + return true; /// stop waiting } -void BackupCoordinationStageSync::finish(bool & other_hosts_also_finished) +bool BackupCoordinationStageSync::finish(bool throw_if_error) { - tryFinishImpl(other_hosts_also_finished, /* throw_if_error = */ true, /* retries_kind = */ WithRetries::kNormal); + WithRetries::Kind retries_kind = WithRetries::kNormal; + if (throw_if_error) + retries_kind = WithRetries::kErrorHandling; + + return finishImpl(throw_if_error, retries_kind); } -bool BackupCoordinationStageSync::tryFinishAfterError(bool & other_hosts_also_finished) noexcept +bool BackupCoordinationStageSync::finishImpl(bool throw_if_error, WithRetries::Kind retries_kind) { - return tryFinishImpl(other_hosts_also_finished, /* throw_if_error = */ false, /* retries_kind = */ WithRetries::kErrorHandling); -} - - -bool BackupCoordinationStageSync::tryFinishImpl() -{ - bool other_hosts_also_finished; - return tryFinishAfterError(other_hosts_also_finished); -} - - -bool BackupCoordinationStageSync::tryFinishImpl(bool & other_hosts_also_finished, bool throw_if_error, WithRetries::Kind retries_kind) -{ - auto get_value_other_hosts_also_finished = [&] TSA_REQUIRES(mutex) - { - other_hosts_also_finished = true; - for (const auto & [host, host_info] : state.hosts) - { - if ((host != current_host) && !host_info.finished) - other_hosts_also_finished = false; - } - }; - { std::lock_guard lock{mutex}; - if (finish_result.succeeded) + + if (finishedNoLock()) { - get_value_other_hosts_also_finished(); + LOG_INFO(log, "The finish node for {} already exists", current_host_desc); return true; } - if (finish_result.exception) + + if (tried_to_finish) { - if (throw_if_error) - std::rethrow_exception(finish_result.exception); + /// We don't repeat creating the finish node, no matter if it was successful or not. + LOG_INFO(log, "Skipped creating the finish node for {} because earlier we failed to do that", current_host_desc); return false; } + + bool failed_to_set_error = tried_to_set_error && !state.host_with_error; + if (failed_to_set_error) + { + /// Tried to create the 'error' node, but failed. + /// Then it's better not to create the 'finish' node in this case because otherwise other hosts might think we've succeeded. 
+ LOG_INFO(log, "Skipping creating the finish node for {} because there was an error which we were unable to send to other hosts", current_host_desc); + return false; + } + + if (current_host == kInitiator) + { + /// Normally the initiator should wait for other hosts to finish before creating its own finish node. + /// We show warning if some of the other hosts didn't finish. + bool expect_other_hosts_finished = query_is_sent_to_other_hosts || !state.host_with_error; + bool other_hosts_finished = otherHostsFinishedNoLock() || !expect_other_hosts_finished; + if (!other_hosts_finished) + LOG_WARNING(log, "Hosts {} didn't finish before the initiator", getHostsDesc(getUnfinishedOtherHostsNoLock())); + } } + stopWatchingThread(); + try { - stopWatchingThread(); - auto holder = with_retries.createRetriesControlHolder("BackupStageSync::finish", retries_kind); holder.retries_ctl.retryLoop([&, &zookeeper = holder.faulty_zookeeper]() { with_retries.renewZooKeeper(zookeeper); - createFinishNodeAndRemoveAliveNode(zookeeper); + createFinishNodeAndRemoveAliveNode(zookeeper, throw_if_error); }); - - std::lock_guard lock{mutex}; - finish_result.succeeded = true; - get_value_other_hosts_also_finished(); - return true; } catch (...) { @@ -933,63 +952,87 @@ bool BackupCoordinationStageSync::tryFinishImpl(bool & other_hosts_also_finished getCurrentExceptionMessage(/* with_stacktrace= */ false, /* check_embedded_stacktrace= */ true)); std::lock_guard lock{mutex}; - finish_result.exception = std::current_exception(); + tried_to_finish = true; + if (throw_if_error) throw; return false; } + + { + std::lock_guard lock{mutex}; + tried_to_finish = true; + state.hosts.at(current_host).finished = true; + } + + return true; } -void BackupCoordinationStageSync::createFinishNodeAndRemoveAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper) +void BackupCoordinationStageSync::createFinishNodeAndRemoveAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper, bool throw_if_error) { - if (zookeeper->exists(finish_node_path)) - return; - - /// If the initiator of the query has that old version then it doesn't expect us to create the 'finish' node and moreover - /// the initiator can start removing all the nodes immediately after all hosts report about reaching the "completed" status. - /// So to avoid weird errors in the logs we won't create the 'finish' node if the initiator of the query has that old version. - if ((getInitiatorVersion() == kVersionWithoutFinishNode) && (current_host != kInitiator)) - { - LOG_INFO(log, "Skipped creating the 'finish' node because the initiator uses outdated version {}", getInitiatorVersion()); - return; - } - std::optional num_hosts; int num_hosts_version = -1; for (size_t attempt_no = 1; attempt_no <= max_attempts_after_bad_version; ++attempt_no) { + /// The 'num_hosts' node may not exist if createStartAndAliveNodes() failed in the constructor. 
if (!num_hosts) { + String num_hosts_str; Coordination::Stat stat; - num_hosts = parseFromString(zookeeper->get(num_hosts_node_path, &stat)); - num_hosts_version = stat.version; + if (zookeeper->tryGet(num_hosts_node_path, num_hosts_str, &stat)) + { + num_hosts = parseFromString(num_hosts_str); + num_hosts_version = stat.version; + } } + String serialized_error; + if (throw_if_error && zookeeper->tryGet(error_node_path, serialized_error)) + { + auto [exception, host] = parseErrorNode(serialized_error); + if (exception) + std::rethrow_exception(exception); + } + + if (zookeeper->exists(finish_node_path)) + return; + + bool start_node_exists = zookeeper->exists(start_node_path); + Coordination::Requests requests; requests.reserve(3); requests.emplace_back(zkutil::makeCreateRequest(finish_node_path, "", zkutil::CreateMode::Persistent)); - size_t num_hosts_node_path_pos = requests.size(); - requests.emplace_back(zkutil::makeSetRequest(num_hosts_node_path, toString(*num_hosts - 1), num_hosts_version)); - - size_t alive_node_path_pos = static_cast(-1); + size_t alive_node_pos = static_cast(-1); if (zookeeper->exists(alive_node_path)) { - alive_node_path_pos = requests.size(); + alive_node_pos = requests.size(); requests.emplace_back(zkutil::makeRemoveRequest(alive_node_path, -1)); } + size_t num_hosts_node_pos = static_cast(-1); + if (num_hosts) + { + num_hosts_node_pos = requests.size(); + requests.emplace_back(zkutil::makeSetRequest(num_hosts_node_path, toString(start_node_exists ? (*num_hosts - 1) : *num_hosts), num_hosts_version)); + } + Coordination::Responses responses; auto code = zookeeper->tryMulti(requests, responses); if (code == Coordination::Error::ZOK) { - --*num_hosts; - String hosts_left_desc = ((*num_hosts == 0) ? "no hosts left" : fmt::format("{} hosts left", *num_hosts)); - LOG_INFO(log, "Created the 'finish' node in ZooKeeper for {}, {}", current_host_desc, hosts_left_desc); + String hosts_left_desc; + if (num_hosts) + { + if (start_node_exists) + --*num_hosts; + hosts_left_desc = (*num_hosts == 0) ? ", no hosts left" : fmt::format(", {} hosts left", *num_hosts); + } + LOG_INFO(log, "Created the 'finish' node in ZooKeeper for {}{}", current_host_desc, hosts_left_desc); return; } @@ -999,18 +1042,18 @@ void BackupCoordinationStageSync::createFinishNodeAndRemoveAliveNode(Coordinatio LOG_TRACE(log, "{} (attempt #{}){}", message, attempt_no, will_try_again ? 
", will try again" : ""); }; - if ((responses.size() > num_hosts_node_path_pos) && - (responses[num_hosts_node_path_pos]->error == Coordination::Error::ZBADVERSION)) + if ((alive_node_pos < responses.size()) && + (responses[alive_node_pos]->error == Coordination::Error::ZNONODE)) { - show_error_before_next_attempt("Other host changed the 'num_hosts' node in ZooKeeper"); - num_hosts.reset(); /// needs to reread 'num_hosts' again - } - else if ((responses.size() > alive_node_path_pos) && - (responses[alive_node_path_pos]->error == Coordination::Error::ZNONODE)) - { - show_error_before_next_attempt(fmt::format("Node {} in ZooKeeper doesn't exist", alive_node_path_pos)); + show_error_before_next_attempt(fmt::format("Node {} doesn't exist", alive_node_path)); /// needs another attempt } + else if ((num_hosts_node_pos < responses.size()) && + (responses[num_hosts_node_pos]->error == Coordination::Error::ZBADVERSION)) + { + show_error_before_next_attempt(fmt::format("The version of node {} changed", num_hosts_node_path)); + num_hosts.reset(); /// needs to reread 'num_hosts' again + } else { zkutil::KeeperMultiException::check(code, requests, responses); @@ -1026,60 +1069,73 @@ void BackupCoordinationStageSync::createFinishNodeAndRemoveAliveNode(Coordinatio int BackupCoordinationStageSync::getInitiatorVersion() const { std::lock_guard lock{mutex}; - auto it = state.hosts.find(String{kInitiator}); - if (it == state.hosts.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no initiator of this {} query, it's a bug", operation_name); - const HostInfo & host_info = it->second; - return host_info.version; + return state.hosts.at(String{kInitiator}).version; } -void BackupCoordinationStageSync::waitForOtherHostsToFinish() const -{ - tryWaitForOtherHostsToFinishImpl(/* reason = */ "", /* throw_if_error = */ true, /* timeout = */ {}); -} - - -bool BackupCoordinationStageSync::tryWaitForOtherHostsToFinishAfterError() const noexcept +bool BackupCoordinationStageSync::waitOtherHostsFinish(bool throw_if_error) const { std::optional timeout; - if (finish_timeout_after_error.count() != 0) - timeout = finish_timeout_after_error; + String reason; - String reason = fmt::format("{} needs other hosts to finish before cleanup", current_host_desc); - return tryWaitForOtherHostsToFinishImpl(reason, /* throw_if_error = */ false, timeout); + if (!throw_if_error) + { + if (finish_timeout_after_error.count() != 0) + timeout = finish_timeout_after_error; + reason = "after error before cleanup"; + } + + return waitOtherHostsFinishImpl(reason, timeout, throw_if_error); } -bool BackupCoordinationStageSync::tryWaitForOtherHostsToFinishImpl(const String & reason, bool throw_if_error, std::optional timeout) const +bool BackupCoordinationStageSync::waitOtherHostsFinishImpl(const String & reason, std::optional timeout, bool throw_if_error) const { std::unique_lock lock{mutex}; /// TSA_NO_THREAD_SAFETY_ANALYSIS is here because Clang Thread Safety Analysis doesn't understand std::unique_lock. 
- auto check_if_other_hosts_finish = [&](bool time_is_out) TSA_NO_THREAD_SAFETY_ANALYSIS + auto other_hosts_finished = [&]() TSA_NO_THREAD_SAFETY_ANALYSIS { return otherHostsFinishedNoLock(); }; + + if (other_hosts_finished()) { - return checkIfOtherHostsFinish(reason, throw_if_error, time_is_out, timeout); + LOG_TRACE(log, "Other hosts have already finished"); + return true; + } + + bool failed_to_set_error = TSA_SUPPRESS_WARNING_FOR_READ(tried_to_set_error) && !TSA_SUPPRESS_WARNING_FOR_READ(state).host_with_error; + if (failed_to_set_error) + { + /// Tried to create the 'error' node, but failed. + /// Then it's better not to wait for other hosts to finish in this case because other hosts don't know they should finish. + LOG_INFO(log, "Skipping waiting for other hosts to finish because there was an error which we were unable to send to other hosts"); + return false; + } + + bool result = false; + + /// TSA_NO_THREAD_SAFETY_ANALYSIS is here because Clang Thread Safety Analysis doesn't understand std::unique_lock. + auto check_if_hosts_finish = [&](bool time_is_out) TSA_NO_THREAD_SAFETY_ANALYSIS + { + return checkIfOtherHostsFinish(reason, timeout, time_is_out, result, throw_if_error); }; if (timeout) { - if (state_changed.wait_for(lock, *timeout, [&] { return check_if_other_hosts_finish(/* time_is_out = */ false); })) - return true; - return check_if_other_hosts_finish(/* time_is_out = */ true); + if (!state_changed.wait_for(lock, *timeout, [&] { return check_if_hosts_finish(/* time_is_out = */ false); })) + check_if_hosts_finish(/* time_is_out = */ true); } else { - state_changed.wait(lock, [&] { return check_if_other_hosts_finish(/* time_is_out = */ false); }); - return true; + state_changed.wait(lock, [&] { return check_if_hosts_finish(/* time_is_out = */ false); }); } + + return result; } -bool BackupCoordinationStageSync::checkIfOtherHostsFinish(const String & reason, bool throw_if_error, bool time_is_out, std::optional timeout) const +bool BackupCoordinationStageSync::checkIfOtherHostsFinish( + const String & reason, std::optional timeout, bool time_is_out, bool & result, bool throw_if_error) const { - if (should_stop_watching_thread) - throw Exception(ErrorCodes::LOGICAL_ERROR, "finish() was called while waiting for other hosts to finish"); - if (throw_if_error) process_list_element->checkTimeLimit(); @@ -1088,38 +1144,261 @@ bool BackupCoordinationStageSync::checkIfOtherHostsFinish(const String & reason, if ((host == current_host) || host_info.finished) continue; + String reason_text = reason.empty() ? "" : (" " + reason); + String host_status; if (!host_info.started) host_status = fmt::format(": the host hasn't started working on this {} yet", operation_name); else if (!host_info.connected) host_status = fmt::format(": the host is currently disconnected, last connection was at {}", host_info.last_connection_time); - if (!time_is_out) + if (time_is_out) { - String reason_text = reason.empty() ? "" : (" because " + reason); - LOG_TRACE(log, "Waiting for {} to finish{}{}", getHostDesc(host), reason_text, host_status); - return false; - } - else - { - String reason_text = reason.empty() ? 
"" : fmt::format(" (reason of waiting: {})", reason); - if (!throw_if_error) - { - LOG_INFO(log, "Waited longer than timeout {} for {} to finish{}{}", - *timeout, getHostDesc(host), host_status, reason_text); - return false; - } - else + if (throw_if_error) { throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "Waited longer than timeout {} for {} to finish{}{}", - *timeout, getHostDesc(host), host_status, reason_text); + *timeout, getHostDesc(host), reason_text, host_status); } + LOG_INFO(log, "Waited longer than timeout {} for {} to finish{}{}", + *timeout, getHostDesc(host), reason_text, host_status); + result = false; + return true; /// stop waiting } + + if (should_stop_watching_thread) + { + LOG_ERROR(log, "waitOtherHostFinish({}) can't wait for other hosts to finish after the watching thread stopped", throw_if_error); + chassert(false, "waitOtherHostFinish() can't wait for other hosts to finish after the watching thread stopped"); + if (throw_if_error) + throw Exception(ErrorCodes::LOGICAL_ERROR, "waitOtherHostsFinish() can't wait for other hosts to finish after the watching thread stopped"); + result = false; + return true; /// stop waiting + } + + LOG_TRACE(log, "Waiting for {} to finish{}{}", getHostDesc(host), reason_text, host_status); + return false; /// wait for next change of `state_changed` } LOG_TRACE(log, "Other hosts finished working on this {}", operation_name); + result = true; + return true; /// stop waiting +} + + +bool BackupCoordinationStageSync::finished() const +{ + std::lock_guard lock{mutex}; + return finishedNoLock(); +} + + +bool BackupCoordinationStageSync::finishedNoLock() const +{ + return state.hosts.at(current_host).finished; +} + + +bool BackupCoordinationStageSync::otherHostsFinished() const +{ + std::lock_guard lock{mutex}; + return otherHostsFinishedNoLock(); +} + + +bool BackupCoordinationStageSync::otherHostsFinishedNoLock() const +{ + for (const auto & [host, host_info] : state.hosts) + { + if (!host_info.finished && (host != current_host)) + return false; + } return true; } + +bool BackupCoordinationStageSync::allHostsFinishedNoLock() const +{ + return finishedNoLock() && otherHostsFinishedNoLock(); +} + + +Strings BackupCoordinationStageSync::getUnfinishedHosts() const +{ + std::lock_guard lock{mutex}; + return getUnfinishedHostsNoLock(); +} + + +Strings BackupCoordinationStageSync::getUnfinishedHostsNoLock() const +{ + if (allHostsFinishedNoLock()) + return {}; + + Strings res; + res.reserve(all_hosts.size()); + for (const auto & [host, host_info] : state.hosts) + { + if (!host_info.finished) + res.emplace_back(host); + } + return res; +} + + +Strings BackupCoordinationStageSync::getUnfinishedOtherHosts() const +{ + std::lock_guard lock{mutex}; + return getUnfinishedOtherHostsNoLock(); +} + + +Strings BackupCoordinationStageSync::getUnfinishedOtherHostsNoLock() const +{ + if (otherHostsFinishedNoLock()) + return {}; + + Strings res; + res.reserve(all_hosts.size() - 1); + for (const auto & [host, host_info] : state.hosts) + { + if (!host_info.finished && (host != current_host)) + res.emplace_back(host); + } + return res; +} + + +bool BackupCoordinationStageSync::setError(std::exception_ptr exception, bool throw_if_error) +{ + try + { + std::rethrow_exception(exception); + } + catch (const Exception & e) + { + return setError(e, throw_if_error); + } + catch (...) 
+ { + return setError(Exception{getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode()}, throw_if_error); + } +} + + +bool BackupCoordinationStageSync::setError(const Exception & exception, bool throw_if_error) +{ + try + { + /// Most likely this exception has been already logged so here we're logging it without stacktrace. + String exception_message = getExceptionMessage(exception, /* with_stacktrace= */ false, /* check_embedded_stacktrace= */ true); + LOG_INFO(log, "Sending exception from {} to other hosts: {}", current_host_desc, exception_message); + + { + std::lock_guard lock{mutex}; + if (state.host_with_error) + { + LOG_INFO(log, "The error node already exists"); + return true; + } + + if (tried_to_set_error) + { + LOG_INFO(log, "Skipped creating the error node because earlier we failed to do that"); + return false; + } + } + + auto holder = with_retries.createRetriesControlHolder("BackupStageSync::setError", WithRetries::kErrorHandling); + holder.retries_ctl.retryLoop([&, &zookeeper = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zookeeper); + createErrorNode(exception, zookeeper); + }); + + { + std::lock_guard lock{mutex}; + tried_to_set_error = true; + return true; + } + } + catch (...) + { + LOG_TRACE(log, "Caught exception while removing nodes from ZooKeeper for this {}: {}", + is_restore ? "restore" : "backup", + getCurrentExceptionMessage(/* with_stacktrace= */ false, /* check_embedded_stacktrace= */ true)); + + std::lock_guard lock{mutex}; + tried_to_set_error = true; + + if (throw_if_error) + throw; + return false; + } +} + + +void BackupCoordinationStageSync::createErrorNode(const Exception & exception, Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper) +{ + String serialized_error; + { + WriteBufferFromOwnString buf; + writeStringBinary(current_host, buf); + writeException(exception, buf, true); + serialized_error = buf.str(); + } + + auto code = zookeeper->tryCreate(error_node_path, serialized_error, zkutil::CreateMode::Persistent); + + if (code == Coordination::Error::ZOK) + { + std::lock_guard lock{mutex}; + if (!state.host_with_error) + { + state.host_with_error = current_host; + state.hosts.at(current_host).exception = parseErrorNode(serialized_error).first; + } + LOG_TRACE(log, "Sent exception from {} to other hosts", current_host_desc); + return; + } + + if (code == Coordination::Error::ZNODEEXISTS) + { + String another_error = zookeeper->get(error_node_path); + auto [another_exception, host] = parseErrorNode(another_error); + if (another_exception) + { + std::lock_guard lock{mutex}; + if (!state.host_with_error) + { + state.host_with_error = host; + state.hosts.at(host).exception = another_exception; + } + LOG_INFO(log, "Another error is already assigned for this {}", operation_name); + return; + } + } + + throw zkutil::KeeperException::fromPath(code, error_node_path); +} + + +std::pair BackupCoordinationStageSync::parseErrorNode(const String & error_node_contents) const +{ + ReadBufferFromOwnString buf{error_node_contents}; + String host; + readStringBinary(host, buf); + if (std::find(all_hosts.begin(), all_hosts.end(), host) == all_hosts.end()) + return {}; + auto exception = std::make_exception_ptr(readException(buf, fmt::format("Got error from {}", getHostDesc(host)))); + return {exception, host}; +} + + +bool BackupCoordinationStageSync::isErrorSet() const +{ + std::lock_guard lock{mutex}; + return state.host_with_error.has_value(); +} + } diff --git a/src/Backups/BackupCoordinationStageSync.h 
b/src/Backups/BackupCoordinationStageSync.h index dc0d3c3c83d..11d3d1cf6f4 100644 --- a/src/Backups/BackupCoordinationStageSync.h +++ b/src/Backups/BackupCoordinationStageSync.h @@ -1,7 +1,9 @@ #pragma once +#include #include + namespace DB { @@ -9,12 +11,16 @@ namespace DB class BackupCoordinationStageSync { public: + /// Empty string as the current host is used to mark the initiator of a BACKUP ON CLUSTER or RESTORE ON CLUSTER query. + static const constexpr std::string_view kInitiator; + BackupCoordinationStageSync( bool is_restore_, /// true if this is a RESTORE ON CLUSTER command, false if this is a BACKUP ON CLUSTER command const String & zookeeper_path_, /// path to the "stage" folder in ZooKeeper const String & current_host_, /// the current host, or an empty string if it's the initiator of the BACKUP/RESTORE ON CLUSTER command const Strings & all_hosts_, /// all the hosts (including the initiator and the current host) performing the BACKUP/RESTORE ON CLUSTER command bool allow_concurrency_, /// whether it's allowed to have concurrent backups or restores. + BackupConcurrencyCounters & concurrency_counters_, const WithRetries & with_retries_, ThreadPoolCallbackRunnerUnsafe schedule_, QueryStatusPtr process_list_element_, @@ -22,30 +28,37 @@ public: ~BackupCoordinationStageSync(); + /// Sets that the BACKUP or RESTORE query was sent to other hosts. + void setQueryIsSentToOtherHosts(); + bool isQuerySentToOtherHosts() const; + /// Sets the stage of the current host and signal other hosts if there were other hosts waiting for that. void setStage(const String & stage, const String & stage_result = {}); - /// Waits until all the specified hosts come to the specified stage. - /// The function returns the results which specified hosts set when they came to the required stage. - /// If it doesn't happen before the timeout then the function will stop waiting and throw an exception. - Strings waitForHostsToReachStage(const String & stage_to_wait, const Strings & hosts, std::optional timeout = {}) const; - - /// Waits until all the other hosts finish their work. - /// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled. - void waitForOtherHostsToFinish() const; - - /// Lets other host know that the current host has finished its work. - void finish(bool & other_hosts_also_finished); + /// Waits until specified hosts come to the specified stage. + /// The function returns the results which the specified hosts set when they came to the required stage. + Strings waitHostsReachStage(const Strings & hosts, const String & stage_to_wait) const; /// Lets other hosts know that the current host has encountered an error. - bool trySetError(std::exception_ptr exception) noexcept; + /// The function returns true if it successfully created the error node or if the error node was found already exist. + bool setError(std::exception_ptr exception, bool throw_if_error); + bool isErrorSet() const; - /// Waits until all the other hosts finish their work (as a part of error-handling process). - /// Doesn't stops waiting if some host encounters an error or gets cancelled. - bool tryWaitForOtherHostsToFinishAfterError() const noexcept; + /// Waits until the hosts other than the current host finish their work. Must be called before finish(). + /// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled. 
+ bool waitOtherHostsFinish(bool throw_if_error) const; + bool otherHostsFinished() const; - /// Lets other host know that the current host has finished its work (as a part of error-handling process). - bool tryFinishAfterError(bool & other_hosts_also_finished) noexcept; + /// Lets other hosts know that the current host has finished its work. + bool finish(bool throw_if_error); + bool finished() const; + + /// Returns true if all the hosts have finished. + bool allHostsFinished() const { return finished() && otherHostsFinished(); } + + /// Returns a list of the hosts which haven't finished yet. + Strings getUnfinishedHosts() const; + Strings getUnfinishedOtherHosts() const; /// Returns a printable name of a specific host. For empty host the function returns "initiator". static String getHostDesc(const String & host); @@ -78,14 +91,17 @@ private: /// Reads the current state from ZooKeeper without throwing exceptions. void readCurrentState(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper); + + /// Creates a stage node to let other hosts know we've reached the specified stage. + void createStageNode(const String & stage, const String & stage_result, Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper); String getStageNodePath(const String & stage) const; /// Lets other hosts know that the current host has encountered an error. - bool trySetError(const Exception & exception); - void setError(const Exception & exception); + bool setError(const Exception & exception, bool throw_if_error); + void createErrorNode(const Exception & exception, Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper); /// Deserializes an error stored in the error node. - static std::pair parseErrorNode(const String & error_node_contents); + std::pair parseErrorNode(const String & error_node_contents) const; /// Reset the `connected` flag for each host. void resetConnectedFlag(); @@ -102,19 +118,27 @@ private: void cancelQueryIfDisconnectedTooLong(); /// Used by waitForHostsToReachStage() to check if everything is ready to return. - bool checkIfHostsReachStage(const Strings & hosts, const String & stage_to_wait, bool time_is_out, std::optional timeout, Strings & results) const TSA_REQUIRES(mutex); + bool checkIfHostsReachStage(const Strings & hosts, const String & stage_to_wait, Strings & results) const TSA_REQUIRES(mutex); /// Creates the 'finish' node. - bool tryFinishImpl(); - bool tryFinishImpl(bool & other_hosts_also_finished, bool throw_if_error, WithRetries::Kind retries_kind); - void createFinishNodeAndRemoveAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper); + bool finishImpl(bool throw_if_error, WithRetries::Kind retries_kind); + void createFinishNodeAndRemoveAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper, bool throw_if_error); /// Returns the version used by the initiator. int getInitiatorVersion() const; /// Waits until all the other hosts finish their work. - bool tryWaitForOtherHostsToFinishImpl(const String & reason, bool throw_if_error, std::optional timeout) const; - bool checkIfOtherHostsFinish(const String & reason, bool throw_if_error, bool time_is_out, std::optional timeout) const TSA_REQUIRES(mutex); + bool waitOtherHostsFinishImpl(const String & reason, std::optional timeout, bool throw_if_error) const; + bool checkIfOtherHostsFinish(const String & reason, std::optional timeout, bool time_is_out, bool & result, bool throw_if_error) const TSA_REQUIRES(mutex); + + /// Returns true if all the hosts have finished. 
+ bool allHostsFinishedNoLock() const TSA_REQUIRES(mutex); + bool finishedNoLock() const TSA_REQUIRES(mutex); + bool otherHostsFinishedNoLock() const TSA_REQUIRES(mutex); + + /// Returns a list of the hosts which haven't finished yet. + Strings getUnfinishedHostsNoLock() const TSA_REQUIRES(mutex); + Strings getUnfinishedOtherHostsNoLock() const TSA_REQUIRES(mutex); const bool is_restore; const String operation_name; @@ -138,15 +162,16 @@ private: /// Paths in ZooKeeper. const std::filesystem::path zookeeper_path; const String root_zookeeper_path; - const String operation_node_path; + const String operation_zookeeper_path; const String operation_node_name; - const String stage_node_path; const String start_node_path; const String finish_node_path; const String num_hosts_node_path; + const String error_node_path; const String alive_node_path; const String alive_tracker_node_path; - const String error_node_path; + + std::optional concurrency_check; std::shared_ptr zk_nodes_changed; @@ -176,25 +201,21 @@ private: { std::map hosts; /// std::map because we need to compare states std::optional host_with_error; - bool cancelled = false; bool operator ==(const State & other) const; bool operator !=(const State & other) const; + void merge(const State & other); }; State state TSA_GUARDED_BY(mutex); mutable std::condition_variable state_changed; std::future watching_thread_future; - std::atomic should_stop_watching_thread = false; + bool should_stop_watching_thread TSA_GUARDED_BY(mutex) = false; - struct FinishResult - { - bool succeeded = false; - std::exception_ptr exception; - bool other_hosts_also_finished = false; - }; - FinishResult finish_result TSA_GUARDED_BY(mutex); + bool query_is_sent_to_other_hosts TSA_GUARDED_BY(mutex) = false; + bool tried_to_finish TSA_GUARDED_BY(mutex) = false; + bool tried_to_set_error TSA_GUARDED_BY(mutex) = false; mutable std::mutex mutex; }; diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 8480dc5d64d..88ebf8eef32 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -329,6 +329,7 @@ std::pair BackupsWorker::start(const ASTPtr & backup_ struct BackupsWorker::BackupStarter { BackupsWorker & backups_worker; + LoggerPtr log; std::shared_ptr backup_query; ContextPtr query_context; /// We have to keep `query_context` until the end of the operation because a pointer to it is stored inside the ThreadGroup we're using. ContextMutablePtr backup_context; @@ -345,6 +346,7 @@ struct BackupsWorker::BackupStarter BackupStarter(BackupsWorker & backups_worker_, const ASTPtr & query_, const ContextPtr & context_) : backups_worker(backups_worker_) + , log(backups_worker.log) , backup_query(std::static_pointer_cast(query_->clone())) , query_context(context_) , backup_context(Context::createCopy(query_context)) @@ -399,9 +401,20 @@ struct BackupsWorker::BackupStarter chassert(!backup); backup = backups_worker.openBackupForWriting(backup_info, backup_settings, backup_coordination, backup_context); - backups_worker.doBackup( - backup, backup_query, backup_id, backup_name_for_logging, backup_settings, backup_coordination, backup_context, - on_cluster, cluster); + backups_worker.doBackup(backup, backup_query, backup_id, backup_settings, backup_coordination, backup_context, + on_cluster, cluster); + + backup_coordination->finish(/* throw_if_error = */ true); + backup.reset(); + + /// The backup coordination is not needed anymore. 
+ if (!is_internal_backup) + backup_coordination->cleanup(/* throw_if_error = */ true); + backup_coordination.reset(); + + /// NOTE: setStatus is called after setNumFilesAndSize in order to have actual information in a backup log record + LOG_INFO(log, "{} {} was created successfully", (is_internal_backup ? "Internal backup" : "Backup"), backup_name_for_logging); + backups_worker.setStatus(backup_id, BackupStatus::BACKUP_CREATED); } void onException() @@ -416,16 +429,29 @@ struct BackupsWorker::BackupStarter if (backup && !backup->setIsCorrupted()) should_remove_files_in_backup = false; - if (backup_coordination && backup_coordination->trySetError(std::current_exception())) + bool all_hosts_finished = false; + + if (backup_coordination && backup_coordination->setError(std::current_exception(), /* throw_if_error = */ false)) { - bool other_hosts_finished = backup_coordination->tryWaitForOtherHostsToFinishAfterError(); + bool other_hosts_finished = !is_internal_backup + && (!backup_coordination->isBackupQuerySentToOtherHosts() || backup_coordination->waitOtherHostsFinish(/* throw_if_error = */ false)); - if (should_remove_files_in_backup && other_hosts_finished) - backup->tryRemoveAllFiles(); - - backup_coordination->tryFinishAfterError(); + all_hosts_finished = backup_coordination->finish(/* throw_if_error = */ false) && other_hosts_finished; } + if (!all_hosts_finished) + should_remove_files_in_backup = false; + + if (backup && should_remove_files_in_backup) + backup->tryRemoveAllFiles(); + + backup.reset(); + + if (backup_coordination && all_hosts_finished) + backup_coordination->cleanup(/* throw_if_error = */ false); + + backup_coordination.reset(); + backups_worker.setStatusSafe(backup_id, getBackupStatusFromCurrentException()); } }; @@ -497,7 +523,6 @@ void BackupsWorker::doBackup( BackupMutablePtr backup, const std::shared_ptr & backup_query, const OperationID & backup_id, - const String & backup_name_for_logging, const BackupSettings & backup_settings, std::shared_ptr backup_coordination, ContextMutablePtr context, @@ -521,10 +546,10 @@ void BackupsWorker::doBackup( backup_settings.copySettingsToQuery(*backup_query); sendQueryToOtherHosts(*backup_query, cluster, backup_settings.shard_num, backup_settings.replica_num, context, required_access, backup_coordination->getOnClusterInitializationKeeperRetriesInfo()); - backup_coordination->setBackupQueryWasSentToOtherHosts(); + backup_coordination->setBackupQueryIsSentToOtherHosts(); /// Wait until all the hosts have written their backup entries. - backup_coordination->waitForOtherHostsToFinish(); + backup_coordination->waitOtherHostsFinish(/* throw_if_error = */ true); } else { @@ -569,18 +594,8 @@ void BackupsWorker::doBackup( compressed_size = backup->getCompressedSize(); } - /// Close the backup. - backup.reset(); - - /// The backup coordination is not needed anymore. - backup_coordination->finish(); - /// NOTE: we need to update metadata again after backup->finalizeWriting(), because backup metadata is written there. setNumFilesAndSize(backup_id, num_files, total_size, num_entries, uncompressed_size, compressed_size, 0, 0); - - /// NOTE: setStatus is called after setNumFilesAndSize in order to have actual information in a backup log record - LOG_INFO(log, "{} {} was created successfully", (is_internal_backup ? 
"Internal backup" : "Backup"), backup_name_for_logging); - setStatus(backup_id, BackupStatus::BACKUP_CREATED); } @@ -687,6 +702,7 @@ void BackupsWorker::writeBackupEntries( struct BackupsWorker::RestoreStarter { BackupsWorker & backups_worker; + LoggerPtr log; std::shared_ptr restore_query; ContextPtr query_context; /// We have to keep `query_context` until the end of the operation because a pointer to it is stored inside the ThreadGroup we're using. ContextMutablePtr restore_context; @@ -702,6 +718,7 @@ struct BackupsWorker::RestoreStarter RestoreStarter(BackupsWorker & backups_worker_, const ASTPtr & query_, const ContextPtr & context_) : backups_worker(backups_worker_) + , log(backups_worker.log) , restore_query(std::static_pointer_cast(query_->clone())) , query_context(context_) , restore_context(Context::createCopy(query_context)) @@ -753,16 +770,17 @@ struct BackupsWorker::RestoreStarter } restore_coordination = backups_worker.makeRestoreCoordination(on_cluster, restore_settings, restore_context); - backups_worker.doRestore( - restore_query, - restore_id, - backup_name_for_logging, - backup_info, - restore_settings, - restore_coordination, - restore_context, - on_cluster, - cluster); + backups_worker.doRestore(restore_query, restore_id, backup_info, restore_settings, restore_coordination, restore_context, + on_cluster, cluster); + + /// The restore coordination is not needed anymore. + restore_coordination->finish(/* throw_if_error = */ true); + if (!is_internal_restore) + restore_coordination->cleanup(/* throw_if_error = */ true); + restore_coordination.reset(); + + LOG_INFO(log, "Restored from {} {} successfully", (is_internal_restore ? "internal backup" : "backup"), backup_name_for_logging); + backups_worker.setStatus(restore_id, BackupStatus::RESTORED); } void onException() @@ -770,12 +788,16 @@ struct BackupsWorker::RestoreStarter /// Something bad happened, some data were not restored. tryLogCurrentException(backups_worker.log, fmt::format("Failed to restore from {} {}", (is_internal_restore ? 
"internal backup" : "backup"), backup_name_for_logging)); - if (restore_coordination && restore_coordination->trySetError(std::current_exception())) + if (restore_coordination && restore_coordination->setError(std::current_exception(), /* throw_if_error = */ false)) { - restore_coordination->tryWaitForOtherHostsToFinishAfterError(); - restore_coordination->tryFinishAfterError(); + bool other_hosts_finished = !is_internal_restore + && (!restore_coordination->isRestoreQuerySentToOtherHosts() || restore_coordination->waitOtherHostsFinish(/* throw_if_error = */ false)); + if (restore_coordination->finish(/* throw_if_error = */ false) && other_hosts_finished) + restore_coordination->cleanup(/* throw_if_error = */ false); } + restore_coordination.reset(); + backups_worker.setStatusSafe(restore_id, getRestoreStatusFromCurrentException()); } }; @@ -838,7 +860,6 @@ BackupPtr BackupsWorker::openBackupForReading(const BackupInfo & backup_info, co void BackupsWorker::doRestore( const std::shared_ptr & restore_query, const OperationID & restore_id, - const String & backup_name_for_logging, const BackupInfo & backup_info, RestoreSettings restore_settings, std::shared_ptr restore_coordination, @@ -882,10 +903,10 @@ void BackupsWorker::doRestore( restore_settings.copySettingsToQuery(*restore_query); sendQueryToOtherHosts(*restore_query, cluster, restore_settings.shard_num, restore_settings.replica_num, context, {}, restore_coordination->getOnClusterInitializationKeeperRetriesInfo()); - restore_coordination->setRestoreQueryWasSentToOtherHosts(); + restore_coordination->setRestoreQueryIsSentToOtherHosts(); /// Wait until all the hosts have done with their restoring work. - restore_coordination->waitForOtherHostsToFinish(); + restore_coordination->waitOtherHostsFinish(/* throw_if_error = */ true); } else { @@ -905,12 +926,6 @@ void BackupsWorker::doRestore( backup, context, getThreadPool(ThreadPoolId::RESTORE), after_task_callback}; restorer.run(RestorerFromBackup::RESTORE); } - - /// The restore coordination is not needed anymore. - restore_coordination->finish(); - - LOG_INFO(log, "Restored from {} {} successfully", (is_internal_restore ? 
"internal backup" : "backup"), backup_name_for_logging); - setStatus(restore_id, BackupStatus::RESTORED); } @@ -943,7 +958,7 @@ BackupsWorker::makeBackupCoordination(bool on_cluster, const BackupSettings & ba if (!on_cluster) { return std::make_shared( - *backup_settings.backup_uuid, !backup_settings.deduplicate_files, allow_concurrent_backups, *concurrency_counters); + !backup_settings.deduplicate_files, allow_concurrent_backups, *concurrency_counters); } bool is_internal_backup = backup_settings.internal; @@ -981,8 +996,7 @@ BackupsWorker::makeRestoreCoordination(bool on_cluster, const RestoreSettings & { if (!on_cluster) { - return std::make_shared( - *restore_settings.restore_uuid, allow_concurrent_restores, *concurrency_counters); + return std::make_shared(allow_concurrent_restores, *concurrency_counters); } bool is_internal_restore = restore_settings.internal; diff --git a/src/Backups/BackupsWorker.h b/src/Backups/BackupsWorker.h index 37f91e269a9..2e5ca84f3f6 100644 --- a/src/Backups/BackupsWorker.h +++ b/src/Backups/BackupsWorker.h @@ -81,7 +81,6 @@ private: BackupMutablePtr backup, const std::shared_ptr & backup_query, const BackupOperationID & backup_id, - const String & backup_name_for_logging, const BackupSettings & backup_settings, std::shared_ptr backup_coordination, ContextMutablePtr context, @@ -102,7 +101,6 @@ private: void doRestore( const std::shared_ptr & restore_query, const BackupOperationID & restore_id, - const String & backup_name_for_logging, const BackupInfo & backup_info, RestoreSettings restore_settings, std::shared_ptr restore_coordination, diff --git a/src/Backups/IBackupCoordination.h b/src/Backups/IBackupCoordination.h index c0eb90de89b..8bd874b9d0d 100644 --- a/src/Backups/IBackupCoordination.h +++ b/src/Backups/IBackupCoordination.h @@ -20,29 +20,27 @@ class IBackupCoordination public: virtual ~IBackupCoordination() = default; + /// Sets that the backup query was sent to other hosts. + /// Function waitOtherHostsFinish() will check that to find out if it should really wait or not. + virtual void setBackupQueryIsSentToOtherHosts() = 0; + virtual bool isBackupQuerySentToOtherHosts() const = 0; + /// Sets the current stage and waits for other hosts to come to this stage too. virtual Strings setStage(const String & new_stage, const String & message, bool sync) = 0; - /// Sets that the backup query was sent to other hosts. - /// Function waitForOtherHostsToFinish() will check that to find out if it should really wait or not. - virtual void setBackupQueryWasSentToOtherHosts() = 0; - /// Lets other hosts know that the current host has encountered an error. - virtual bool trySetError(std::exception_ptr exception) = 0; - - /// Lets other hosts know that the current host has finished its work. - virtual void finish() = 0; - - /// Lets other hosts know that the current host has finished its work (as a part of error-handling process). - virtual bool tryFinishAfterError() noexcept = 0; + /// Returns true if the information is successfully passed so other hosts can read it. + virtual bool setError(std::exception_ptr exception, bool throw_if_error) = 0; /// Waits until all the other hosts finish their work. /// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled. - virtual void waitForOtherHostsToFinish() = 0; + virtual bool waitOtherHostsFinish(bool throw_if_error) const = 0; - /// Waits until all the other hosts finish their work (as a part of error-handling process). 
- /// Doesn't stops waiting if some host encounters an error or gets cancelled. - virtual bool tryWaitForOtherHostsToFinishAfterError() noexcept = 0; + /// Lets other hosts know that the current host has finished its work. + virtual bool finish(bool throw_if_error) = 0; + + /// Removes temporary nodes in ZooKeeper. + virtual bool cleanup(bool throw_if_error) = 0; struct PartNameAndChecksum { diff --git a/src/Backups/IRestoreCoordination.h b/src/Backups/IRestoreCoordination.h index daabf1745f3..cc7bfd24202 100644 --- a/src/Backups/IRestoreCoordination.h +++ b/src/Backups/IRestoreCoordination.h @@ -18,29 +18,27 @@ class IRestoreCoordination public: virtual ~IRestoreCoordination() = default; + /// Sets that the restore query was sent to other hosts. + /// Function waitOtherHostsFinish() will check that to find out if it should really wait or not. + virtual void setRestoreQueryIsSentToOtherHosts() = 0; + virtual bool isRestoreQuerySentToOtherHosts() const = 0; + /// Sets the current stage and waits for other hosts to come to this stage too. virtual Strings setStage(const String & new_stage, const String & message, bool sync) = 0; - /// Sets that the restore query was sent to other hosts. - /// Function waitForOtherHostsToFinish() will check that to find out if it should really wait or not. - virtual void setRestoreQueryWasSentToOtherHosts() = 0; - /// Lets other hosts know that the current host has encountered an error. - virtual bool trySetError(std::exception_ptr exception) = 0; - - /// Lets other hosts know that the current host has finished its work. - virtual void finish() = 0; - - /// Lets other hosts know that the current host has finished its work (as a part of error-handling process). - virtual bool tryFinishAfterError() noexcept = 0; + /// Returns true if the information is successfully passed so other hosts can read it. + virtual bool setError(std::exception_ptr exception, bool throw_if_error) = 0; /// Waits until all the other hosts finish their work. /// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled. - virtual void waitForOtherHostsToFinish() = 0; + virtual bool waitOtherHostsFinish(bool throw_if_error) const = 0; - /// Waits until all the other hosts finish their work (as a part of error-handling process). - /// Doesn't stops waiting if some host encounters an error or gets cancelled. - virtual bool tryWaitForOtherHostsToFinishAfterError() noexcept = 0; + /// Lets other hosts know that the current host has finished its work. + virtual bool finish(bool throw_if_error) = 0; + + /// Removes temporary nodes in ZooKeeper. + virtual bool cleanup(bool throw_if_error) = 0; /// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table. 
virtual bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) = 0; diff --git a/src/Backups/RestoreCoordinationLocal.cpp b/src/Backups/RestoreCoordinationLocal.cpp index 569f58f1909..a9eee1fb159 100644 --- a/src/Backups/RestoreCoordinationLocal.cpp +++ b/src/Backups/RestoreCoordinationLocal.cpp @@ -10,9 +10,9 @@ namespace DB { RestoreCoordinationLocal::RestoreCoordinationLocal( - const UUID & restore_uuid, bool allow_concurrent_restore_, BackupConcurrencyCounters & concurrency_counters_) + bool allow_concurrent_restore_, BackupConcurrencyCounters & concurrency_counters_) : log(getLogger("RestoreCoordinationLocal")) - , concurrency_check(restore_uuid, /* is_restore = */ true, /* on_cluster = */ false, allow_concurrent_restore_, concurrency_counters_) + , concurrency_check(/* is_restore = */ true, /* on_cluster = */ false, /* zookeeper_path = */ "", allow_concurrent_restore_, concurrency_counters_) { } diff --git a/src/Backups/RestoreCoordinationLocal.h b/src/Backups/RestoreCoordinationLocal.h index 6be357c4b7e..6e3262a8a2e 100644 --- a/src/Backups/RestoreCoordinationLocal.h +++ b/src/Backups/RestoreCoordinationLocal.h @@ -17,16 +17,16 @@ class ASTCreateQuery; class RestoreCoordinationLocal : public IRestoreCoordination { public: - RestoreCoordinationLocal(const UUID & restore_uuid_, bool allow_concurrent_restore_, BackupConcurrencyCounters & concurrency_counters_); + RestoreCoordinationLocal(bool allow_concurrent_restore_, BackupConcurrencyCounters & concurrency_counters_); ~RestoreCoordinationLocal() override; + void setRestoreQueryIsSentToOtherHosts() override {} + bool isRestoreQuerySentToOtherHosts() const override { return false; } Strings setStage(const String &, const String &, bool) override { return {}; } - void setRestoreQueryWasSentToOtherHosts() override {} - bool trySetError(std::exception_ptr) override { return true; } - void finish() override {} - bool tryFinishAfterError() noexcept override { return true; } - void waitForOtherHostsToFinish() override {} - bool tryWaitForOtherHostsToFinishAfterError() noexcept override { return true; } + bool setError(std::exception_ptr, bool) override { return true; } + bool waitOtherHostsFinish(bool) const override { return true; } + bool finish(bool) override { return true; } + bool cleanup(bool) override { return true; } /// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table. 
bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) override; diff --git a/src/Backups/RestoreCoordinationOnCluster.cpp b/src/Backups/RestoreCoordinationOnCluster.cpp index 2029ad8b072..fad7341c044 100644 --- a/src/Backups/RestoreCoordinationOnCluster.cpp +++ b/src/Backups/RestoreCoordinationOnCluster.cpp @@ -35,17 +35,21 @@ RestoreCoordinationOnCluster::RestoreCoordinationOnCluster( , current_host_index(BackupCoordinationOnCluster::findCurrentHostIndex(current_host, all_hosts)) , log(getLogger("RestoreCoordinationOnCluster")) , with_retries(log, get_zookeeper_, keeper_settings, process_list_element_, [root_zookeeper_path_](Coordination::ZooKeeperWithFaultInjection::Ptr zk) { zk->sync(root_zookeeper_path_); }) - , concurrency_check(restore_uuid_, /* is_restore = */ true, /* on_cluster = */ true, allow_concurrent_restore_, concurrency_counters_) - , stage_sync(/* is_restore = */ true, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_restore_, with_retries, schedule_, process_list_element_, log) - , cleaner(zookeeper_path, with_retries, log) + , cleaner(/* is_restore = */ true, zookeeper_path, with_retries, log) + , stage_sync(/* is_restore = */ true, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_restore_, concurrency_counters_, with_retries, schedule_, process_list_element_, log) { - createRootNodes(); + try + { + createRootNodes(); + } + catch (...) + { + stage_sync.setError(std::current_exception(), /* throw_if_error = */ false); + throw; + } } -RestoreCoordinationOnCluster::~RestoreCoordinationOnCluster() -{ - tryFinishImpl(); -} +RestoreCoordinationOnCluster::~RestoreCoordinationOnCluster() = default; void RestoreCoordinationOnCluster::createRootNodes() { @@ -66,69 +70,52 @@ void RestoreCoordinationOnCluster::createRootNodes() }); } +void RestoreCoordinationOnCluster::setRestoreQueryIsSentToOtherHosts() +{ + stage_sync.setQueryIsSentToOtherHosts(); +} + +bool RestoreCoordinationOnCluster::isRestoreQuerySentToOtherHosts() const +{ + return stage_sync.isQuerySentToOtherHosts(); +} + Strings RestoreCoordinationOnCluster::setStage(const String & new_stage, const String & message, bool sync) { stage_sync.setStage(new_stage, message); - - if (!sync) - return {}; - - return stage_sync.waitForHostsToReachStage(new_stage, all_hosts_without_initiator); + if (sync) + return stage_sync.waitHostsReachStage(all_hosts_without_initiator, new_stage); + return {}; } -void RestoreCoordinationOnCluster::setRestoreQueryWasSentToOtherHosts() +bool RestoreCoordinationOnCluster::setError(std::exception_ptr exception, bool throw_if_error) { - restore_query_was_sent_to_other_hosts = true; + return stage_sync.setError(exception, throw_if_error); } -bool RestoreCoordinationOnCluster::trySetError(std::exception_ptr exception) +bool RestoreCoordinationOnCluster::waitOtherHostsFinish(bool throw_if_error) const { - return stage_sync.trySetError(exception); + return stage_sync.waitOtherHostsFinish(throw_if_error); } -void RestoreCoordinationOnCluster::finish() +bool RestoreCoordinationOnCluster::finish(bool throw_if_error) { - bool other_hosts_also_finished = false; - stage_sync.finish(other_hosts_also_finished); - - if ((current_host == kInitiator) && (other_hosts_also_finished || !restore_query_was_sent_to_other_hosts)) - cleaner.cleanup(); + return stage_sync.finish(throw_if_error); } -bool RestoreCoordinationOnCluster::tryFinishAfterError() noexcept +bool RestoreCoordinationOnCluster::cleanup(bool 
throw_if_error) { - return tryFinishImpl(); -} - -bool RestoreCoordinationOnCluster::tryFinishImpl() noexcept -{ - bool other_hosts_also_finished = false; - if (!stage_sync.tryFinishAfterError(other_hosts_also_finished)) - return false; - - if ((current_host == kInitiator) && (other_hosts_also_finished || !restore_query_was_sent_to_other_hosts)) + /// All the hosts must finish before we remove the coordination nodes. + bool expect_other_hosts_finished = stage_sync.isQuerySentToOtherHosts() || !stage_sync.isErrorSet(); + bool all_hosts_finished = stage_sync.finished() && (stage_sync.otherHostsFinished() || !expect_other_hosts_finished); + if (!all_hosts_finished) { - if (!cleaner.tryCleanupAfterError()) - return false; - } - - return true; -} - -void RestoreCoordinationOnCluster::waitForOtherHostsToFinish() -{ - if ((current_host != kInitiator) || !restore_query_was_sent_to_other_hosts) - return; - stage_sync.waitForOtherHostsToFinish(); -} - -bool RestoreCoordinationOnCluster::tryWaitForOtherHostsToFinishAfterError() noexcept -{ - if (current_host != kInitiator) + auto unfinished_hosts = expect_other_hosts_finished ? stage_sync.getUnfinishedHosts() : Strings{current_host}; + LOG_INFO(log, "Skipping removing nodes from ZooKeeper because hosts {} didn't finish", + BackupCoordinationStageSync::getHostsDesc(unfinished_hosts)); return false; - if (!restore_query_was_sent_to_other_hosts) - return true; - return stage_sync.tryWaitForOtherHostsToFinishAfterError(); + } + return cleaner.cleanup(throw_if_error); } ZooKeeperRetriesInfo RestoreCoordinationOnCluster::getOnClusterInitializationKeeperRetriesInfo() const diff --git a/src/Backups/RestoreCoordinationOnCluster.h b/src/Backups/RestoreCoordinationOnCluster.h index 87a8dd3ce83..99929cbdac3 100644 --- a/src/Backups/RestoreCoordinationOnCluster.h +++ b/src/Backups/RestoreCoordinationOnCluster.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include #include @@ -15,7 +14,7 @@ class RestoreCoordinationOnCluster : public IRestoreCoordination { public: /// Empty string as the current host is used to mark the initiator of a RESTORE ON CLUSTER query. - static const constexpr std::string_view kInitiator; + static const constexpr std::string_view kInitiator = BackupCoordinationStageSync::kInitiator; RestoreCoordinationOnCluster( const UUID & restore_uuid_, @@ -31,13 +30,13 @@ public: ~RestoreCoordinationOnCluster() override; + void setRestoreQueryIsSentToOtherHosts() override; + bool isRestoreQuerySentToOtherHosts() const override; Strings setStage(const String & new_stage, const String & message, bool sync) override; - void setRestoreQueryWasSentToOtherHosts() override; - bool trySetError(std::exception_ptr exception) override; - void finish() override; - bool tryFinishAfterError() noexcept override; - void waitForOtherHostsToFinish() override; - bool tryWaitForOtherHostsToFinishAfterError() noexcept override; + bool setError(std::exception_ptr exception, bool throw_if_error) override; + bool waitOtherHostsFinish(bool throw_if_error) const override; + bool finish(bool throw_if_error) override; + bool cleanup(bool throw_if_error) override; /// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table. 
bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) override; @@ -78,11 +77,10 @@ private: const size_t current_host_index; LoggerPtr const log; + /// The order is important: `stage_sync` must be initialized after `with_retries` and `cleaner`. const WithRetries with_retries; - BackupConcurrencyCheck concurrency_check; - BackupCoordinationStageSync stage_sync; BackupCoordinationCleaner cleaner; - std::atomic restore_query_was_sent_to_other_hosts = false; + BackupCoordinationStageSync stage_sync; }; } From 19bcc5550bad0444d652d760edddbe15fe0611da Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 11 Nov 2024 01:36:20 +0100 Subject: [PATCH 411/566] Fix tests. --- .../test_cancel_backup.py | 29 +++++++------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/tests/integration/test_backup_restore_on_cluster/test_cancel_backup.py b/tests/integration/test_backup_restore_on_cluster/test_cancel_backup.py index f63dc2aef3d..4ad53acc735 100644 --- a/tests/integration/test_backup_restore_on_cluster/test_cancel_backup.py +++ b/tests/integration/test_backup_restore_on_cluster/test_cancel_backup.py @@ -251,23 +251,16 @@ def kill_query( if is_initial_query is not None else "" ) + old_time = time.monotonic() node.query( f"KILL QUERY WHERE (query_kind='{query_kind}') AND (query LIKE '%{id}%'){filter_for_is_initial_query} SYNC" ) - node.query("SYSTEM FLUSH LOGS") - duration = ( - int( - node.query( - f"SELECT query_duration_ms FROM system.query_log WHERE query_kind='KillQuery' AND query LIKE '%{id}%' AND type='QueryFinish'" - ) - ) - / 1000 - ) + waited = time.monotonic() - old_time print( - f"{get_node_name(node)}: Cancelled {operation_name} {id} after {duration} seconds" + f"{get_node_name(node)}: Cancelled {operation_name} {id} after {waited} seconds" ) if timeout is not None: - assert duration < timeout + assert waited < timeout # Stops all ZooKeeper servers. @@ -305,7 +298,7 @@ def sleep(seconds): class NoTrashChecker: def __init__(self): self.expect_backups = [] - self.expect_unfinished_backups = [] + self.allow_unfinished_backups = [] self.expect_errors = [] self.allow_errors = [] self.check_zookeeper = True @@ -373,7 +366,7 @@ class NoTrashChecker: if unfinished_backups: print(f"Found unfinished backups: {unfinished_backups}") assert new_backups == set(self.expect_backups) - assert unfinished_backups == set(self.expect_unfinished_backups) + assert unfinished_backups.difference(self.allow_unfinished_backups) == set() all_errors = set() start_time = time.strftime( @@ -641,7 +634,7 @@ def test_long_disconnection_stops_backup(): assert get_status(initiator, backup_id=backup_id) == "CREATING_BACKUP" assert get_num_system_processes(initiator, backup_id=backup_id) >= 1 - no_trash_checker.expect_unfinished_backups = [backup_id] + no_trash_checker.allow_unfinished_backups = [backup_id] no_trash_checker.allow_errors = [ "FAILED_TO_SYNC_BACKUP_OR_RESTORE", "KEEPER_EXCEPTION", @@ -674,7 +667,7 @@ def test_long_disconnection_stops_backup(): # A backup is expected to fail, but it isn't expected to fail too soon. print(f"Backup failed after {time_to_fail} seconds disconnection") assert time_to_fail > 3 - assert time_to_fail < 30 + assert time_to_fail < 35 # A backup must NOT be stopped if Zookeeper is disconnected shorter than `failure_after_host_disconnected_for_seconds`. 
@@ -695,7 +688,7 @@ def test_short_disconnection_doesnt_stop_backup(): backup_id = random_id() initiator.query( f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {get_backup_name(backup_id)} SETTINGS id='{backup_id}' ASYNC", - settings={"backup_restore_failure_after_host_disconnected_for_seconds": 6}, + settings={"backup_restore_failure_after_host_disconnected_for_seconds": 10}, ) assert get_status(initiator, backup_id=backup_id) == "CREATING_BACKUP" @@ -703,13 +696,13 @@ def test_short_disconnection_doesnt_stop_backup(): # Dropping connection for less than `failure_after_host_disconnected_for_seconds` with PartitionManager() as pm: - random_sleep(3) + random_sleep(4) node_to_drop_zk_connection = random_node() print( f"Dropping connection between {get_node_name(node_to_drop_zk_connection)} and ZooKeeper" ) pm.drop_instance_zk_connections(node_to_drop_zk_connection) - random_sleep(3) + random_sleep(4) print( f"Restoring connection between {get_node_name(node_to_drop_zk_connection)} and ZooKeeper" ) From c4946cf1594c6083c01a39954495aea1be01f574 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mi=D1=81hael=20Stetsyuk?= <59827607+mstetsyuk@users.noreply.github.com> Date: Mon, 4 Nov 2024 10:41:26 +0000 Subject: [PATCH 412/566] style fix --- src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index c73c9f6d048..addaeb65350 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -29,7 +29,6 @@ namespace MergeTreeSetting namespace ErrorCodes { extern const int REPLICA_IS_ALREADY_ACTIVE; - extern const int REPLICA_STATUS_CHANGED; extern const int LOGICAL_ERROR; extern const int SUPPORT_IS_DISABLED; } From 05dfc6dbdba48964cfd147a3635613966da78f0a Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 11 Nov 2024 11:53:24 +0100 Subject: [PATCH 413/566] Update settings changes history --- src/Core/SettingsChangesHistory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index c6223bef2b2..7eb8455a169 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -76,6 +76,7 @@ static std::initializer_list Date: Mon, 11 Nov 2024 13:26:31 +0200 Subject: [PATCH 414/566] Fix typo Fix log message for more clean understanding --- docker/server/entrypoint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/server/entrypoint.sh b/docker/server/entrypoint.sh index 2f87008f2e5..947244dd97f 100755 --- a/docker/server/entrypoint.sh +++ b/docker/server/entrypoint.sh @@ -162,7 +162,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then tries=${CLICKHOUSE_INIT_TIMEOUT:-1000} while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do if [ "$tries" -le "0" ]; then - echo >&2 'ClickHouse init process failed.' + echo >&2 'ClickHouse init process timeout.' 
exit 1 fi tries=$(( tries-1 )) From 386e16bee24f5bbec50d7b99a1e302c2d01df592 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 11 Nov 2024 12:36:09 +0100 Subject: [PATCH 415/566] Better --- src/Interpreters/Cache/FileCache.cpp | 15 +++--------- src/Interpreters/Cache/FileCache.h | 2 ++ src/Interpreters/Cache/FileCacheUtils.h | 17 +++++++++++++ src/Interpreters/Cache/FileSegment.cpp | 32 +++++++++++++++++++++---- src/Interpreters/Cache/FileSegment.h | 3 +++ src/Interpreters/Cache/Metadata.cpp | 30 ++++++++--------------- 6 files changed, 63 insertions(+), 36 deletions(-) create mode 100644 src/Interpreters/Cache/FileCacheUtils.h diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index bda91b31692..47b5779dd1a 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -53,16 +54,6 @@ namespace ErrorCodes namespace { - size_t roundDownToMultiple(size_t num, size_t multiple) - { - return (num / multiple) * multiple; - } - - size_t roundUpToMultiple(size_t num, size_t multiple) - { - return roundDownToMultiple(num + multiple - 1, multiple); - } - std::string getCommonUserID() { auto user_from_context = DB::Context::getGlobalContextInstance()->getFilesystemCacheUser(); @@ -605,8 +596,8 @@ FileCache::getOrSet( /// 2. max_file_segments_limit FileSegment::Range result_range = initial_range; - const auto aligned_offset = roundDownToMultiple(initial_range.left, boundary_alignment); - auto aligned_end_offset = std::min(roundUpToMultiple(initial_range.right + 1, boundary_alignment), file_size) - 1; + const auto aligned_offset = FileCacheUtils::roundDownToMultiple(initial_range.left, boundary_alignment); + auto aligned_end_offset = std::min(FileCacheUtils::roundUpToMultiple(initial_range.right + 1, boundary_alignment), file_size) - 1; chassert(aligned_offset <= initial_range.left); chassert(aligned_end_offset >= initial_range.right); diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 79966e60ad9..3a9241fd351 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -163,6 +163,8 @@ public: size_t getBackgroundDownloadMaxFileSegmentSize() const { return metadata.getBackgroundDownloadMaxFileSegmentSize(); } + size_t getBoundaryAlignment() const { return boundary_alignment; } + bool tryReserve( FileSegment & file_segment, size_t size, diff --git a/src/Interpreters/Cache/FileCacheUtils.h b/src/Interpreters/Cache/FileCacheUtils.h new file mode 100644 index 00000000000..b35ce867a79 --- /dev/null +++ b/src/Interpreters/Cache/FileCacheUtils.h @@ -0,0 +1,17 @@ +#pragma once +#include + +namespace FileCacheUtils +{ + +static size_t roundDownToMultiple(size_t num, size_t multiple) +{ + return (num / multiple) * multiple; +} + +static size_t roundUpToMultiple(size_t num, size_t multiple) +{ + return roundDownToMultiple(num + multiple - 1, multiple); +} + +} diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 2455461435b..05b0853da07 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -629,6 +630,31 @@ void FileSegment::completePartAndResetDownloader() LOG_TEST(log, "Complete batch. 
({})", getInfoForLogUnlocked(lk)); } +size_t FileSegment::getSizeForBackgroundDownload() const +{ + auto lk = lock(); + return getSizeForBackgroundDownloadUnlocked(lk); +} + +size_t FileSegment::getSizeForBackgroundDownloadUnlocked(const FileSegmentGuard::Lock &) const +{ + if (!background_download_enabled + || !downloaded_size + || !remote_file_reader) + { + return 0; + } + + const size_t background_download_max_file_segment_size = cache->getBackgroundDownloadMaxFileSegmentSize(); + size_t desired_size; + if (downloaded_size >= background_download_max_file_segment_size) + desired_size = FileCacheUtils::roundUpToMultiple(downloaded_size, cache->getBoundaryAlignment()); + else + desired_size = FileCacheUtils::roundUpToMultiple(background_download_max_file_segment_size, cache->getBoundaryAlignment()); + + return desired_size - downloaded_size; +} + void FileSegment::complete(bool allow_background_download) { ProfileEventTimeIncrement watch(ProfileEvents::FileSegmentCompleteMicroseconds); @@ -708,10 +734,8 @@ void FileSegment::complete(bool allow_background_download) if (is_last_holder) { bool added_to_download_queue = false; - if (allow_background_download - && background_download_enabled - && remote_file_reader - && downloaded_size < cache->getBackgroundDownloadMaxFileSegmentSize()) + size_t background_download_size = allow_background_download ? getSizeForBackgroundDownloadUnlocked(segment_lock) : 0; + if (background_download_size) { ProfileEvents::increment(ProfileEvents::FilesystemCacheBackgroundDownloadQueuePush); added_to_download_queue = locked_key->addToDownloadQueue(offset(), segment_lock); /// Finish download in background. diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index 21d5f9dab5f..a6bfb203cec 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -185,6 +185,8 @@ public: bool assertCorrectness() const; + size_t getSizeForBackgroundDownload() const; + /** * ========== Methods that must do cv.notify() ================== */ @@ -230,6 +232,7 @@ private: String getDownloaderUnlocked(const FileSegmentGuard::Lock &) const; bool isDownloaderUnlocked(const FileSegmentGuard::Lock & segment_lock) const; void resetDownloaderUnlocked(const FileSegmentGuard::Lock &); + size_t getSizeForBackgroundDownloadUnlocked(const FileSegmentGuard::Lock &) const; void setDownloadState(State state, const FileSegmentGuard::Lock &); void resetDownloadingStateUnlocked(const FileSegmentGuard::Lock &); diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 2ef8f76aca0..45077f09020 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -632,9 +632,6 @@ void CacheMetadata::downloadThreadFunc(const bool & stop_flag) auto & file_segment = holder->front(); - if (file_segment.getDownloadedSize() >= download_max_file_segment_size) - continue; - if (file_segment.getOrSetDownloader() != FileSegment::getCallerId()) continue; @@ -681,6 +678,10 @@ void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optionalinternalBuffer().empty()) { if (!memory) - memory.emplace(DBMS_DEFAULT_BUFFER_SIZE); + memory.emplace(std::min(size_t(DBMS_DEFAULT_BUFFER_SIZE), size_to_download)); reader->set(memory->data(), memory->size()); } @@ -706,24 +707,13 @@ void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optional(reader->getPosition())) reader->seek(offset, SEEK_SET); - bool stop = false; - const size_t max_file_segment_size = 
download_max_file_segment_size.load(); - - while (!stop && !reader->eof()) + while (size_to_download && !reader->eof()) { - auto size = reader->available(); + const auto available = reader->available(); + chassert(available); - const size_t downloaded_size = file_segment.getDownloadedSize(); - if (downloaded_size >= max_file_segment_size) - break; - - if (downloaded_size + size > max_file_segment_size) - { - /// Do not download more than download_max_file_segment_size - /// because we want to leave right boundary of file segment aligned. - size = max_file_segment_size - downloaded_size; - stop = true; - } + const auto size = std::min(available, size_to_download); + size_to_download -= size; std::string failure_reason; if (!file_segment.reserve(size, reserve_space_lock_wait_timeout_milliseconds, failure_reason)) From 39e01d47b1892b2049d18fc19803949d1bfcda51 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 11 Nov 2024 11:54:04 +0000 Subject: [PATCH 416/566] Fix style check --- src/Planner/findParallelReplicasQuery.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index fbe2993b7c6..494326c0ed0 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -227,7 +227,7 @@ const QueryNode * findQueryForParallelReplicas( const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Step {} childrens={}", step->getName(), children.size()); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Step {} children={}", step->getName(), children.size()); if (children.empty()) { @@ -347,7 +347,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr } if (res) - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Choosen query: {}", res->dumpTree()); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Chosen query: {}", res->dumpTree()); return res; } From 33f9e8bc2e5540386e5ccf7fec591eaa1bf5cc24 Mon Sep 17 00:00:00 2001 From: nauu Date: Mon, 11 Nov 2024 20:25:57 +0800 Subject: [PATCH 417/566] fix error --- src/IO/S3/URI.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index ad746ff3326..aefe3ff338c 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -117,7 +117,7 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) is_virtual_hosted_style = true; if (name == "oss-data-acc") { - bucket = bucket.substr(0, bucket.find(".")); + bucket = bucket.substr(0, bucket.find('.')); endpoint = uri.getScheme() + "://" + uri.getHost().substr(bucket.length() + 1); } else From 5f0f2628b15988fda3467b50e21be9a149fc9eb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 11 Nov 2024 13:46:50 +0100 Subject: [PATCH 418/566] Avoid failures on fault injection --- tests/docker_scripts/attach_gdb.lib | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/docker_scripts/attach_gdb.lib b/tests/docker_scripts/attach_gdb.lib index 4170a19176c..f8a08b5e39d 100644 --- a/tests/docker_scripts/attach_gdb.lib +++ b/tests/docker_scripts/attach_gdb.lib @@ -5,7 +5,8 @@ source /repo/tests/docker_scripts/utils.lib function attach_gdb_to_clickhouse() { - IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)") + # Use retries to avoid failures due to fault injections + 
IS_ASAN=$(run_with_retry 5 clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)") if [[ "$IS_ASAN" = "1" ]]; then echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections" From 17f7097d5b66129a3f72f98114cd28575ed839dc Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 11 Nov 2024 13:28:52 +0000 Subject: [PATCH 419/566] Fix CAST from LowCardinality(Nullable) to Dynamic --- src/Functions/FunctionsConversion.cpp | 2 +- ...3261_low_cardinality_nullable_to_dynamic_cast.reference | 2 ++ .../03261_low_cardinality_nullable_to_dynamic_cast.sql | 7 +++++++ 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.reference create mode 100644 tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.sql diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 0f6311c9716..5f1583f6e71 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -4390,7 +4390,7 @@ private: variant_column = IColumn::mutate(column); /// Otherwise we should filter column. else - variant_column = column->filter(filter, variant_size_hint)->assumeMutable(); + variant_column = IColumn::mutate(column->filter(filter, variant_size_hint)); assert_cast(*variant_column).nestedRemoveNullable(); return createVariantFromDescriptorsAndOneNonEmptyVariant(variant_types, std::move(discriminators), std::move(variant_column), variant_discr); diff --git a/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.reference b/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.reference new file mode 100644 index 00000000000..96e34d5a44c --- /dev/null +++ b/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.reference @@ -0,0 +1,2 @@ +\N +\N diff --git a/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.sql b/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.sql new file mode 100644 index 00000000000..fdb497a62bf --- /dev/null +++ b/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.sql @@ -0,0 +1,7 @@ +SET allow_suspicious_low_cardinality_types = 1, allow_experimental_dynamic_type = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 LowCardinality(Nullable(Int))) ENGINE = Memory(); +INSERT INTO TABLE t0 (c0) VALUES (NULL); +SELECT c0::Dynamic FROM t0; +SELECT c0 FROM t0; +DROP TABLE t0; From 288756bc9aede92c6d005af34be94973a5d78203 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 13:32:01 +0000 Subject: [PATCH 420/566] Fix for stateful functions. 
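This change ties splitting a filter's AND chain into multiple FilterTransforms to the query_plan_merge_filters setting, and skips the split entirely when the filter DAG contains stateful functions, since a stateful function must observe the same stream of rows no matter how the conjuncts are evaluated. A minimal sketch of the guard, condensed from the FilterStep change below (type details elided, not the exact code):

    /// Split the AND chain into per-conjunct filters only when it is safe to do so;
    /// otherwise and_atoms stays empty and the whole filter runs as a single FilterTransform.
    if (settings.enable_multiple_filters_transforms_for_and_chain && !actions_dag.hasStatefulFunctions())
        and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name);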
--- .../QueryPlan/BuildQueryPipelineSettings.cpp | 13 ++++++++++++- .../QueryPlan/BuildQueryPipelineSettings.h | 2 ++ src/Processors/QueryPlan/FilterStep.cpp | 12 ++++++++++-- .../queries/0_stateless/03199_merge_filters_bug.sql | 2 +- 4 files changed, 25 insertions(+), 4 deletions(-) diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp b/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp index fb3ed7f80fc..ce02ef8b9ba 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp @@ -6,12 +6,23 @@ namespace DB { +namespace Setting +{ + extern const SettingsBool query_plan_merge_filters; +} + BuildQueryPipelineSettings BuildQueryPipelineSettings::fromContext(ContextPtr from) { + const auto & query_settings = from->getSettingsRef(); BuildQueryPipelineSettings settings; - settings.actions_settings = ExpressionActionsSettings::fromSettings(from->getSettingsRef(), CompileExpressions::yes); + settings.actions_settings = ExpressionActionsSettings::fromSettings(query_settings, CompileExpressions::yes); settings.process_list_element = from->getProcessListElement(); settings.progress_callback = from->getProgressCallback(); + + /// Setting query_plan_merge_filters is enabled by default. + /// But it can break short-circuit evaluation without splitting the filter step into smaller steps. + /// So, enable and disable these optimizations together. + settings.enable_multiple_filters_transforms_for_and_chain = query_settings[Setting::query_plan_merge_filters]; return settings; } diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h index d99f9a7d1f1..6219e37db58 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h @@ -17,6 +17,8 @@ using TemporaryFileLookupPtr = std::shared_ptr; struct BuildQueryPipelineSettings { + bool enable_multiple_filters_transforms_for_and_chain = true; + ExpressionActionsSettings actions_settings; QueryStatusPtr process_list_element; ProgressCallback progress_callback = nullptr; diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 3d56a2352dc..a6b157cdd1d 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -139,7 +139,11 @@ FilterStep::FilterStep( void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name); + std::vector and_atoms; + + if (settings.enable_multiple_filters_transforms_for_and_chain && !actions_dag.hasStatefulFunctions()) + and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name); + for (auto & and_atom : and_atoms) { auto expression = std::make_shared(std::move(and_atom.dag), settings.getActionsSettings()); @@ -178,7 +182,11 @@ void FilterStep::describeActions(FormatSettings & settings) const String prefix(settings.offset, settings.indent_char); auto cloned_dag = actions_dag.clone(); - auto and_atoms = splitAndChainIntoMultipleFilters(cloned_dag, filter_column_name); + + std::vector and_atoms; + if (!actions_dag.hasStatefulFunctions()) + and_atoms = splitAndChainIntoMultipleFilters(cloned_dag, filter_column_name); + for (auto & and_atom : and_atoms) { auto expression = std::make_shared(std::move(and_atom.dag)); diff --git a/tests/queries/0_stateless/03199_merge_filters_bug.sql 
b/tests/queries/0_stateless/03199_merge_filters_bug.sql index ed2ec2ea217..bb2a4255a3d 100644 --- a/tests/queries/0_stateless/03199_merge_filters_bug.sql +++ b/tests/queries/0_stateless/03199_merge_filters_bug.sql @@ -49,7 +49,7 @@ tmp1 AS fs1 FROM t2 LEFT JOIN tmp1 USING (fs1) - WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 0; + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 0, query_plan_merge_filters=0; optimize table t1 final; From 8c2e541392e552343431a6b9b411ee55f37e8fe8 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 14:27:48 +0000 Subject: [PATCH 421/566] Avoid using manes in multistage prewhere optimization. --- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 110 ++++++++++-------- 1 file changed, 60 insertions(+), 50 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 9c82817e8cb..73fe2600946 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include @@ -57,9 +58,9 @@ struct DAGNodeRef const ActionsDAG::Node * node; }; -/// Result name -> DAGNodeRef -using OriginalToNewNodeMap = std::unordered_map; -using NodeNameToLastUsedStepMap = std::unordered_map; +/// Result -> DAGNodeRef +using OriginalToNewNodeMap = std::unordered_map; +using NodeNameToLastUsedStepMap = std::unordered_map; /// Clones the part of original DAG responsible for computing the original_dag_node and adds it to the new DAG. const ActionsDAG::Node & addClonedDAGToDAG( @@ -69,12 +70,12 @@ const ActionsDAG::Node & addClonedDAGToDAG( OriginalToNewNodeMap & node_remap, NodeNameToLastUsedStepMap & node_to_step_map) { - const String & node_name = original_dag_node->result_name; + //const String & node_name = original_dag_node->result_name; /// Look for the node in the map of already known nodes - if (node_remap.contains(node_name)) + if (node_remap.contains(original_dag_node)) { /// If the node is already in the new DAG, return it - const auto & node_ref = node_remap.at(node_name); + const auto & node_ref = node_remap.at(original_dag_node); if (node_ref.dag == new_dag.get()) return *node_ref.node; @@ -83,11 +84,11 @@ const ActionsDAG::Node & addClonedDAGToDAG( { node_ref.dag->addOrReplaceInOutputs(*node_ref.node); const auto & new_node = new_dag->addInput(node_ref.node->result_name, node_ref.node->result_type); - node_remap[node_name] = {new_dag.get(), &new_node}; /// TODO: here we update the node reference. Is it always correct? + node_remap[original_dag_node] = {new_dag.get(), &new_node}; /// TODO: here we update the node reference. Is it always correct? /// Remember the index of the last step which reuses this node. /// We cannot remove this node from the outputs before that step. 
- node_to_step_map[node_name] = step; + node_to_step_map[original_dag_node] = step; return new_node; } } @@ -96,7 +97,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( if (original_dag_node->type == ActionsDAG::ActionType::INPUT) { const auto & new_node = new_dag->addInput(original_dag_node->result_name, original_dag_node->result_type); - node_remap[node_name] = {new_dag.get(), &new_node}; + node_remap[original_dag_node] = {new_dag.get(), &new_node}; return new_node; } @@ -105,7 +106,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { const auto & new_node = new_dag->addColumn( ColumnWithTypeAndName(original_dag_node->column, original_dag_node->result_type, original_dag_node->result_name)); - node_remap[node_name] = {new_dag.get(), &new_node}; + node_remap[original_dag_node] = {new_dag.get(), &new_node}; return new_node; } @@ -113,7 +114,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { const auto & alias_child = addClonedDAGToDAG(step, original_dag_node->children[0], new_dag, node_remap, node_to_step_map); const auto & new_node = new_dag->addAlias(alias_child, original_dag_node->result_name); - node_remap[node_name] = {new_dag.get(), &new_node}; + node_remap[original_dag_node] = {new_dag.get(), &new_node}; return new_node; } @@ -128,7 +129,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( } const auto & new_node = new_dag->addFunction(original_dag_node->function_base, new_children, original_dag_node->result_name); - node_remap[node_name] = {new_dag.get(), &new_node}; + node_remap[original_dag_node] = {new_dag.get(), &new_node}; return new_node; } @@ -138,11 +139,11 @@ const ActionsDAG::Node & addClonedDAGToDAG( const ActionsDAG::Node & addFunction( const ActionsDAGPtr & new_dag, const FunctionOverloadResolverPtr & function, - ActionsDAG::NodeRawConstPtrs children, - OriginalToNewNodeMap & node_remap) + ActionsDAG::NodeRawConstPtrs children) + //OriginalToNewNodeMap & node_remap) { const auto & new_node = new_dag->addFunction(function, children, ""); - node_remap[new_node.result_name] = {new_dag.get(), &new_node}; + //node_remap[new_node.result_name] = {new_dag.get(), &new_node}; return new_node; } @@ -152,14 +153,14 @@ const ActionsDAG::Node & addFunction( const ActionsDAG::Node & addCast( const ActionsDAGPtr & dag, const ActionsDAG::Node & node_to_cast, - const DataTypePtr & to_type, - OriginalToNewNodeMap & node_remap) + const DataTypePtr & to_type) + //[[maybe_unused]] OriginalToNewNodeMap & node_remap) { if (!node_to_cast.result_type->equals(*to_type)) return node_to_cast; const auto & new_node = dag->addCast(node_to_cast, to_type, {}); - node_remap[new_node.result_name] = {dag.get(), &new_node}; + //node_remap[new_node.result_name] = {dag.get(), &new_node}; return new_node; } @@ -169,8 +170,8 @@ const ActionsDAG::Node & addCast( /// 2. makes sure that the result contains only 0 or 1 values even if the source column contains non-boolean values. 
const ActionsDAG::Node & addAndTrue( const ActionsDAGPtr & dag, - const ActionsDAG::Node & filter_node_to_normalize, - OriginalToNewNodeMap & node_remap) + const ActionsDAG::Node & filter_node_to_normalize) + //OriginalToNewNodeMap & node_remap) { Field const_true_value(true); @@ -181,7 +182,7 @@ const ActionsDAG::Node & addAndTrue( const auto * const_true_node = &dag->addColumn(std::move(const_true_column)); ActionsDAG::NodeRawConstPtrs children = {&filter_node_to_normalize, const_true_node}; FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - return addFunction(dag, func_builder_and, children, node_remap); + return addFunction(dag, func_builder_and, children); //, node_remap); } } @@ -243,7 +244,11 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction struct Step { ActionsDAGPtr actions; - String column_name; + /// Original condition, in case if we have only one condition, and it was not casted + const ActionsDAG::Node * original_node; + /// Result condition node + const ActionsDAG::Node * result_node; + //String column_name; }; std::vector steps; @@ -254,7 +259,9 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction { const auto & condition_group = condition_groups[step_index]; ActionsDAGPtr step_dag = std::make_unique(); - String result_name; + const ActionsDAG::Node * original_node = nullptr; + const ActionsDAG::Node * result_node; + //String result_name; std::vector new_condition_nodes; for (const auto * node : condition_group) @@ -267,48 +274,47 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction { /// Add AND function to combine the conditions FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - const auto & and_function_node = addFunction(step_dag, func_builder_and, new_condition_nodes, node_remap); - step_dag->addOrReplaceInOutputs(and_function_node); - result_name = and_function_node.result_name; + const auto & and_function_node = addFunction(step_dag, func_builder_and, new_condition_nodes); //, node_remap); + //step_dag->addOrReplaceInOutputs(and_function_node); + result_node = &and_function_node; } else { - const auto & result_node = *new_condition_nodes.front(); + result_node = new_condition_nodes.front(); /// Check if explicit cast is needed for the condition to serve as a filter. - const auto result_type_name = result_node.result_type->getName(); - if (result_type_name == "UInt8" || - result_type_name == "Nullable(UInt8)" || - result_type_name == "LowCardinality(UInt8)" || - result_type_name == "LowCardinality(Nullable(UInt8))") + //const auto result_type_name = result_node->result_type->getName(); + if (isUInt8(removeNullable(removeLowCardinality(result_node->result_type)))) { /// No need to cast - step_dag->addOrReplaceInOutputs(result_node); - result_name = result_node.result_name; + //step_dag->addOrReplaceInOutputs(result_node); + //result_name = result_node.result_name; } else { /// Build "condition AND True" expression to "cast" the condition to UInt8 or Nullable(UInt8) depending on its type. 
- const auto & cast_node = addAndTrue(step_dag, result_node, node_remap); - step_dag->addOrReplaceInOutputs(cast_node); - result_name = cast_node.result_name; + result_node = &addAndTrue(step_dag, *result_node); //, node_remap); + //step_dag->addOrReplaceInOutputs(cast_node); + //result_name = &cast_node.result_name; } } - steps.push_back({std::move(step_dag), result_name}); + step_dag->getOutputs().insert(step_dag->getOutputs().begin(), result_node); + steps.push_back({std::move(step_dag), original_node, result_node}); } /// 6. Find all outputs of the original DAG auto original_outputs = prewhere_info->prewhere_actions.getOutputs(); + steps.back().actions->getOutputs().clear(); /// 7. Find all outputs that were computed in the already built DAGs, mark these nodes as outputs in the steps where they were computed /// 8. Add computation of the remaining outputs to the last step with the procedure similar to 4 - NameSet all_output_names; + std::unordered_set all_outputs; for (const auto * output : original_outputs) { - all_output_names.insert(output->result_name); - if (node_remap.contains(output->result_name)) + all_outputs.insert(output); + if (node_remap.contains(output)) //->result_name)) { - const auto & new_node_info = node_remap[output->result_name]; - new_node_info.dag->addOrReplaceInOutputs(*new_node_info.node); + const auto & new_node_info = node_remap[output]; + new_node_info.dag->getOutputs().push_back(new_node_info.node); } else if (output->result_name == prewhere_info->prewhere_column_name) { @@ -319,20 +325,23 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction /// 1. AND the last condition with constant True. This is needed to make sure that in the last step filter has UInt8 type /// but contains values other than 0 and 1 (e.g. if it is (number%5) it contains 2,3,4) /// 2. 
CAST the result to the exact type of the PREWHERE column from the original DAG - const auto & last_step_result_node_info = node_remap[steps.back().column_name]; + //const auto & last_step_result_node_info = node_remap[steps.back().column_name]; auto & last_step_dag = steps.back().actions; + auto & last_step_result_node = steps.back().result_node; /// Build AND(last_step_result_node, true) - const auto & and_node = addAndTrue(last_step_dag, *last_step_result_node_info.node, node_remap); + const auto & and_node = addAndTrue(last_step_dag, *last_step_result_node); //, node_remap); /// Build CAST(and_node, type of PREWHERE column) - const auto & cast_node = addCast(last_step_dag, and_node, output->result_type, node_remap); + const auto & cast_node = addCast(last_step_dag, and_node, output->result_type); //, node_remap); /// Add alias for the result with the name of the PREWHERE column const auto & prewhere_result_node = last_step_dag->addAlias(cast_node, output->result_name); - last_step_dag->addOrReplaceInOutputs(prewhere_result_node); + //last_step_dag->addOrReplaceInOutputs(prewhere_result_node); + last_step_dag->getOutputs().push_back(&prewhere_result_node); + steps.back().result_node = &prewhere_result_node; } else { const auto & node_in_new_dag = addClonedDAGToDAG(steps.size() - 1, output, steps.back().actions, node_remap, node_to_step); - steps.back().actions->addOrReplaceInOutputs(node_in_new_dag); + steps.back().actions->getOutputs().push_back(&node_in_new_dag); } } @@ -345,10 +354,10 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction { .type = PrewhereExprStep::Filter, .actions = std::make_shared(std::move(*step.actions), actions_settings), - .filter_column_name = step.column_name, + .filter_column_name = step.result_node->result_name, /// Don't remove if it's in the list of original outputs .remove_filter_column = - !all_output_names.contains(step.column_name) && node_to_step[step.column_name] <= step_index, + step.original_node && !all_outputs.contains(step.original_node) && node_to_step[step.original_node] <= step_index, .need_filter = false, .perform_alter_conversions = true, }; @@ -356,6 +365,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction prewhere.steps.push_back(std::make_shared(std::move(new_step))); } + prewhere.steps.back()->remove_filter_column = prewhere_info->remove_prewhere_column; prewhere.steps.back()->need_filter = prewhere_info->need_filter; } From bcab2d51aa47f66d88ecc1c17463e2754260826d Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Mon, 11 Nov 2024 15:58:06 +0100 Subject: [PATCH 422/566] Use get_parameter_from_ssm in ci_buddy --- tests/ci/ci_buddy.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index 164af72f4be..07b748180cd 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -3,14 +3,13 @@ import json import os from typing import Dict, List, Union -import boto3 import requests from botocore.exceptions import ClientError from ci_config import CI from ci_utils import WithIter from commit_status_helper import get_commit_filtered_statuses, get_repo -from get_robot_token import get_best_robot_token +from get_robot_token import get_best_robot_token, get_parameter_from_ssm from github_helper import GitHub from pr_info import PRInfo @@ -89,15 +88,9 @@ class CIBuddy: def _get_webhooks(): name = "ci_buddy_web_hooks" - session = boto3.Session(region_name="us-east-1") # Replace with your region - ssm_client = session.client("ssm") json_string = None try: - response = ssm_client.get_parameter( - Name=name, - WithDecryption=True, # Set to True if the parameter is a SecureString - ) - json_string = response["Parameter"]["Value"] + json_string = get_parameter_from_ssm(name, decrypt=True) except ClientError as e: print(f"An error occurred: {e}") From 5c5016218b77cf77323c126bc064d90601beadef Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 15:05:53 +0000 Subject: [PATCH 423/566] Fixing style. --- src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp b/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp index ce02ef8b9ba..1832cc2ad42 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp @@ -20,7 +20,7 @@ BuildQueryPipelineSettings BuildQueryPipelineSettings::fromContext(ContextPtr fr settings.progress_callback = from->getProgressCallback(); /// Setting query_plan_merge_filters is enabled by default. - /// But it can brake short-circuit without splitting fiter step into smaller steps. + /// But it can brake short-circuit without splitting filter step into smaller steps. /// So, enable and disable this optimizations together. settings.enable_multiple_filters_transforms_for_and_chain = query_settings[Setting::query_plan_merge_filters]; return settings; From b7d80728190f1e56de3739186543afb575cf2063 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 11 Nov 2024 16:06:17 +0100 Subject: [PATCH 424/566] Add waiting for prometheus instances to start before running test "test_prometheus_protocols". 
--- tests/integration/helpers/cluster.py | 25 ++++++++++++++++++- .../test_prometheus_protocols/test.py | 4 +-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index b24593602ec..a0c2e1d1a70 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -744,11 +744,13 @@ class ClickHouseCluster: # available when with_prometheus == True self.with_prometheus = False self.prometheus_writer_host = "prometheus_writer" + self.prometheus_writer_ip = None self.prometheus_writer_port = 9090 self.prometheus_writer_logs_dir = p.abspath( p.join(self.instances_dir, "prometheus_writer/logs") ) self.prometheus_reader_host = "prometheus_reader" + self.prometheus_reader_ip = None self.prometheus_reader_port = 9091 self.prometheus_reader_logs_dir = p.abspath( p.join(self.instances_dir, "prometheus_reader/logs") @@ -2728,6 +2730,16 @@ class ClickHouseCluster: raise Exception("Can't wait LDAP to start") + def wait_prometheus_to_start(self): + self.prometheus_reader_ip = self.get_instance_ip(self.prometheus_reader_host) + self.prometheus_writer_ip = self.get_instance_ip(self.prometheus_writer_host) + self.wait_for_url( + f"http://{self.prometheus_reader_ip}:{self.prometheus_reader_port}/api/v1/query?query=time()" + ) + self.wait_for_url( + f"http://{self.prometheus_writer_ip}:{self.prometheus_writer_port}/api/v1/query?query=time()" + ) + def start(self): pytest_xdist_logging_to_separate_files.setup() logging.info("Running tests in {}".format(self.base_path)) @@ -3083,12 +3095,23 @@ class ClickHouseCluster: f"http://{self.jdbc_bridge_ip}:{self.jdbc_bridge_port}/ping" ) - if self.with_prometheus: + if self.with_prometheus and self.base_prometheus_cmd: os.makedirs(self.prometheus_writer_logs_dir) os.chmod(self.prometheus_writer_logs_dir, stat.S_IRWXU | stat.S_IRWXO) os.makedirs(self.prometheus_reader_logs_dir) os.chmod(self.prometheus_reader_logs_dir, stat.S_IRWXU | stat.S_IRWXO) + prometheus_start_cmd = self.base_prometheus_cmd + common_opts + + logging.info( + "Trying to create Prometheus instances by command %s", + " ".join(map(str, prometheus_start_cmd)), + ) + run_and_check(prometheus_start_cmd) + self.up_called = True + logging.info("Trying to connect to Prometheus...") + self.wait_prometheus_to_start() + clickhouse_start_cmd = self.base_cmd + ["up", "-d", "--no-recreate"] logging.debug( ( diff --git a/tests/integration/test_prometheus_protocols/test.py b/tests/integration/test_prometheus_protocols/test.py index e368c841c4e..49bc7817f02 100644 --- a/tests/integration/test_prometheus_protocols/test.py +++ b/tests/integration/test_prometheus_protocols/test.py @@ -20,7 +20,7 @@ node = cluster.add_instance( def execute_query_on_prometheus_writer(query, timestamp): return execute_query_impl( - cluster.get_instance_ip(cluster.prometheus_writer_host), + cluster.prometheus_writer_ip, cluster.prometheus_writer_port, "/api/v1/query", query, @@ -30,7 +30,7 @@ def execute_query_on_prometheus_writer(query, timestamp): def execute_query_on_prometheus_reader(query, timestamp): return execute_query_impl( - cluster.get_instance_ip(cluster.prometheus_reader_host), + cluster.prometheus_reader_ip, cluster.prometheus_reader_port, "/api/v1/query", query, From 0bdf4402fea83e1cb96b0040323c3247f5dcb0b5 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Mon, 11 Nov 2024 15:58:38 +0100 Subject: [PATCH 425/566] Post critical errors from cherry_pick.py --- tests/ci/cherry_pick.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index 9bdc184f661..ca32d5bc24c 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -34,8 +34,9 @@ from typing import List, Optional import __main__ +from ci_buddy import CIBuddy from ci_config import Labels -from env_helper import TEMP_PATH +from env_helper import IS_CI, TEMP_PATH from get_robot_token import get_best_robot_token from git_helper import GIT_PREFIX, git_runner, is_shallow from github_helper import GitHub, PullRequest, PullRequests, Repository @@ -653,6 +654,14 @@ def main(): bp.process_backports() if bp.error is not None: logging.error("Finished successfully, but errors occurred!") + if IS_CI: + ci_buddy = CIBuddy() + ci_buddy.post_job_error( + f"The cherry-pick finished with errors: {bp.error}", + with_instance_info=True, + with_wf_link=True, + critical=True, + ) raise bp.error From 40c4183ae70c720aaca797b165e3cf71aa4d8133 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Mon, 11 Nov 2024 17:26:28 +0100 Subject: [PATCH 426/566] fix tidy build --- src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h | 10 +++++----- src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp | 4 ++-- src/Disks/ObjectStorages/Local/LocalObjectStorage.h | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h index 317399b4753..7d6c914c398 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h @@ -77,11 +77,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - /// Remove file. Throws exception if file doesn't exists or it's a directory. - void removeObject(const StoredObject & object); - - void removeObjects(const StoredObjects & objects); - void removeObjectIfExists(const StoredObject & object) override; void removeObjectsIfExist(const StoredObjects & objects) override; @@ -117,6 +112,11 @@ private: void initializeHDFSFS() const; std::string extractObjectKeyFromURL(const StoredObject & object) const; + /// Remove file. Throws exception if file doesn't exists or it's a directory. + void removeObject(const StoredObject & object); + + void removeObjects(const StoredObjects & objects); + const Poco::Util::AbstractConfiguration & config; mutable HDFSBuilderWrapper hdfs_builder; diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index 5f1b6aedc72..f24501dc60e 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -81,7 +81,7 @@ std::unique_ptr LocalObjectStorage::writeObject( /// NO return std::make_unique(object.remote_path, buf_size); } -void LocalObjectStorage::removeObject(const StoredObject & object) +void LocalObjectStorage::removeObject(const StoredObject & object) const { /// For local object storage files are actually removed when "metadata" is removed. 
if (!exists(object)) @@ -91,7 +91,7 @@ void LocalObjectStorage::removeObject(const StoredObject & object) ErrnoException::throwFromPath(ErrorCodes::CANNOT_UNLINK, object.remote_path, "Cannot unlink file {}", object.remote_path); } -void LocalObjectStorage::removeObjects(const StoredObjects & objects) +void LocalObjectStorage::removeObjects(const StoredObjects & objects) const { for (const auto & object : objects) removeObject(object); diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h index ffc151bda04..5b3c3951364 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h @@ -42,10 +42,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void removeObject(const StoredObject & object); - - void removeObjects(const StoredObjects & objects); - void removeObjectIfExists(const StoredObject & object) override; void removeObjectsIfExist(const StoredObjects & objects) override; @@ -82,6 +78,10 @@ public: ReadSettings patchSettings(const ReadSettings & read_settings) const override; private: + void removeObject(const StoredObject & object) const; + + void removeObjects(const StoredObjects & objects) const; + String key_prefix; LoggerPtr log; std::string description; From 6f00b490679f9e26159105f095660f6b23ea34c2 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 16:41:23 +0000 Subject: [PATCH 427/566] Fixing more tests. --- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 2 +- ...filter_push_down_equivalent_sets.reference | 68 +++++++++++-------- .../0_stateless/03199_merge_filters_bug.sql | 34 +++++++++- 3 files changed, 74 insertions(+), 30 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 73fe2600946..2af9974c870 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -358,7 +358,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction /// Don't remove if it's in the list of original outputs .remove_filter_column = step.original_node && !all_outputs.contains(step.original_node) && node_to_step[step.original_node] <= step_index, - .need_filter = false, + .need_filter = true, .perform_alter_conversions = true, }; diff --git a/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference b/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference index 80f4e309505..d0a3e7b02ae 100644 --- a/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference +++ b/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference @@ -163,17 +163,21 @@ Positions: 4 2 0 1 Filter (( + (JOIN actions + Change column names to column identifiers))) Header: __table1.id UInt64 __table1.value String - Filter column: and(equals(__table1.id, 5_UInt8), equals(__table1.id, 6_UInt8)) (removed) + AND column: equals(__table1.id, 5_UInt8) Actions: INPUT : 0 -> id UInt64 : 0 - INPUT : 1 -> value String : 1 + COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 1 + FUNCTION equals(id : 0, 5_UInt8 :: 1) -> equals(__table1.id, 5_UInt8) UInt8 : 2 + Positions: 2 0 2 + Filter column: and(equals(__table1.id, 5_UInt8), equals(__table1.id, 6_UInt8)) (removed) + Actions: INPUT : 2 -> value String : 0 + INPUT : 1 -> id UInt64 : 1 COLUMN Const(UInt8) 
-> 6_UInt8 UInt8 : 2 - COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 3 - ALIAS id : 0 -> __table1.id UInt64 : 4 - ALIAS value :: 1 -> __table1.value String : 5 - FUNCTION equals(id : 0, 6_UInt8 :: 2) -> equals(__table1.id, 6_UInt8) UInt8 : 1 - FUNCTION equals(id :: 0, 5_UInt8 :: 3) -> equals(__table1.id, 5_UInt8) UInt8 : 2 - FUNCTION and(equals(__table1.id, 5_UInt8) :: 2, equals(__table1.id, 6_UInt8) :: 1) -> and(equals(__table1.id, 5_UInt8), equals(__table1.id, 6_UInt8)) UInt8 : 3 - Positions: 3 4 5 + INPUT : 0 -> equals(__table1.id, 5_UInt8) UInt8 : 3 + ALIAS value :: 0 -> __table1.value String : 4 + ALIAS id : 1 -> __table1.id UInt64 : 0 + FUNCTION equals(id :: 1, 6_UInt8 :: 2) -> equals(__table1.id, 6_UInt8) UInt8 : 5 + FUNCTION and(equals(__table1.id, 5_UInt8) :: 3, equals(__table1.id, 6_UInt8) :: 5) -> and(equals(__table1.id, 5_UInt8), equals(__table1.id, 6_UInt8)) UInt8 : 2 + Positions: 2 0 4 ReadFromMergeTree (default.test_table_1) Header: id UInt64 value String @@ -183,17 +187,21 @@ Positions: 4 2 0 1 Filter (( + (JOIN actions + Change column names to column identifiers))) Header: __table2.id UInt64 __table2.value String - Filter column: and(equals(__table2.id, 6_UInt8), equals(__table2.id, 5_UInt8)) (removed) + AND column: equals(__table2.id, 6_UInt8) Actions: INPUT : 0 -> id UInt64 : 0 - INPUT : 1 -> value String : 1 + COLUMN Const(UInt8) -> 6_UInt8 UInt8 : 1 + FUNCTION equals(id : 0, 6_UInt8 :: 1) -> equals(__table2.id, 6_UInt8) UInt8 : 2 + Positions: 2 0 2 + Filter column: and(equals(__table2.id, 6_UInt8), equals(__table2.id, 5_UInt8)) (removed) + Actions: INPUT : 2 -> value String : 0 + INPUT : 1 -> id UInt64 : 1 COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 2 - COLUMN Const(UInt8) -> 6_UInt8 UInt8 : 3 - ALIAS id : 0 -> __table2.id UInt64 : 4 - ALIAS value :: 1 -> __table2.value String : 5 - FUNCTION equals(id : 0, 5_UInt8 :: 2) -> equals(__table2.id, 5_UInt8) UInt8 : 1 - FUNCTION equals(id :: 0, 6_UInt8 :: 3) -> equals(__table2.id, 6_UInt8) UInt8 : 2 - FUNCTION and(equals(__table2.id, 6_UInt8) :: 2, equals(__table2.id, 5_UInt8) :: 1) -> and(equals(__table2.id, 6_UInt8), equals(__table2.id, 5_UInt8)) UInt8 : 3 - Positions: 3 4 5 + INPUT : 0 -> equals(__table2.id, 6_UInt8) UInt8 : 3 + ALIAS value :: 0 -> __table2.value String : 4 + ALIAS id : 1 -> __table2.id UInt64 : 0 + FUNCTION equals(id :: 1, 5_UInt8 :: 2) -> equals(__table2.id, 5_UInt8) UInt8 : 5 + FUNCTION and(equals(__table2.id, 6_UInt8) :: 3, equals(__table2.id, 5_UInt8) :: 5) -> and(equals(__table2.id, 6_UInt8), equals(__table2.id, 5_UInt8)) UInt8 : 2 + Positions: 2 0 4 ReadFromMergeTree (default.test_table_2) Header: id UInt64 value String @@ -656,17 +664,21 @@ Positions: 4 2 0 1 __table1.value String __table2.value String __table2.id UInt64 - Filter column: and(equals(__table1.id, 5_UInt8), equals(__table2.id, 6_UInt8)) (removed) + AND column: equals(__table1.id, 5_UInt8) Actions: INPUT : 0 -> __table1.id UInt64 : 0 - INPUT :: 1 -> __table1.value String : 1 - INPUT :: 2 -> __table2.value String : 2 - INPUT : 3 -> __table2.id UInt64 : 3 - COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 4 - COLUMN Const(UInt8) -> 6_UInt8 UInt8 : 5 - FUNCTION equals(__table1.id : 0, 5_UInt8 :: 4) -> equals(__table1.id, 5_UInt8) UInt8 : 6 - FUNCTION equals(__table2.id : 3, 6_UInt8 :: 5) -> equals(__table2.id, 6_UInt8) UInt8 : 4 - FUNCTION and(equals(__table1.id, 5_UInt8) :: 6, equals(__table2.id, 6_UInt8) :: 4) -> and(equals(__table1.id, 5_UInt8), equals(__table2.id, 6_UInt8)) UInt8 : 5 - Positions: 5 0 1 2 3 + COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 1 + 
FUNCTION equals(__table1.id : 0, 5_UInt8 :: 1) -> equals(__table1.id, 5_UInt8) UInt8 : 2 + Positions: 2 0 2 + Filter column: and(equals(__table1.id, 5_UInt8), equals(__table2.id, 6_UInt8)) (removed) + Actions: INPUT :: 1 -> __table1.id UInt64 : 0 + INPUT :: 2 -> __table1.value String : 1 + INPUT :: 3 -> __table2.value String : 2 + INPUT : 4 -> __table2.id UInt64 : 3 + COLUMN Const(UInt8) -> 6_UInt8 UInt8 : 4 + INPUT : 0 -> equals(__table1.id, 5_UInt8) UInt8 : 5 + FUNCTION equals(__table2.id : 3, 6_UInt8 :: 4) -> equals(__table2.id, 6_UInt8) UInt8 : 6 + FUNCTION and(equals(__table1.id, 5_UInt8) :: 5, equals(__table2.id, 6_UInt8) :: 6) -> and(equals(__table1.id, 5_UInt8), equals(__table2.id, 6_UInt8)) UInt8 : 4 + Positions: 4 0 1 2 3 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String diff --git a/tests/queries/0_stateless/03199_merge_filters_bug.sql b/tests/queries/0_stateless/03199_merge_filters_bug.sql index bb2a4255a3d..2023e0f1d73 100644 --- a/tests/queries/0_stateless/03199_merge_filters_bug.sql +++ b/tests/queries/0_stateless/03199_merge_filters_bug.sql @@ -51,6 +51,22 @@ tmp1 AS LEFT JOIN tmp1 USING (fs1) WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 0, query_plan_merge_filters=0; +WITH +tmp1 AS +( + SELECT + CAST(s1, 'FixedString(10)') AS fs1, + s2 AS sector, + s3 + FROM t1 + WHERE (s3 != 'test') +) + SELECT + fs1 + FROM t2 + LEFT JOIN tmp1 USING (fs1) + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 1, query_plan_merge_filters=1; + optimize table t1 final; WITH @@ -67,4 +83,20 @@ tmp1 AS fs1 FROM t2 LEFT JOIN tmp1 USING (fs1) - WHERE (fs1 IN ('test')); + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 0, query_plan_merge_filters=0; + +WITH +tmp1 AS +( + SELECT + CAST(s1, 'FixedString(10)') AS fs1, + s2 AS sector, + s3 + FROM t1 + WHERE (s3 != 'test') +) + SELECT + fs1 + FROM t2 + LEFT JOIN tmp1 USING (fs1) + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 1, query_plan_merge_filters=1; From a5318f60846e206b69538a125af76a222d48dbce Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Mon, 11 Nov 2024 08:55:53 -0800 Subject: [PATCH 428/566] Change enable_http_compression setting's default value to 1 --- src/Core/Settings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index d321f5dbdf2..ec07d1cdc4b 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -1788,7 +1788,7 @@ Possible values: - 0 — Disabled. - 1 — Enabled. -)", 0) \ +)", 1) \ DECLARE(Int64, http_zlib_compression_level, 3, R"( Sets the level of data compression in the response to an HTTP request if [enable_http_compression = 1](#enable_http_compression). From f60d35161f9d4b34fac0f51ad566906301cb3762 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Mon, 11 Nov 2024 08:56:50 -0800 Subject: [PATCH 429/566] Update docs for ru/ and zh/ --- docs/ru/operations/settings/settings.md | 2 +- docs/zh/operations/settings/settings.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 84bbf6c83d3..bbe1f071381 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -136,7 +136,7 @@ ClickHouse применяет настройку в тех случаях, ко - 0 — выключена. - 1 — включена. -Значение по умолчанию: 0. +Значение по умолчанию: 1. 
## http_zlib_compression_level {#settings-http_zlib_compression_level} diff --git a/docs/zh/operations/settings/settings.md b/docs/zh/operations/settings/settings.md index 5e59196f56c..baa4fcb0754 100644 --- a/docs/zh/operations/settings/settings.md +++ b/docs/zh/operations/settings/settings.md @@ -97,7 +97,7 @@ ClickHouse从表的过时副本中选择最相关的副本。 - 0 — Disabled. - 1 — Enabled. -默认值:0。 +默认值:1。 ## http_zlib_compression_level {#settings-http_zlib_compression_level} From a0cc03b175b035e9c52e782811d990a619acc272 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 17:50:11 +0000 Subject: [PATCH 430/566] Cleanup. --- src/Processors/QueryPlan/FilterStep.cpp | 17 ++++++++ .../QueryPlan/ReadFromMergeTree.cpp | 2 + .../MergeTree/MergeTreeBlockReadUtils.cpp | 2 +- src/Storages/MergeTree/MergeTreeIOSettings.h | 2 + .../MergeTree/MergeTreeSelectProcessor.cpp | 8 ++-- .../MergeTree/MergeTreeSelectProcessor.h | 3 +- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 41 +++++++++++-------- 7 files changed, 51 insertions(+), 24 deletions(-) diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index a6b157cdd1d..5bf55f67208 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -64,6 +64,7 @@ static ActionsAndName splitSingleAndFilter(ActionsDAG & dag, const ActionsDAG::N return ActionsAndName{std::move(split_result.first), std::move(name)}; } +/// Try to split the left most AND atom to a separate DAG. static std::optional trySplitSingleAndFilter(ActionsDAG & dag, const std::string & filter_name) { const auto * filter = &dag.findInOutputs(filter_name); @@ -83,6 +84,7 @@ static std::optional trySplitSingleAndFilter(ActionsDAG & dag, c if (node->type == ActionsDAG::ActionType::FUNCTION && node->function_base->getName() == "and") { + /// The order is important. We should take the left-most atom, so put conditions on stack in reverse order. for (const auto * child : node->children | std::ranges::views::reverse) nodes.push(child); @@ -141,6 +143,8 @@ void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQ { std::vector and_atoms; + /// Spliting AND filter condition to steps under the setting, which is enabled with merge_filters optimization. + /// This is needed to support short-circuit properly. 
if (settings.enable_multiple_filters_transforms_for_and_chain && !actions_dag.hasStatefulFunctions()) and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name); @@ -206,6 +210,19 @@ void FilterStep::describeActions(FormatSettings & settings) const void FilterStep::describeActions(JSONBuilder::JSONMap & map) const { + auto cloned_dag = actions_dag.clone(); + + std::vector and_atoms; + if (!actions_dag.hasStatefulFunctions()) + and_atoms = splitAndChainIntoMultipleFilters(cloned_dag, filter_column_name); + + for (auto & and_atom : and_atoms) + { + auto expression = std::make_shared(std::move(and_atom.dag)); + map.add("AND column", and_atom.name); + map.add("Expression", expression->toTree()); + } + map.add("Filter Column", filter_column_name); map.add("Removes Filter", remove_filter_column); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 3186df6a6b3..d144187821a 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -175,6 +175,7 @@ namespace Setting extern const SettingsBool use_skip_indexes; extern const SettingsBool use_skip_indexes_if_final; extern const SettingsBool use_uncompressed_cache; + extern const SettingsBool query_plan_merge_filters; extern const SettingsUInt64 merge_tree_min_read_task_size; } @@ -206,6 +207,7 @@ static MergeTreeReaderSettings getMergeTreeReaderSettings( .use_asynchronous_read_from_pool = settings[Setting::allow_asynchronous_read_from_io_pool_for_merge_tree] && (settings[Setting::max_streams_to_max_threads_ratio] > 1 || settings[Setting::max_streams_for_merge_tree_reading] > 1), .enable_multiple_prewhere_read_steps = settings[Setting::enable_multiple_prewhere_read_steps], + .force_shirt_circuit_execution = settings[Setting::query_plan_merge_filters] }; } diff --git a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp index 7ba358d2d35..03a0aed80bf 100644 --- a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp +++ b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp @@ -330,7 +330,7 @@ MergeTreeReadTaskColumns getReadTaskColumns( auto prewhere_actions = MergeTreeSelectProcessor::getPrewhereActions( prewhere_info, actions_settings, - reader_settings.enable_multiple_prewhere_read_steps); + reader_settings.enable_multiple_prewhere_read_steps, reader_settings.force_shirt_circuit_execution); for (const auto & step : prewhere_actions.steps) add_step(*step); diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h index 4d1d2533729..ecd4ad34961 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.h +++ b/src/Storages/MergeTree/MergeTreeIOSettings.h @@ -45,6 +45,8 @@ struct MergeTreeReaderSettings bool use_asynchronous_read_from_pool = false; /// If PREWHERE has multiple conditions combined with AND, execute them in separate read/filtering steps. bool enable_multiple_prewhere_read_steps = false; + /// In case of multiple prewhere steps, execute filtering earlier to support short-circuit properly. + bool force_shirt_circuit_execution = false; /// If true, try to lower size of read buffer according to granule size and compressed block size. bool adjust_read_buffer_size = true; /// If true, it's allowed to read the whole part without reading marks. 
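The reader-settings flag added just above is what, in the changes that follow, makes each generated prewhere read step actually filter its rows before the next step runs; that is what keeps short-circuit semantics intact once a single PREWHERE condition is split into several steps. A hypothetical query illustrating why that matters (the table t and column x are assumptions; intDiv throws on division by zero, so the second conjunct must only see rows that already passed the first one):

    -- Sketch: with the AND split into two prewhere read steps, the first step must
    -- remove the x = 0 rows before the second step evaluates intDiv.
    SELECT count()
    FROM t
    PREWHERE (x != 0) AND (intDiv(100, x) > 5);
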
diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 5efd33ce09a..8beff55e698 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -91,7 +91,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( , algorithm(std::move(algorithm_)) , prewhere_info(prewhere_info_) , actions_settings(actions_settings_) - , prewhere_actions(getPrewhereActions(prewhere_info, actions_settings, reader_settings_.enable_multiple_prewhere_read_steps)) + , prewhere_actions(getPrewhereActions(prewhere_info, actions_settings, reader_settings_.enable_multiple_prewhere_read_steps, reader_settings_.force_shirt_circuit_execution)) , reader_settings(reader_settings_) , result_header(transformHeader(pool->getHeader(), prewhere_info)) { @@ -124,9 +124,9 @@ String MergeTreeSelectProcessor::getName() const return fmt::format("MergeTreeSelect(pool: {}, algorithm: {})", pool->getName(), algorithm->getName()); } -bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere); +bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere, bool force_shirt_circuit_execution); -PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, bool enable_multiple_prewhere_read_steps) +PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, bool enable_multiple_prewhere_read_steps, bool force_shirt_circuit_execution) { PrewhereExprInfo prewhere_actions; if (prewhere_info) @@ -147,7 +147,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr } if (!enable_multiple_prewhere_read_steps || - !tryBuildPrewhereSteps(prewhere_info, actions_settings, prewhere_actions)) + !tryBuildPrewhereSteps(prewhere_info, actions_settings, prewhere_actions, force_shirt_circuit_execution)) { PrewhereExprStep prewhere_step { diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 33069a78e33..afd88116e15 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -73,7 +73,8 @@ public: static PrewhereExprInfo getPrewhereActions( PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, - bool enable_multiple_prewhere_read_steps); + bool enable_multiple_prewhere_read_steps, + bool force_shirt_circuit_execution); void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 2af9974c870..c35e356bf18 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -50,6 +50,17 @@ void fillRequiredColumns(const ActionsDAG::Node * node, std::unordered_map DAGNodeRef +/// ResultNode -> DAGNodeRef using OriginalToNewNodeMap = std::unordered_map; using NodeNameToLastUsedStepMap = std::unordered_map; @@ -70,7 +81,6 @@ const ActionsDAG::Node & addClonedDAGToDAG( OriginalToNewNodeMap & node_remap, NodeNameToLastUsedStepMap & node_to_step_map) { - //const String & node_name = 
original_dag_node->result_name; /// Look for the node in the map of already known nodes if (node_remap.contains(original_dag_node)) { @@ -82,9 +92,11 @@ const ActionsDAG::Node & addClonedDAGToDAG( /// If the node is known from the previous steps, add it as an input, except for constants if (original_dag_node->type != ActionsDAG::ActionType::COLUMN) { - node_ref.dag->addOrReplaceInOutputs(*node_ref.node); + // addToOutputsIfNotAlreadyAdded(*node_ref.dag, node_ref.node); + node_ref.dag->getOutputs().push_back(node_ref.node); + const auto & new_node = new_dag->addInput(node_ref.node->result_name, node_ref.node->result_type); - node_remap[original_dag_node] = {new_dag.get(), &new_node}; /// TODO: here we update the node reference. Is it always correct? + node_remap[original_dag_node] = {new_dag.get(), &new_node}; /// Remember the index of the last step which reuses this node. /// We cannot remove this node from the outputs before that step. @@ -207,7 +219,11 @@ const ActionsDAG::Node & addAndTrue( /// 6. Find all outputs of the original DAG /// 7. Find all outputs that were computed in the already built DAGs, mark these nodes as outputs in the steps where they were computed /// 8. Add computation of the remaining outputs to the last step with the procedure similar to 4 -bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere) +bool tryBuildPrewhereSteps( + PrewhereInfoPtr prewhere_info, + const ExpressionActionsSettings & actions_settings, + PrewhereExprInfo & prewhere, + bool force_shirt_circuit_execution) { if (!prewhere_info) return true; @@ -275,26 +291,16 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction /// Add AND function to combine the conditions FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); const auto & and_function_node = addFunction(step_dag, func_builder_and, new_condition_nodes); //, node_remap); - //step_dag->addOrReplaceInOutputs(and_function_node); result_node = &and_function_node; } else { result_node = new_condition_nodes.front(); /// Check if explicit cast is needed for the condition to serve as a filter. - //const auto result_type_name = result_node->result_type->getName(); - if (isUInt8(removeNullable(removeLowCardinality(result_node->result_type)))) - { - /// No need to cast - //step_dag->addOrReplaceInOutputs(result_node); - //result_name = result_node.result_name; - } - else + if (!isUInt8(removeNullable(removeLowCardinality(result_node->result_type)))) { /// Build "condition AND True" expression to "cast" the condition to UInt8 or Nullable(UInt8) depending on its type. 
result_node = &addAndTrue(step_dag, *result_node); //, node_remap); - //step_dag->addOrReplaceInOutputs(cast_node); - //result_name = &cast_node.result_name; } } @@ -334,7 +340,6 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction const auto & cast_node = addCast(last_step_dag, and_node, output->result_type); //, node_remap); /// Add alias for the result with the name of the PREWHERE column const auto & prewhere_result_node = last_step_dag->addAlias(cast_node, output->result_name); - //last_step_dag->addOrReplaceInOutputs(prewhere_result_node); last_step_dag->getOutputs().push_back(&prewhere_result_node); steps.back().result_node = &prewhere_result_node; } @@ -358,7 +363,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction /// Don't remove if it's in the list of original outputs .remove_filter_column = step.original_node && !all_outputs.contains(step.original_node) && node_to_step[step.original_node] <= step_index, - .need_filter = true, + .need_filter = force_shirt_circuit_execution, .perform_alter_conversions = true, }; From 92114f3c749bb78811ece644123b3d81e011e56f Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 18:01:24 +0000 Subject: [PATCH 431/566] Fixing typos. --- src/Processors/QueryPlan/FilterStep.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 5bf55f67208..af9e3f0c515 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -143,7 +143,7 @@ void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQ { std::vector and_atoms; - /// Spliting AND filter condition to steps under the setting, which is enabled with merge_filters optimization. + /// Splitting AND filter condition to steps under the setting, which is enabled with merge_filters optimization. /// This is needed to support short-circuit properly. if (settings.enable_multiple_filters_transforms_for_and_chain && !actions_dag.hasStatefulFunctions()) and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name); From 621cb60446cb17f0366f49b86c3432eed5db3716 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Mon, 11 Nov 2024 11:12:01 -0800 Subject: [PATCH 432/566] Fix 'was was' typo in sql-reference/statements/alter/column.md --- docs/en/sql-reference/statements/alter/column.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index 29df041ccc6..fb16dacb7c8 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -279,7 +279,7 @@ For columns with a new or updated `MATERIALIZED` value expression, all existing For columns with a new or updated `DEFAULT` value expression, the behavior depends on the ClickHouse version: - In ClickHouse < v24.2, all existing rows are rewritten. -- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression. 
+- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression. Syntax: From 5f9506cc7d526c0c7aaf5a8318dd125dae612c69 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 11 Nov 2024 21:24:18 +0100 Subject: [PATCH 433/566] Cleanup --- src/Interpreters/Cache/FileCache.cpp | 4 ++-- src/Interpreters/Cache/FileCache.h | 3 ++- src/Interpreters/Cache/Metadata.cpp | 2 -- src/Interpreters/Cache/Metadata.h | 5 ----- 4 files changed, 4 insertions(+), 10 deletions(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 47b5779dd1a..8887165a75d 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -87,6 +87,7 @@ FileCache::FileCache(const std::string & cache_name, const FileCacheSettings & s : max_file_segment_size(settings.max_file_segment_size) , bypass_cache_threshold(settings.enable_bypass_cache_with_threshold ? settings.bypass_cache_threshold : 0) , boundary_alignment(settings.boundary_alignment) + , background_download_max_file_segment_size(settings.background_download_max_file_segment_size) , load_metadata_threads(settings.load_metadata_threads) , load_metadata_asynchronously(settings.load_metadata_asynchronously) , write_cache_per_user_directory(settings.write_cache_per_user_id_directory) @@ -97,7 +98,6 @@ FileCache::FileCache(const std::string & cache_name, const FileCacheSettings & s , metadata(settings.base_path, settings.background_download_queue_size_limit, settings.background_download_threads, - settings.background_download_max_file_segment_size, write_cache_per_user_directory) { if (settings.cache_policy == "LRU") @@ -1597,7 +1597,7 @@ void FileCache::applySettingsIfPossible(const FileCacheSettings & new_settings, if (new_settings.background_download_max_file_segment_size != actual_settings.background_download_max_file_segment_size) { - metadata.setBackgroundDownloadMaxFileSegmentSize(new_settings.background_download_max_file_segment_size); + background_download_max_file_segment_size = new_settings.background_download_max_file_segment_size; LOG_INFO(log, "Changed background_download_max_file_segment_size from {} to {}", actual_settings.background_download_max_file_segment_size, diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 3a9241fd351..bbe8502fec9 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -161,7 +161,7 @@ public: size_t getMaxFileSegmentSize() const { return max_file_segment_size; } - size_t getBackgroundDownloadMaxFileSegmentSize() const { return metadata.getBackgroundDownloadMaxFileSegmentSize(); } + size_t getBackgroundDownloadMaxFileSegmentSize() const { return background_download_max_file_segment_size.load(); } size_t getBoundaryAlignment() const { return boundary_alignment; } @@ -203,6 +203,7 @@ private: std::atomic max_file_segment_size; const size_t bypass_cache_threshold; const size_t boundary_alignment; + std::atomic background_download_max_file_segment_size; size_t load_metadata_threads; const bool load_metadata_asynchronously; std::atomic stop_loading_metadata = false; diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 45077f09020..dd4a5502638 100644 --- 
a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -168,7 +168,6 @@ CacheMetadata::CacheMetadata( const std::string & path_, size_t background_download_queue_size_limit_, size_t background_download_threads_, - size_t background_download_max_file_segment_size_, bool write_cache_per_user_directory_) : path(path_) , cleanup_queue(std::make_shared()) @@ -176,7 +175,6 @@ CacheMetadata::CacheMetadata( , write_cache_per_user_directory(write_cache_per_user_directory_) , log(getLogger("CacheMetadata")) , download_threads_num(background_download_threads_) - , download_max_file_segment_size(background_download_max_file_segment_size_) { } diff --git a/src/Interpreters/Cache/Metadata.h b/src/Interpreters/Cache/Metadata.h index 526b82c9a68..24683b2de71 100644 --- a/src/Interpreters/Cache/Metadata.h +++ b/src/Interpreters/Cache/Metadata.h @@ -165,7 +165,6 @@ public: const std::string & path_, size_t background_download_queue_size_limit_, size_t background_download_threads_, - size_t background_download_max_file_segment_size_, bool write_cache_per_user_directory_); void startup(); @@ -212,9 +211,6 @@ public: bool setBackgroundDownloadThreads(size_t threads_num); size_t getBackgroundDownloadThreads() const { return download_threads.size(); } - void setBackgroundDownloadMaxFileSegmentSize(size_t max_file_segment_size) { download_max_file_segment_size = max_file_segment_size; } - size_t getBackgroundDownloadMaxFileSegmentSize() const { return download_max_file_segment_size; } - bool setBackgroundDownloadQueueSizeLimit(size_t size); bool isBackgroundDownloadEnabled(); @@ -246,7 +242,6 @@ private: }; std::atomic download_threads_num; - std::atomic download_max_file_segment_size; std::vector> download_threads; std::unique_ptr cleanup_thread; From 06debdc479bab58f2d1d7fd4b3764e65a8c9fa01 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Fri, 8 Nov 2024 17:48:17 +0100 Subject: [PATCH 434/566] result with versioning --- ci/docker/stateless-test/Dockerfile | 6 +- ci/jobs/build_clickhouse.py | 1 - ci/jobs/functional_stateless_tests.py | 11 +- ci/jobs/scripts/clickhouse_proc.py | 11 + .../setup_hdfs_minicluster.sh | 19 ++ ci/praktika/__main__.py | 7 + ci/praktika/_environment.py | 14 +- ci/praktika/_settings.py | 128 ---------- ci/praktika/digest.py | 38 +-- ci/praktika/hook_cache.py | 11 +- ci/praktika/hook_html.py | 71 ++---- ci/praktika/json.html | 11 +- ci/praktika/mangle.py | 36 +-- ci/praktika/native_jobs.py | 14 +- ci/praktika/result.py | 240 +++++++++++++++++- ci/praktika/runner.py | 18 +- ci/praktika/runtime.py | 6 + ci/praktika/s3.py | 172 ++----------- ci/praktika/settings.py | 156 +++++++++++- ci/praktika/utils.py | 2 - ci/praktika/validator.py | 8 +- ci/workflows/pull_request.py | 1 + 22 files changed, 551 insertions(+), 430 deletions(-) create mode 100755 ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh delete mode 100644 ci/praktika/_settings.py diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile index 4abd8204f1d..760fceeebbf 100644 --- a/ci/docker/stateless-test/Dockerfile +++ b/ci/docker/stateless-test/Dockerfile @@ -100,8 +100,12 @@ ENV PATH="/wd/tests:/tmp/praktika/input:$PATH" RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \ && tar -xvf hadoop-3.3.1.tar.gz \ - && rm -rf hadoop-3.3.1.tar.gz + && rm -rf hadoop-3.3.1.tar.gz \ + && chmod 777 /hadoop-3.3.1 RUN npm install -g azurite@3.30.0 \ && npm install -g tslib && npm install -g node + +RUN addgroup --gid 1001 
clickhouse && adduser --uid 1001 --gid 1001 --disabled-password clickhouse +USER clickhouse \ No newline at end of file diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 1e6d2c648a7..3bdc23d383c 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -127,7 +127,6 @@ def main(): Shell.check(f"ls -l {build_dir}/programs/") res = results[-1].is_ok() - Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py index 0481086d80a..390a6336b45 100644 --- a/ci/jobs/functional_stateless_tests.py +++ b/ci/jobs/functional_stateless_tests.py @@ -27,11 +27,12 @@ def parse_args(): default="", ) parser.add_argument("--param", help="Optional job start stage", default=None) + parser.add_argument("--test", help="Optional test name pattern", default="") return parser.parse_args() def run_stateless_test( - no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int + no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int, test="" ): assert not (no_parallel and no_sequiential) test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" @@ -43,7 +44,7 @@ def run_stateless_test( --no-drop-if-fail --capture-client-stacktrace --queries /repo/tests/queries --test-runs 1 --hung-check \ {'--no-parallel' if no_parallel else ''} {'--no-sequential' if no_sequiential else ''} \ --print-time --jobs {nproc} --report-coverage --report-logs-stats {aux} \ - --queries ./tests/queries -- '' | ts '%Y-%m-%d %H:%M:%S' \ + --queries ./tests/queries -- '{test}' | ts '%Y-%m-%d %H:%M:%S' \ | tee -a \"{test_output_file}\"" if Path(test_output_file).exists(): Path(test_output_file).unlink() @@ -119,11 +120,14 @@ def main(): stop_watch_ = Utils.Stopwatch() step_name = "Start ClickHouse Server" print(step_name) + hdfs_log = "/tmp/praktika/output/hdfs_mini.log" minio_log = "/tmp/praktika/output/minio.log" + res = res and CH.start_hdfs(log_file_path=hdfs_log) res = res and CH.start_minio(log_file_path=minio_log) - logs_to_attach += [minio_log] + logs_to_attach += [minio_log, hdfs_log] time.sleep(10) Shell.check("ps -ef | grep minio", verbose=True) + Shell.check("ps -ef | grep hdfs", verbose=True) res = res and Shell.check( "aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True ) @@ -153,6 +157,7 @@ def main(): no_sequiential=no_sequential, batch_num=batch_num, batch_total=total_batches, + test=args.test, ) results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run()) results[-1].set_timing(stopwatch=stop_watch_) diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py index c43283e75e0..8f9bef57083 100644 --- a/ci/jobs/scripts/clickhouse_proc.py +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -44,6 +44,17 @@ class ClickHouseProc: self.minio_proc = None + def start_hdfs(self, log_file_path): + command = ["./ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh"] + with open(log_file_path, "w") as log_file: + process = subprocess.Popen( + command, stdout=log_file, stderr=subprocess.STDOUT + ) + print( + f"Started setup_hdfs_minicluster.sh asynchronously with PID {process.pid}" + ) + return True + def start_minio(self, log_file_path): command = ["tests/docker_scripts/setup_minio.sh", "stateless", "./tests"] with open(log_file_path, "w") as log_file: diff --git a/ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh b/ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh new file mode 100755 index 
00000000000..b810b27fe2b --- /dev/null +++ b/ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# shellcheck disable=SC2024 + +set -e -x -a -u + +ls -lha + +cd /hadoop-3.3.1 + +export JAVA_HOME=/usr +mkdir -p target/test/data + +bin/mapred minicluster -format -nomr -nnport 12222 & + +while ! nc -z localhost 12222; do + sleep 1 +done + +lsof -i :12222 diff --git a/ci/praktika/__main__.py b/ci/praktika/__main__.py index fbb9f92909a..3dfdc26d69d 100644 --- a/ci/praktika/__main__.py +++ b/ci/praktika/__main__.py @@ -37,6 +37,12 @@ def create_parser(): type=str, default=None, ) + run_parser.add_argument( + "--test", + help="Custom parameter to pass into a job script, it's up to job script how to use it, for local test", + type=str, + default="", + ) run_parser.add_argument( "--pr", help="PR number. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run in that PR", @@ -106,6 +112,7 @@ if __name__ == "__main__": local_run=not args.ci, no_docker=args.no_docker, param=args.param, + test=args.test, pr=args.pr, branch=args.branch, sha=args.sha, diff --git a/ci/praktika/_environment.py b/ci/praktika/_environment.py index 1c6b547ddde..734a4be3176 100644 --- a/ci/praktika/_environment.py +++ b/ci/praktika/_environment.py @@ -6,7 +6,7 @@ from types import SimpleNamespace from typing import Any, Dict, List, Type from praktika import Workflow -from praktika._settings import _Settings +from praktika.settings import Settings from praktika.utils import MetaClasses, T @@ -35,7 +35,7 @@ class _Environment(MetaClasses.Serializable): @classmethod def file_name_static(cls, _name=""): - return f"{_Settings.TEMP_DIR}/{cls.name}.json" + return f"{Settings.TEMP_DIR}/{cls.name}.json" @classmethod def from_dict(cls: Type[T], obj: Dict[str, Any]) -> T: @@ -66,12 +66,12 @@ class _Environment(MetaClasses.Serializable): @staticmethod def get_needs_statuses(): - if Path(_Settings.WORKFLOW_STATUS_FILE).is_file(): - with open(_Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f: + if Path(Settings.WORKFLOW_STATUS_FILE).is_file(): + with open(Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f: return json.load(f) else: print( - f"ERROR: Status file [{_Settings.WORKFLOW_STATUS_FILE}] does not exist" + f"ERROR: Status file [{Settings.WORKFLOW_STATUS_FILE}] does not exist" ) raise RuntimeError() @@ -171,7 +171,7 @@ class _Environment(MetaClasses.Serializable): # TODO: find a better place for the function. 
This file should not import praktika.settings # as it's requires reading users config, that's why imports nested inside the function - def get_report_url(self, settings): + def get_report_url(self, settings, latest=False): import urllib path = settings.HTML_S3_PATH @@ -179,7 +179,7 @@ class _Environment(MetaClasses.Serializable): if bucket in path: path = path.replace(bucket, endpoint) break - REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" + REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={'latest' if latest else self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" return REPORT_URL def is_local_run(self): diff --git a/ci/praktika/_settings.py b/ci/praktika/_settings.py deleted file mode 100644 index 17da1519e37..00000000000 --- a/ci/praktika/_settings.py +++ /dev/null @@ -1,128 +0,0 @@ -import dataclasses -from typing import Dict, Iterable, List, Optional - - -@dataclasses.dataclass -class _Settings: - ###################################### - # Pipeline generation settings # - ###################################### - MAIN_BRANCH = "main" - CI_PATH = "./ci" - WORKFLOW_PATH_PREFIX: str = "./.github/workflows" - WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows" - SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings" - CI_CONFIG_JOB_NAME = "Config Workflow" - DOCKER_BUILD_JOB_NAME = "Docker Builds" - FINISH_WORKFLOW_JOB_NAME = "Finish Workflow" - READY_FOR_MERGE_STATUS_NAME = "Ready for Merge" - CI_CONFIG_RUNS_ON: Optional[List[str]] = None - DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None - VALIDATE_FILE_PATHS: bool = True - - ###################################### - # Runtime Settings # - ###################################### - MAX_RETRIES_S3 = 3 - MAX_RETRIES_GH = 3 - - ###################################### - # S3 (artifact storage) settings # - ###################################### - S3_ARTIFACT_PATH: str = "" - - ###################################### - # CI workspace settings # - ###################################### - TEMP_DIR: str = "/tmp/praktika" - OUTPUT_DIR: str = f"{TEMP_DIR}/output" - INPUT_DIR: str = f"{TEMP_DIR}/input" - PYTHON_INTERPRETER: str = "python3" - PYTHON_PACKET_MANAGER: str = "pip3" - PYTHON_VERSION: str = "3.9" - INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False - INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt" - ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json" - RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log" - - SECRET_GH_APP_ID: str = "GH_APP_ID" - SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY" - - ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh" - WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json" - - ###################################### - # CI Cache settings # - ###################################### - CACHE_VERSION: int = 1 - CACHE_DIGEST_LEN: int = 20 - CACHE_S3_PATH: str = "" - CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache" - - ###################################### - # Report settings # - ###################################### - HTML_S3_PATH: str = "" - HTML_PAGE_FILE: str = "./praktika/json.html" - TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"]) - S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None - - DOCKERHUB_USERNAME: str = "" - DOCKERHUB_SECRET: str = "" - DOCKER_WD: str = "/wd" - - 
###################################### - # CI DB Settings # - ###################################### - SECRET_CI_DB_URL: str = "CI_DB_URL" - SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD" - CI_DB_DB_NAME = "" - CI_DB_TABLE_NAME = "" - CI_DB_INSERT_TIMEOUT_SEC = 5 - - DISABLE_MERGE_COMMIT = True - - -_USER_DEFINED_SETTINGS = [ - "S3_ARTIFACT_PATH", - "CACHE_S3_PATH", - "HTML_S3_PATH", - "S3_BUCKET_TO_HTTP_ENDPOINT", - "TEXT_CONTENT_EXTENSIONS", - "TEMP_DIR", - "OUTPUT_DIR", - "INPUT_DIR", - "CI_CONFIG_RUNS_ON", - "DOCKER_BUILD_RUNS_ON", - "CI_CONFIG_JOB_NAME", - "PYTHON_INTERPRETER", - "PYTHON_VERSION", - "PYTHON_PACKET_MANAGER", - "INSTALL_PYTHON_FOR_NATIVE_JOBS", - "INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS", - "MAX_RETRIES_S3", - "MAX_RETRIES_GH", - "VALIDATE_FILE_PATHS", - "DOCKERHUB_USERNAME", - "DOCKERHUB_SECRET", - "READY_FOR_MERGE_STATUS_NAME", - "SECRET_CI_DB_URL", - "SECRET_CI_DB_PASSWORD", - "CI_DB_DB_NAME", - "CI_DB_TABLE_NAME", - "CI_DB_INSERT_TIMEOUT_SEC", - "SECRET_GH_APP_PEM_KEY", - "SECRET_GH_APP_ID", - "MAIN_BRANCH", - "DISABLE_MERGE_COMMIT", -] - - -class GHRunners: - ubuntu = "ubuntu-latest" - - -if __name__ == "__main__": - for setting in _USER_DEFINED_SETTINGS: - print(_Settings().__getattribute__(setting)) - # print(dataclasses.asdict(_Settings())) diff --git a/ci/praktika/digest.py b/ci/praktika/digest.py index a1f2eecf9b6..6b7e5eec07b 100644 --- a/ci/praktika/digest.py +++ b/ci/praktika/digest.py @@ -23,7 +23,7 @@ class Digest: hash_string = hash_obj.hexdigest() return hash_string - def calc_job_digest(self, job_config: Job.Config): + def calc_job_digest(self, job_config: Job.Config, docker_digests): config = job_config.digest_config if not config: return "f" * Settings.CACHE_DIGEST_LEN @@ -34,28 +34,28 @@ class Digest: print( f"calc digest for job [{job_config.name}]: hash_key [{cache_key}] - from cache" ) - return self.digest_cache[cache_key] - - included_files = Utils.traverse_paths( - job_config.digest_config.include_paths, - job_config.digest_config.exclude_paths, - sorted=True, - ) - print( - f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files" - ) - - # Calculate MD5 hash - res = "" - if not included_files: - res = "f" * Settings.CACHE_DIGEST_LEN - print(f"NOTE: empty digest config [{config}] - return dummy digest") + digest = self.digest_cache[cache_key] else: + included_files = Utils.traverse_paths( + job_config.digest_config.include_paths, + job_config.digest_config.exclude_paths, + sorted=True, + ) + print( + f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files" + ) + hash_md5 = hashlib.md5() for i, file_path in enumerate(included_files): hash_md5 = self._calc_file_digest(file_path, hash_md5) - digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] - self.digest_cache[cache_key] = digest + digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + self.digest_cache[cache_key] = digest + + if job_config.run_in_docker: + # respect docker digest in the job digest + docker_digest = docker_digests[job_config.run_in_docker.split("+")[0]] + digest = "-".join([docker_digest, digest]) + return digest def calc_docker_digest( diff --git a/ci/praktika/hook_cache.py b/ci/praktika/hook_cache.py index 5cfedec0144..e001e936a71 100644 --- a/ci/praktika/hook_cache.py +++ b/ci/praktika/hook_cache.py @@ -1,6 +1,5 @@ from praktika._environment import _Environment from praktika.cache import Cache -from praktika.mangle import _get_workflows from praktika.runtime import 
RunConfig from praktika.settings import Settings from praktika.utils import Utils @@ -10,6 +9,7 @@ class CacheRunnerHooks: @classmethod def configure(cls, workflow): workflow_config = RunConfig.from_fs(workflow.name) + docker_digests = workflow_config.digest_dockers cache = Cache() print(f"Workflow Configure, workflow [{workflow.name}]") assert ( @@ -18,11 +18,13 @@ class CacheRunnerHooks: artifact_digest_map = {} job_digest_map = {} for job in workflow.jobs: + digest = cache.digest.calc_job_digest( + job_config=job, docker_digests=docker_digests + ) if not job.digest_config: print( f"NOTE: job [{job.name}] has no Config.digest_config - skip cache check, always run" ) - digest = cache.digest.calc_job_digest(job_config=job) job_digest_map[job.name] = digest if job.provides: # assign the job digest also to the artifacts it provides @@ -48,7 +50,6 @@ class CacheRunnerHooks: ), f"BUG, Workflow with enabled cache must have job digests after configuration, wf [{workflow.name}]" print("Check remote cache") - job_to_cache_record = {} for job_name, job_digest in workflow_config.digest_jobs.items(): record = cache.fetch_success(job_name=job_name, job_digest=job_digest) if record: @@ -58,7 +59,7 @@ class CacheRunnerHooks: ) workflow_config.cache_success.append(job_name) workflow_config.cache_success_base64.append(Utils.to_base64(job_name)) - job_to_cache_record[job_name] = record + workflow_config.cache_jobs[job_name] = record print("Check artifacts to reuse") for job in workflow.jobs: @@ -66,7 +67,7 @@ class CacheRunnerHooks: if job.provides: for artifact_name in job.provides: workflow_config.cache_artifacts[artifact_name] = ( - job_to_cache_record[job.name] + workflow_config.cache_jobs[job.name] ) print(f"Write config to GH's job output") diff --git a/ci/praktika/hook_html.py b/ci/praktika/hook_html.py index ca2692d1b22..e2faefb2fa9 100644 --- a/ci/praktika/hook_html.py +++ b/ci/praktika/hook_html.py @@ -6,7 +6,7 @@ from typing import List from praktika._environment import _Environment from praktika.gh import GH from praktika.parser import WorkflowConfigParser -from praktika.result import Result, ResultInfo +from praktika.result import Result, ResultInfo, _ResultS3 from praktika.runtime import RunConfig from praktika.s3 import S3 from praktika.settings import Settings @@ -119,6 +119,7 @@ class HtmlRunnerHooks: # generate pending Results for all jobs in the workflow if _workflow.enable_cache: skip_jobs = RunConfig.from_fs(_workflow.name).cache_success + job_cache_records = RunConfig.from_fs(_workflow.name).cache_jobs else: skip_jobs = [] @@ -128,21 +129,14 @@ class HtmlRunnerHooks: if job.name not in skip_jobs: result = Result.generate_pending(job.name) else: - result = Result.generate_skipped(job.name) + result = Result.generate_skipped(job.name, job_cache_records[job.name]) results.append(result) summary_result = Result.generate_pending(_workflow.name, results=results) summary_result.links.append(env.CHANGE_URL) summary_result.links.append(env.RUN_URL) summary_result.start_time = Utils.timestamp() - # clean the previous latest results in PR if any - if env.PR_NUMBER: - S3.clean_latest_result() - S3.copy_result_to_s3( - summary_result, - unlock=False, - ) - + assert _ResultS3.copy_result_to_s3_with_version(summary_result, version=0) page_url = env.get_report_url(settings=Settings) print(f"CI Status page url [{page_url}]") @@ -150,7 +144,7 @@ class HtmlRunnerHooks: name=_workflow.name, status=Result.Status.PENDING, description="", - url=env.get_report_url(settings=Settings), + 
url=env.get_report_url(settings=Settings, latest=True), ) res2 = GH.post_pr_comment( comment_body=f"Workflow [[{_workflow.name}]({page_url})], commit [{_Environment.get().SHA[:8]}]", @@ -167,14 +161,8 @@ class HtmlRunnerHooks: @classmethod def pre_run(cls, _workflow, _job): result = Result.from_fs(_job.name) - S3.copy_result_from_s3( - Result.file_name_static(_workflow.name), - ) - workflow_result = Result.from_fs(_workflow.name) - workflow_result.update_sub_result(result) - S3.copy_result_to_s3( - workflow_result, - unlock=True, + _ResultS3.update_workflow_results( + workflow_name=_workflow.name, new_sub_results=result ) @classmethod @@ -184,14 +172,13 @@ class HtmlRunnerHooks: @classmethod def post_run(cls, _workflow, _job, info_errors): result = Result.from_fs(_job.name) - env = _Environment.get() - S3.copy_result_from_s3( - Result.file_name_static(_workflow.name), - lock=True, - ) - workflow_result = Result.from_fs(_workflow.name) - print(f"Workflow info [{workflow_result.info}], info_errors [{info_errors}]") + _ResultS3.upload_result_files_to_s3(result) + _ResultS3.copy_result_to_s3(result) + env = _Environment.get() + + new_sub_results = [result] + new_result_info = "" env_info = env.REPORT_INFO if env_info: print( @@ -203,14 +190,8 @@ class HtmlRunnerHooks: info_str = f"{_job.name}:\n" info_str += "\n".join(info_errors) print("Update workflow results with new info") - workflow_result.set_info(info_str) + new_result_info = info_str - old_status = workflow_result.status - - S3.upload_result_files_to_s3(result) - workflow_result.update_sub_result(result) - - skipped_job_results = [] if not result.is_ok(): print( "Current job failed - find dependee jobs in the workflow and set their statuses to skipped" @@ -223,7 +204,7 @@ class HtmlRunnerHooks: print( f"NOTE: Set job [{dependee_job.name}] status to [{Result.Status.SKIPPED}] due to current failure" ) - skipped_job_results.append( + new_sub_results.append( Result( name=dependee_job.name, status=Result.Status.SKIPPED, @@ -231,20 +212,18 @@ class HtmlRunnerHooks: + f" [{_job.name}]", ) ) - for skipped_job_result in skipped_job_results: - workflow_result.update_sub_result(skipped_job_result) - S3.copy_result_to_s3( - workflow_result, - unlock=True, + updated_status = _ResultS3.update_workflow_results( + new_info=new_result_info, + new_sub_results=new_sub_results, + workflow_name=_workflow.name, ) - if workflow_result.status != old_status: - print( - f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}]" - ) + + if updated_status: + print(f"Update GH commit status [{result.name}]: [{updated_status}]") GH.post_commit_status( - name=workflow_result.name, - status=GH.convert_to_gh_status(workflow_result.status), + name=_workflow.name, + status=GH.convert_to_gh_status(updated_status), description="", - url=env.get_report_url(settings=Settings), + url=env.get_report_url(settings=Settings, latest=True), ) diff --git a/ci/praktika/json.html b/ci/praktika/json.html index 4e15a67ba76..544fd6e68d4 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -342,7 +342,7 @@ const milliseconds = Math.floor((duration % 1) * 1000); const formattedSeconds = String(seconds); - const formattedMilliseconds = String(milliseconds).padStart(3, '0'); + const formattedMilliseconds = String(milliseconds).padStart(2, '0').slice(-2); return `${formattedSeconds}.${formattedMilliseconds}`; } @@ -600,8 +600,7 @@ td.classList.add('time-column'); td.textContent = value ? 
formatDuration(value) : ''; } else if (column === 'info') { - // For info and other columns, just display the value - td.textContent = value || ''; + td.textContent = value.includes('\n') ? '↵' : (value || ''); td.classList.add('info-column'); } @@ -675,7 +674,8 @@ } if (targetData) { - infoElement.style.display = 'none'; + //infoElement.style.display = 'none'; + infoElement.innerHTML = (targetData.info || '').replace(/\n/g, '
'); addStatusToStatus(targetData.status, targetData.start_time, targetData.duration) @@ -804,7 +804,8 @@ // Check if all required parameters are present to load JSON if (PR && sha && root_name) { - loadResultsJSON(PR, sha, nameParams); + const shaToLoad = (sha === 'latest') ? commitsArray[commitsArray.length - 1] : sha; + loadResultsJSON(PR, shaToLoad, nameParams); } else { document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0'; } diff --git a/ci/praktika/mangle.py b/ci/praktika/mangle.py index b16d52fbbbf..f94b11adad5 100644 --- a/ci/praktika/mangle.py +++ b/ci/praktika/mangle.py @@ -1,11 +1,10 @@ import copy import importlib.util from pathlib import Path -from typing import Any, Dict from praktika import Job -from praktika._settings import _USER_DEFINED_SETTINGS, _Settings -from praktika.utils import ContextManager, Utils +from praktika.settings import Settings +from praktika.utils import Utils def _get_workflows(name=None, file=None): @@ -14,13 +13,13 @@ def _get_workflows(name=None, file=None): """ res = [] - directory = Path(_Settings.WORKFLOWS_DIRECTORY) + directory = Path(Settings.WORKFLOWS_DIRECTORY) for py_file in directory.glob("*.py"): if file and file not in str(py_file): continue module_name = py_file.name.removeprefix(".py") spec = importlib.util.spec_from_file_location( - module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}" + module_name, f"{Settings.WORKFLOWS_DIRECTORY}/{module_name}" ) assert spec foo = importlib.util.module_from_spec(spec) @@ -106,30 +105,3 @@ def _update_workflow_with_native_jobs(workflow): for job in workflow.jobs: aux_job.requires.append(job.name) workflow.jobs.append(aux_job) - - -def _get_user_settings() -> Dict[str, Any]: - """ - Gets user's settings - """ - res = {} # type: Dict[str, Any] - - directory = Path(_Settings.SETTINGS_DIRECTORY) - for py_file in directory.glob("*.py"): - module_name = py_file.name.removeprefix(".py") - spec = importlib.util.spec_from_file_location( - module_name, f"{_Settings.SETTINGS_DIRECTORY}/{module_name}" - ) - assert spec - foo = importlib.util.module_from_spec(spec) - assert spec.loader - spec.loader.exec_module(foo) - for setting in _USER_DEFINED_SETTINGS: - try: - value = getattr(foo, setting) - res[setting] = value - print(f"Apply user defined setting [{setting} = {value}]") - except Exception as e: - pass - - return res diff --git a/ci/praktika/native_jobs.py b/ci/praktika/native_jobs.py index 58af211988b..52bf6c6e204 100644 --- a/ci/praktika/native_jobs.py +++ b/ci/praktika/native_jobs.py @@ -10,9 +10,8 @@ from praktika.gh import GH from praktika.hook_cache import CacheRunnerHooks from praktika.hook_html import HtmlRunnerHooks from praktika.mangle import _get_workflows -from praktika.result import Result, ResultInfo +from praktika.result import Result, ResultInfo, _ResultS3 from praktika.runtime import RunConfig -from praktika.s3 import S3 from praktika.settings import Settings from praktika.utils import Shell, Utils @@ -225,6 +224,7 @@ def _config_workflow(workflow: Workflow.Config, job_name): cache_success=[], cache_success_base64=[], cache_artifacts={}, + cache_jobs={}, ).dump() # checks: @@ -310,9 +310,8 @@ def _finish_workflow(workflow, job_name): print(env.get_needs_statuses()) print("Check Workflow results") - S3.copy_result_from_s3( + _ResultS3.copy_result_from_s3( Result.file_name_static(workflow.name), - lock=False, ) workflow_result = Result.from_fs(workflow.name) @@ -345,7 +344,7 @@ def _finish_workflow(workflow, job_name): 
failed_results.append(result.name) if failed_results: - ready_for_merge_description = f"failed: {', '.join(failed_results)}" + ready_for_merge_description = f"Failed: {', '.join(failed_results)}" if not GH.post_commit_status( name=Settings.READY_FOR_MERGE_STATUS_NAME + f" [{workflow.name}]", @@ -357,10 +356,9 @@ def _finish_workflow(workflow, job_name): env.add_info(ResultInfo.GH_STATUS_ERROR) if update_final_report: - S3.copy_result_to_s3( + _ResultS3.copy_result_to_s3( workflow_result, - unlock=False, - ) # no lock - no unlock + ) Result.from_fs(job_name).set_status(Result.Status.SUCCESS) diff --git a/ci/praktika/result.py b/ci/praktika/result.py index 842deacbcbd..8164b1d1295 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -2,10 +2,12 @@ import dataclasses import datetime import sys from pathlib import Path -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union from praktika._environment import _Environment -from praktika._settings import _Settings +from praktika.cache import Cache +from praktika.s3 import S3 +from praktika.settings import Settings from praktika.utils import ContextManager, MetaClasses, Shell, Utils @@ -55,7 +57,7 @@ class Result(MetaClasses.Serializable): stopwatch: Utils.Stopwatch = None, status="", files=None, - info="", + info: Union[List[str], str] = "", with_info_from_results=True, ): if isinstance(status, bool): @@ -149,7 +151,7 @@ class Result(MetaClasses.Serializable): @classmethod def file_name_static(cls, name): - return f"{_Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json" + return f"{Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json" @classmethod def from_dict(cls, obj: Dict[str, Any]) -> "Result": @@ -232,7 +234,7 @@ class Result(MetaClasses.Serializable): ) @classmethod - def generate_skipped(cls, name, results=None): + def generate_skipped(cls, name, cache_record: Cache.CacheRecord, results=None): return Result( name=name, status=Result.Status.SKIPPED, @@ -241,7 +243,7 @@ class Result(MetaClasses.Serializable): results=results or [], files=[], links=[], - info="from cache", + info=f"from cache: sha [{cache_record.sha}], pr/branch [{cache_record.pr_number or cache_record.branch}]", ) @classmethod @@ -275,7 +277,7 @@ class Result(MetaClasses.Serializable): # Set log file path if logging is enabled log_file = ( - f"{_Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log" + f"{Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log" if with_log else None ) @@ -321,14 +323,31 @@ class Result(MetaClasses.Serializable): self.dump() if not self.is_ok(): print("ERROR: Job Failed") - for result in self.results: - if not result.is_ok(): - print("Failed checks:") - print(" | ", result) + print(self.to_stdout_formatted()) sys.exit(1) else: print("ok") + def to_stdout_formatted(self, indent="", res=""): + if self.is_ok(): + return res + + res += f"{indent}Task [{self.name}] failed.\n" + fail_info = "" + sub_indent = indent + " " + + if not self.results: + if not self.is_ok(): + fail_info += f"{sub_indent}{self.name}:\n" + for line in self.info.splitlines(): + fail_info += f"{sub_indent}{sub_indent}{line}\n" + return res + fail_info + + for sub_result in self.results: + res = sub_result.to_stdout_formatted(sub_indent, res) + + return res + class ResultInfo: SETUP_ENV_JOB_FAILED = ( @@ -351,3 +370,202 @@ class ResultInfo: ) S3_ERROR = "S3 call failure" + + +class _ResultS3: + + @classmethod + def copy_result_to_s3(cls, result, unlock=False): + result.dump() + env = _Environment.get() + 
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}" + s3_path_full = f"{s3_path}/{Path(result.file_name()).name}" + url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name()) + # if unlock: + # if not cls.unlock(s3_path_full): + # print(f"ERROR: File [{s3_path_full}] unlock failure") + # assert False # TODO: investigate + return url + + @classmethod + def copy_result_from_s3(cls, local_path, lock=False): + env = _Environment.get() + file_name = Path(local_path).name + s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}" + # if lock: + # cls.lock(s3_path) + if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path): + print(f"ERROR: failed to cp file [{s3_path}] from s3") + raise + + @classmethod + def copy_result_from_s3_with_version(cls, local_path): + env = _Environment.get() + file_name = Path(local_path).name + local_dir = Path(local_path).parent + file_name_pattern = f"{file_name}_*" + for file_path in local_dir.glob(file_name_pattern): + file_path.unlink() + s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/" + if not S3.copy_file_from_s3_matching_pattern( + s3_path=s3_path, local_path=local_dir, include=file_name_pattern + ): + print(f"ERROR: failed to cp file [{s3_path}] from s3") + raise + result_files = [] + for file_path in local_dir.glob(file_name_pattern): + result_files.append(file_path) + assert result_files, "No result files found" + result_files.sort() + version = int(result_files[-1].name.split("_")[-1]) + Shell.check(f"cp {result_files[-1]} {local_path}", strict=True, verbose=True) + return version + + @classmethod + def copy_result_to_s3_with_version(cls, result, version): + result.dump() + filename = Path(result.file_name()).name + file_name_versioned = f"{filename}_{str(version).zfill(3)}" + env = _Environment.get() + s3_path_versioned = ( + f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name_versioned}" + ) + s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/" + if version == 0: + S3.clean_s3_directory(s3_path=s3_path) + if not S3.put( + s3_path=s3_path_versioned, + local_path=result.file_name(), + if_none_matched=True, + ): + print("Failed to put versioned Result") + return False + if not S3.put(s3_path=s3_path, local_path=result.file_name()): + print("Failed to put non-versioned Result") + return True + + # @classmethod + # def lock(cls, s3_path, level=0): + # env = _Environment.get() + # s3_path_lock = s3_path + f".lock" + # file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}" + # assert Shell.check( + # f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True + # ), "Never" + # + # i = 20 + # meta = S3.head_object(s3_path_lock) + # while meta: + # locked_by_job = meta.get("Metadata", {"job": ""}).get("job", "") + # if locked_by_job: + # decoded_bytes = base64.b64decode(locked_by_job) + # locked_by_job = decoded_bytes.decode("utf-8") + # print( + # f"WARNING: Failed to acquire lock, meta [{meta}], job [{locked_by_job}] - wait" + # ) + # i -= 5 + # if i < 0: + # info = f"ERROR: lock acquire failure - unlock forcefully" + # print(info) + # env.add_info(info) + # break + # time.sleep(5) + # + # metadata = {"job": Utils.to_base64(env.JOB_NAME)} + # S3.put( + # s3_path=s3_path_lock, + # local_path=file_path_lock, + # metadata=metadata, + # if_none_matched=True, + # ) + # time.sleep(1) + # obj = S3.head_object(s3_path_lock) + # if not obj or not obj.has_tags(tags=metadata): + # print(f"WARNING: locked by another job [{obj}]") + # env.add_info("S3 lock file failure") + # 
cls.lock(s3_path, level=level + 1) + # print("INFO: lock acquired") + # + # @classmethod + # def unlock(cls, s3_path): + # s3_path_lock = s3_path + ".lock" + # env = _Environment.get() + # obj = S3.head_object(s3_path_lock) + # if not obj: + # print("ERROR: lock file is removed") + # assert False # investigate + # elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}): + # print("ERROR: lock file was acquired by another job") + # assert False # investigate + # + # if not S3.delete(s3_path_lock): + # print(f"ERROR: File [{s3_path_lock}] delete failure") + # print("INFO: lock released") + # return True + + @classmethod + def upload_result_files_to_s3(cls, result): + if result.results: + for result_ in result.results: + cls.upload_result_files_to_s3(result_) + for file in result.files: + if not Path(file).is_file(): + print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload") + result.info += f"\nWARNING: Result file [{file}] was not found" + file_link = S3._upload_file_to_s3(file, upload_to_s3=False) + else: + is_text = False + for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS: + if file.endswith(text_file_suffix): + print( + f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object" + ) + is_text = True + break + file_link = S3._upload_file_to_s3( + file, + upload_to_s3=True, + text=is_text, + s3_subprefix=Utils.normalize_string(result.name), + ) + result.links.append(file_link) + if result.files: + print( + f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list" + ) + result.files = [] + result.dump() + + @classmethod + def update_workflow_results(cls, workflow_name, new_info="", new_sub_results=None): + assert new_info or new_sub_results + + attempt = 1 + prev_status = "" + new_status = "" + done = False + while attempt < 10: + version = cls.copy_result_from_s3_with_version( + Result.file_name_static(workflow_name) + ) + workflow_result = Result.from_fs(workflow_name) + prev_status = workflow_result.status + if new_info: + workflow_result.set_info(new_info) + if new_sub_results: + if isinstance(new_sub_results, Result): + new_sub_results = [new_sub_results] + for result_ in new_sub_results: + workflow_result.update_sub_result(result_) + new_status = workflow_result.status + if cls.copy_result_to_s3_with_version(workflow_result, version=version + 1): + done = True + break + print(f"Attempt [{attempt}] to upload workflow result failed") + attempt += 1 + assert done + + if prev_status != new_status: + return new_status + else: + return None diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 1ac8748d1c0..38112dd5684 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -52,6 +52,7 @@ class Runner: cache_success=[], cache_success_base64=[], cache_artifacts={}, + cache_jobs={}, ) for docker in workflow.dockers: workflow_config.digest_dockers[docker.name] = Digest().calc_docker_digest( @@ -123,7 +124,7 @@ class Runner: return 0 - def _run(self, workflow, job, docker="", no_docker=False, param=None): + def _run(self, workflow, job, docker="", no_docker=False, param=None, test=""): # re-set envs for local run env = _Environment.get() env.JOB_NAME = job.name @@ -162,6 +163,9 @@ class Runner: if param: print(f"Custom --param [{param}] will be passed to job's script") cmd += f" --param {param}" + if test: + print(f"Custom --test [{test}] will be passed to job's script") + cmd += f" --test {test}" print(f"--- Run command [{cmd}]") 
with TeePopen(cmd, timeout=job.timeout) as process: @@ -240,10 +244,6 @@ class Runner: result.set_files(files=[Settings.RUN_LOG]) result.update_duration().dump() - if result.info and result.status != Result.Status.SUCCESS: - # provide job info to workflow level - info_errors.append(result.info) - if run_exit_code == 0: providing_artifacts = [] if job.provides and workflow.artifacts: @@ -310,6 +310,7 @@ class Runner: local_run=False, no_docker=False, param=None, + test="", pr=None, sha=None, branch=None, @@ -358,7 +359,12 @@ class Runner: print(f"=== Run script [{job.name}], workflow [{workflow.name}] ===") try: run_code = self._run( - workflow, job, docker=docker, no_docker=no_docker, param=param + workflow, + job, + docker=docker, + no_docker=no_docker, + param=param, + test=test, ) res = run_code == 0 if not res: diff --git a/ci/praktika/runtime.py b/ci/praktika/runtime.py index a87b67c2c79..07c24e0498c 100644 --- a/ci/praktika/runtime.py +++ b/ci/praktika/runtime.py @@ -15,17 +15,23 @@ class RunConfig(MetaClasses.Serializable): # there are might be issue with special characters in job names if used directly in yaml syntax - create base64 encoded list to avoid this cache_success_base64: List[str] cache_artifacts: Dict[str, Cache.CacheRecord] + cache_jobs: Dict[str, Cache.CacheRecord] sha: str @classmethod def from_dict(cls, obj): cache_artifacts = obj["cache_artifacts"] + cache_jobs = obj["cache_jobs"] cache_artifacts_deserialized = {} + cache_jobs_deserialized = {} for artifact_name, cache_artifact in cache_artifacts.items(): cache_artifacts_deserialized[artifact_name] = Cache.CacheRecord.from_dict( cache_artifact ) obj["cache_artifacts"] = cache_artifacts_deserialized + for job_name, cache_jobs in cache_jobs.items(): + cache_jobs_deserialized[job_name] = Cache.CacheRecord.from_dict(cache_jobs) + obj["cache_jobs"] = cache_artifacts_deserialized return RunConfig(**obj) @classmethod diff --git a/ci/praktika/s3.py b/ci/praktika/s3.py index 04a08622dcd..82034b57b80 100644 --- a/ci/praktika/s3.py +++ b/ci/praktika/s3.py @@ -1,12 +1,11 @@ import dataclasses import json -import time from pathlib import Path from typing import Dict from praktika._environment import _Environment from praktika.settings import Settings -from praktika.utils import Shell, Utils +from praktika.utils import Shell class S3: @@ -59,16 +58,15 @@ class S3: return f"https://{s3_full_path}".replace(bucket, endpoint) @classmethod - def put(cls, s3_path, local_path, text=False, metadata=None): + def put(cls, s3_path, local_path, text=False, metadata=None, if_none_matched=False): assert Path(local_path).exists(), f"Path [{local_path}] does not exist" assert Path(s3_path), f"Invalid S3 Path [{s3_path}]" assert Path( local_path ).is_file(), f"Path [{local_path}] is not file. 
Only files are supported" - file_name = Path(local_path).name s3_full_path = s3_path - if not s3_full_path.endswith(file_name): - s3_full_path = f"{s3_path}/{Path(local_path).name}" + if s3_full_path.endswith("/"): + s3_full_path = f"{s3_path}{Path(local_path).name}" s3_full_path = str(s3_full_path).removeprefix("s3://") bucket, key = s3_full_path.split("/", maxsplit=1) @@ -76,6 +74,8 @@ class S3: command = ( f"aws s3api put-object --bucket {bucket} --key {key} --body {local_path}" ) + if if_none_matched: + command += f' --if-none-match "*"' if metadata: for k, v in metadata.items(): command += f" --metadata {k}={v}" @@ -84,7 +84,7 @@ class S3: if text: cmd += " --content-type text/plain" res = cls.run_command_with_retries(command) - assert res + return res @classmethod def run_command_with_retries(cls, command, retries=Settings.MAX_RETRIES_S3): @@ -101,6 +101,14 @@ class S3: elif "does not exist" in stderr: print("ERROR: requested file does not exist") break + elif "Unknown options" in stderr: + print("ERROR: Invalid AWS CLI command or CLI client version:") + print(f" | awc error: {stderr}") + break + elif "PreconditionFailed" in stderr: + print("ERROR: AWS API Call Precondition Failed") + print(f" | awc error: {stderr}") + break if ret_code != 0: print( f"ERROR: aws s3 cp failed, stdout/stderr err: [{stderr}], out [{stdout}]" @@ -108,13 +116,6 @@ class S3: res = ret_code == 0 return res - @classmethod - def get_link(cls, s3_path, local_path): - s3_full_path = f"{s3_path}/{Path(local_path).name}" - bucket = s3_path.split("/")[0] - endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket] - return f"https://{s3_full_path}".replace(bucket, endpoint) - @classmethod def copy_file_from_s3(cls, s3_path, local_path): assert Path(s3_path), f"Invalid S3 Path [{s3_path}]" @@ -128,6 +129,19 @@ class S3: res = cls.run_command_with_retries(cmd) return res + @classmethod + def copy_file_from_s3_matching_pattern( + cls, s3_path, local_path, include, exclude="*" + ): + assert Path(s3_path), f"Invalid S3 Path [{s3_path}]" + assert Path( + local_path + ).is_dir(), f"Path [{local_path}] does not exist or not a directory" + assert s3_path.endswith("/"), f"s3 path is invalid [{s3_path}]" + cmd = f'aws s3 cp s3://{s3_path} {local_path} --exclude "{exclude}" --include "{include}" --recursive' + res = cls.run_command_with_retries(cmd) + return res + @classmethod def head_object(cls, s3_path): s3_path = str(s3_path).removeprefix("s3://") @@ -148,103 +162,6 @@ class S3: verbose=True, ) - # TODO: apparently should be placed into separate file to be used only inside praktika - # keeping this module clean from importing Settings, Environment and etc, making it easy for use externally - @classmethod - def copy_result_to_s3(cls, result, unlock=True): - result.dump() - env = _Environment.get() - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}" - s3_path_full = f"{s3_path}/{Path(result.file_name()).name}" - url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name()) - if env.PR_NUMBER: - print("Duplicate Result for latest commit alias in PR") - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True)}" - url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name()) - if unlock: - if not cls.unlock(s3_path_full): - print(f"ERROR: File [{s3_path_full}] unlock failure") - assert False # TODO: investigate - return url - - @classmethod - def copy_result_from_s3(cls, local_path, lock=True): - env = _Environment.get() - file_name = Path(local_path).name - s3_path = 
f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}" - if lock: - cls.lock(s3_path) - if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path): - print(f"ERROR: failed to cp file [{s3_path}] from s3") - raise - - @classmethod - def lock(cls, s3_path, level=0): - assert level < 3, "Never" - env = _Environment.get() - s3_path_lock = s3_path + f".lock" - file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}" - assert Shell.check( - f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True - ), "Never" - - i = 20 - meta = S3.head_object(s3_path_lock) - while meta: - print(f"WARNING: Failed to acquire lock, meta [{meta}] - wait") - i -= 5 - if i < 0: - info = f"ERROR: lock acquire failure - unlock forcefully" - print(info) - env.add_info(info) - break - time.sleep(5) - - metadata = {"job": Utils.to_base64(env.JOB_NAME)} - S3.put( - s3_path=s3_path_lock, - local_path=file_path_lock, - metadata=metadata, - ) - time.sleep(1) - obj = S3.head_object(s3_path_lock) - if not obj or not obj.has_tags(tags=metadata): - print(f"WARNING: locked by another job [{obj}]") - env.add_info("S3 lock file failure") - cls.lock(s3_path, level=level + 1) - print("INFO: lock acquired") - - @classmethod - def unlock(cls, s3_path): - s3_path_lock = s3_path + ".lock" - env = _Environment.get() - obj = S3.head_object(s3_path_lock) - if not obj: - print("ERROR: lock file is removed") - assert False # investigate - elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}): - print("ERROR: lock file was acquired by another job") - assert False # investigate - - if not S3.delete(s3_path_lock): - print(f"ERROR: File [{s3_path_lock}] delete failure") - print("INFO: lock released") - return True - - @classmethod - def get_result_link(cls, result): - env = _Environment.get() - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True if env.PR_NUMBER else False)}" - return S3.get_link(s3_path=s3_path, local_path=result.file_name()) - - @classmethod - def clean_latest_result(cls): - env = _Environment.get() - env.SHA = "latest" - assert env.PR_NUMBER - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}" - S3.clean_s3_directory(s3_path=s3_path) - @classmethod def _upload_file_to_s3( cls, local_file_path, upload_to_s3: bool, text: bool = False, s3_subprefix="" @@ -260,36 +177,3 @@ class S3: ) return html_link return f"file://{Path(local_file_path).absolute()}" - - @classmethod - def upload_result_files_to_s3(cls, result): - if result.results: - for result_ in result.results: - cls.upload_result_files_to_s3(result_) - for file in result.files: - if not Path(file).is_file(): - print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload") - result.info += f"\nWARNING: Result file [{file}] was not found" - file_link = cls._upload_file_to_s3(file, upload_to_s3=False) - else: - is_text = False - for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS: - if file.endswith(text_file_suffix): - print( - f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object" - ) - is_text = True - break - file_link = cls._upload_file_to_s3( - file, - upload_to_s3=True, - text=is_text, - s3_subprefix=Utils.normalize_string(result.name), - ) - result.links.append(file_link) - if result.files: - print( - f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list" - ) - result.files = [] - result.dump() diff --git a/ci/praktika/settings.py b/ci/praktika/settings.py index 
1a4068d9398..b281a95370c 100644 --- a/ci/praktika/settings.py +++ b/ci/praktika/settings.py @@ -1,8 +1,152 @@ -from praktika._settings import _Settings -from praktika.mangle import _get_user_settings +import dataclasses +import importlib.util +from pathlib import Path +from typing import Dict, Iterable, List, Optional -Settings = _Settings() -user_settings = _get_user_settings() -for setting, value in user_settings.items(): - Settings.__setattr__(setting, value) +@dataclasses.dataclass +class _Settings: + ###################################### + # Pipeline generation settings # + ###################################### + MAIN_BRANCH = "main" + CI_PATH = "./ci" + WORKFLOW_PATH_PREFIX: str = "./.github/workflows" + WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows" + SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings" + CI_CONFIG_JOB_NAME = "Config Workflow" + DOCKER_BUILD_JOB_NAME = "Docker Builds" + FINISH_WORKFLOW_JOB_NAME = "Finish Workflow" + READY_FOR_MERGE_STATUS_NAME = "Ready for Merge" + CI_CONFIG_RUNS_ON: Optional[List[str]] = None + DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None + VALIDATE_FILE_PATHS: bool = True + + ###################################### + # Runtime Settings # + ###################################### + MAX_RETRIES_S3 = 3 + MAX_RETRIES_GH = 3 + + ###################################### + # S3 (artifact storage) settings # + ###################################### + S3_ARTIFACT_PATH: str = "" + + ###################################### + # CI workspace settings # + ###################################### + TEMP_DIR: str = "/tmp/praktika" + OUTPUT_DIR: str = f"{TEMP_DIR}/output" + INPUT_DIR: str = f"{TEMP_DIR}/input" + PYTHON_INTERPRETER: str = "python3" + PYTHON_PACKET_MANAGER: str = "pip3" + PYTHON_VERSION: str = "3.9" + INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False + INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt" + ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json" + RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log" + + SECRET_GH_APP_ID: str = "GH_APP_ID" + SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY" + + ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh" + WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json" + + ###################################### + # CI Cache settings # + ###################################### + CACHE_VERSION: int = 1 + CACHE_DIGEST_LEN: int = 20 + CACHE_S3_PATH: str = "" + CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache" + + ###################################### + # Report settings # + ###################################### + HTML_S3_PATH: str = "" + HTML_PAGE_FILE: str = "./praktika/json.html" + TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"]) + S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None + + DOCKERHUB_USERNAME: str = "" + DOCKERHUB_SECRET: str = "" + DOCKER_WD: str = "/wd" + + ###################################### + # CI DB Settings # + ###################################### + SECRET_CI_DB_URL: str = "CI_DB_URL" + SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD" + CI_DB_DB_NAME = "" + CI_DB_TABLE_NAME = "" + CI_DB_INSERT_TIMEOUT_SEC = 5 + + DISABLE_MERGE_COMMIT = True + + +_USER_DEFINED_SETTINGS = [ + "S3_ARTIFACT_PATH", + "CACHE_S3_PATH", + "HTML_S3_PATH", + "S3_BUCKET_TO_HTTP_ENDPOINT", + "TEXT_CONTENT_EXTENSIONS", + "TEMP_DIR", + "OUTPUT_DIR", + "INPUT_DIR", + "CI_CONFIG_RUNS_ON", + "DOCKER_BUILD_RUNS_ON", + "CI_CONFIG_JOB_NAME", + "PYTHON_INTERPRETER", + "PYTHON_VERSION", + "PYTHON_PACKET_MANAGER", + "INSTALL_PYTHON_FOR_NATIVE_JOBS", + "INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS", 
+ "MAX_RETRIES_S3", + "MAX_RETRIES_GH", + "VALIDATE_FILE_PATHS", + "DOCKERHUB_USERNAME", + "DOCKERHUB_SECRET", + "READY_FOR_MERGE_STATUS_NAME", + "SECRET_CI_DB_URL", + "SECRET_CI_DB_PASSWORD", + "CI_DB_DB_NAME", + "CI_DB_TABLE_NAME", + "CI_DB_INSERT_TIMEOUT_SEC", + "SECRET_GH_APP_PEM_KEY", + "SECRET_GH_APP_ID", + "MAIN_BRANCH", + "DISABLE_MERGE_COMMIT", +] + + +def _get_settings() -> _Settings: + res = _Settings() + + directory = Path(_Settings.SETTINGS_DIRECTORY) + for py_file in directory.glob("*.py"): + module_name = py_file.name.removeprefix(".py") + spec = importlib.util.spec_from_file_location( + module_name, f"{_Settings.SETTINGS_DIRECTORY}/{module_name}" + ) + assert spec + foo = importlib.util.module_from_spec(spec) + assert spec.loader + spec.loader.exec_module(foo) + for setting in _USER_DEFINED_SETTINGS: + try: + value = getattr(foo, setting) + res.__setattr__(setting, value) + # print(f"- read user defined setting [{setting} = {value}]") + except Exception as e: + # print(f"Exception while read user settings: {e}") + pass + + return res + + +class GHRunners: + ubuntu = "ubuntu-latest" + + +Settings = _get_settings() diff --git a/ci/praktika/utils.py b/ci/praktika/utils.py index 62eb13b3e19..2bcc94f2559 100644 --- a/ci/praktika/utils.py +++ b/ci/praktika/utils.py @@ -17,8 +17,6 @@ from threading import Thread from types import SimpleNamespace from typing import Any, Dict, Iterator, List, Optional, Type, TypeVar, Union -from praktika._settings import _Settings - T = TypeVar("T", bound="Serializable") diff --git a/ci/praktika/validator.py b/ci/praktika/validator.py index d612881b819..0bb722903e5 100644 --- a/ci/praktika/validator.py +++ b/ci/praktika/validator.py @@ -4,10 +4,8 @@ from itertools import chain from pathlib import Path from praktika import Workflow -from praktika._settings import GHRunners from praktika.mangle import _get_workflows -from praktika.settings import Settings -from praktika.utils import ContextManager +from praktika.settings import GHRunners, Settings class Validator: @@ -168,9 +166,7 @@ class Validator: "\n echo requests==2.32.3 >> ./ci/requirements.txt" ) message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt" - cls.evaluate_check( - path.is_file(), message, job.name, workflow.name - ) + cls.evaluate_check(path.is_file(), message, job.name, workflow.name) @classmethod def validate_dockers(cls, workflow: Workflow.Config): diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index 0d505ae27c4..707babb1250 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -68,6 +68,7 @@ stateless_tests_jobs = Job.Config( name=JobNames.STATELESS, runs_on=[RunnerLabels.BUILDER], command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", + # many tests expect to see "/var/lib/clickhouse" in various output lines - add mount for now, consider creating this dir in docker file run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", digest_config=Job.CacheDigestConfig( include_paths=[ From b05d3ed6df35b2e66c81bc8d7b9077a82758dcf1 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 11 Nov 2024 22:43:03 +0100 Subject: [PATCH 435/566] impl --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 3 + .../ExecuteScalarSubqueriesVisitor.cpp | 7 ++- src/Interpreters/PreparedSets.cpp | 19 +++--- src/Interpreters/ProcessorsProfileLog.cpp | 62 ++++++++++++++++++- 
src/Interpreters/ProcessorsProfileLog.h | 1 + src/Interpreters/executeQuery.cpp | 48 +------------- 6 files changed, 80 insertions(+), 60 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 390418494e7..03ebd893c47 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -676,6 +677,8 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden "tuple"}); } } + + logProcessorProfile(context, io.pipeline.getProcessors()); } scalars_cache.emplace(node_with_hash, scalar_block); diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index d4da038c089..9ae2ffc208d 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -5,9 +5,11 @@ #include #include #include +#include #include #include #include +#include #include #include #include @@ -19,9 +21,8 @@ #include #include #include -#include #include -#include +#include namespace ProfileEvents { @@ -246,6 +247,8 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr if (tmp_block.rows() != 0) throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY, "Scalar subquery returned more than one row"); + + logProcessorProfile(data.getContext(), io.pipeline.getProcessors()); } block = materializeBlock(block); diff --git a/src/Interpreters/PreparedSets.cpp b/src/Interpreters/PreparedSets.cpp index 538108165fb..c69e2f84d42 100644 --- a/src/Interpreters/PreparedSets.cpp +++ b/src/Interpreters/PreparedSets.cpp @@ -1,21 +1,22 @@ #include #include -#include -#include -#include -#include -#include +#include #include -#include +#include +#include +#include +#include +#include #include #include +#include #include +#include #include #include -#include -#include #include #include +#include namespace DB { @@ -239,6 +240,8 @@ SetPtr FutureSetFromSubquery::buildOrderedSetInplace(const ContextPtr & context) if (!set_and_key->set->isCreated()) return nullptr; + logProcessorProfile(context, pipeline.getProcessors()); + return set_and_key->set; } diff --git a/src/Interpreters/ProcessorsProfileLog.cpp b/src/Interpreters/ProcessorsProfileLog.cpp index 8a646b5d0e7..d7811e5e9e2 100644 --- a/src/Interpreters/ProcessorsProfileLog.cpp +++ b/src/Interpreters/ProcessorsProfileLog.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -8,16 +9,19 @@ #include #include #include +#include #include #include #include -#include - -#include namespace DB { +namespace Setting +{ +extern const SettingsBool log_processors_profiles; +} + ColumnsDescription ProcessorProfileLogElement::getColumnsDescription() { return ColumnsDescription @@ -81,5 +85,57 @@ void ProcessorProfileLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(output_bytes); } +void logProcessorProfile(ContextPtr context, const Processors & processors) +{ + const Settings & settings = context->getSettingsRef(); + if (settings[Setting::log_processors_profiles]) + { + if (auto processors_profile_log = context->getProcessorsProfileLog()) + { + ProcessorProfileLogElement processor_elem; + const auto time_now = std::chrono::system_clock::now(); + processor_elem.event_time = timeInSeconds(time_now); + processor_elem.event_time_microseconds = timeInMicroseconds(time_now); + processor_elem.initial_query_id = context->getInitialQueryId(); + processor_elem.query_id = 
context->getCurrentQueryId(); + + auto get_proc_id = [](const IProcessor & proc) -> UInt64 { return reinterpret_cast(&proc); }; + + for (const auto & processor : processors) + { + std::vector parents; + for (const auto & port : processor->getOutputs()) + { + if (!port.isConnected()) + continue; + const IProcessor & next = port.getInputPort().getProcessor(); + parents.push_back(get_proc_id(next)); + } + + processor_elem.id = get_proc_id(*processor); + processor_elem.parent_ids = std::move(parents); + + processor_elem.plan_step = reinterpret_cast(processor->getQueryPlanStep()); + processor_elem.plan_step_name = processor->getPlanStepName(); + processor_elem.plan_step_description = processor->getPlanStepDescription(); + processor_elem.plan_group = processor->getQueryPlanStepGroup(); + + processor_elem.processor_name = processor->getName(); + + processor_elem.elapsed_us = static_cast(processor->getElapsedNs() / 1000U); + processor_elem.input_wait_elapsed_us = static_cast(processor->getInputWaitElapsedNs() / 1000U); + processor_elem.output_wait_elapsed_us = static_cast(processor->getOutputWaitElapsedNs() / 1000U); + + auto stats = processor->getProcessorDataStats(); + processor_elem.input_rows = stats.input_rows; + processor_elem.input_bytes = stats.input_bytes; + processor_elem.output_rows = stats.output_rows; + processor_elem.output_bytes = stats.output_bytes; + + processors_profile_log->add(processor_elem); + } + } + } +} } diff --git a/src/Interpreters/ProcessorsProfileLog.h b/src/Interpreters/ProcessorsProfileLog.h index fbf52f45f56..9cc2ab6c7f0 100644 --- a/src/Interpreters/ProcessorsProfileLog.h +++ b/src/Interpreters/ProcessorsProfileLog.h @@ -50,4 +50,5 @@ public: using SystemLog::SystemLog; }; +void logProcessorProfile(ContextPtr context, const Processors & processors); } diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 9250c069283..fa28fa04ab1 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -117,7 +117,6 @@ namespace Setting extern const SettingsOverflowMode join_overflow_mode; extern const SettingsString log_comment; extern const SettingsBool log_formatted_queries; - extern const SettingsBool log_processors_profiles; extern const SettingsBool log_profile_events; extern const SettingsUInt64 log_queries_cut_to_length; extern const SettingsBool log_queries; @@ -551,53 +550,8 @@ void logQueryFinish( if (auto query_log = context->getQueryLog()) query_log->add(elem); } - if (settings[Setting::log_processors_profiles]) - { - if (auto processors_profile_log = context->getProcessorsProfileLog()) - { - ProcessorProfileLogElement processor_elem; - processor_elem.event_time = elem.event_time; - processor_elem.event_time_microseconds = elem.event_time_microseconds; - processor_elem.initial_query_id = elem.client_info.initial_query_id; - processor_elem.query_id = elem.client_info.current_query_id; - auto get_proc_id = [](const IProcessor & proc) -> UInt64 { return reinterpret_cast(&proc); }; - - for (const auto & processor : query_pipeline.getProcessors()) - { - std::vector parents; - for (const auto & port : processor->getOutputs()) - { - if (!port.isConnected()) - continue; - const IProcessor & next = port.getInputPort().getProcessor(); - parents.push_back(get_proc_id(next)); - } - - processor_elem.id = get_proc_id(*processor); - processor_elem.parent_ids = std::move(parents); - - processor_elem.plan_step = reinterpret_cast(processor->getQueryPlanStep()); - processor_elem.plan_step_name = 
processor->getPlanStepName(); - processor_elem.plan_step_description = processor->getPlanStepDescription(); - processor_elem.plan_group = processor->getQueryPlanStepGroup(); - - processor_elem.processor_name = processor->getName(); - - processor_elem.elapsed_us = static_cast(processor->getElapsedNs() / 1000U); - processor_elem.input_wait_elapsed_us = static_cast(processor->getInputWaitElapsedNs() / 1000U); - processor_elem.output_wait_elapsed_us = static_cast(processor->getOutputWaitElapsedNs() / 1000U); - - auto stats = processor->getProcessorDataStats(); - processor_elem.input_rows = stats.input_rows; - processor_elem.input_bytes = stats.input_bytes; - processor_elem.output_rows = stats.output_rows; - processor_elem.output_bytes = stats.output_bytes; - - processors_profile_log->add(processor_elem); - } - } - } + logProcessorProfile(context, query_pipeline.getProcessors()); logQueryMetricLogFinish(context, internal, elem.client_info.current_query_id, time_now, std::make_shared(info)); } From bd71442ea26a5263b56e6774c6938fcb24dea432 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 11 Nov 2024 22:45:39 +0100 Subject: [PATCH 436/566] add test --- .../03270_processors_profile_log_3.reference | 2 + .../03270_processors_profile_log_3.sh | 96 +++++++++++++++++++ 2 files changed, 98 insertions(+) create mode 100644 tests/queries/0_stateless/03270_processors_profile_log_3.reference create mode 100755 tests/queries/0_stateless/03270_processors_profile_log_3.sh diff --git a/tests/queries/0_stateless/03270_processors_profile_log_3.reference b/tests/queries/0_stateless/03270_processors_profile_log_3.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/03270_processors_profile_log_3.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/03270_processors_profile_log_3.sh b/tests/queries/0_stateless/03270_processors_profile_log_3.sh new file mode 100755 index 00000000000..eb86a9f6352 --- /dev/null +++ b/tests/queries/0_stateless/03270_processors_profile_log_3.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +set -e + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + + +$CLICKHOUSE_CLIENT -q " + CREATE TABLE t + ( + a UInt32, + b UInt32 + ) + ENGINE = MergeTree + ORDER BY (a, b); + + INSERT INTO t SELECT number, number FROM numbers(1000); +" + +query_id="03270_processors_profile_log_3_$RANDOM" + +$CLICKHOUSE_CLIENT --query_id="$query_id" -q " + SET log_processors_profiles = 1; + + WITH + t0 AS + ( + SELECT * + FROM numbers(1000) + ), + t1 AS + ( + SELECT number * 3 AS b + FROM t0 + ) + SELECT b * 3 + FROM t + WHERE a IN (t1) + FORMAT Null; +" + +$CLICKHOUSE_CLIENT --query_id="$query_id" -q " + SYSTEM FLUSH LOGS; + + SELECT sum(elapsed_us) > 0 + FROM system.processors_profile_log + WHERE event_date >= yesterday() AND query_id = '$query_id' AND name = 'CreatingSetsTransform'; +" + +##################################################################### + +$CLICKHOUSE_CLIENT -q " + CREATE TABLE t1 + ( + st FixedString(54) + ) + ENGINE = MergeTree + ORDER BY tuple(); + + INSERT INTO t1 VALUES + ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRTUVWXYZ'), + ('\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'), + ('IIIIIIIIII\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'); +" + +query_id="03270_processors_profile_log_3_$RANDOM" + +$CLICKHOUSE_CLIENT --query_id="$query_id" -q " + SET log_processors_profiles = 1; + SET max_threads=2; -- no merging when max_threads=1 + + WITH + ( + SELECT groupConcat(',')(st) + FROM t1 + ORDER BY ALL + ) AS a, + ( + SELECT groupConcat(',')(CAST(st, 'String')) + FROM t1 + ORDER BY ALL + ) AS b + SELECT a = b + FORMAT Null; +" + +$CLICKHOUSE_CLIENT --query_id="$query_id" -q " + SYSTEM FLUSH LOGS; + + SELECT sum(elapsed_us) > 0 + FROM system.processors_profile_log + WHERE event_date >= yesterday() AND query_id = '$query_id' AND name = 'MergingSortedTransform'; +" + From 7310376413e34ad2c958c5ccde9cddd75e0a5aed Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 12 Nov 2024 01:23:01 +0100 Subject: [PATCH 437/566] Remove ridiculous code bloat --- .../AggregateFunctionDeltaSumTimestamp.cpp | 69 ++++++++++++++---- src/AggregateFunctions/Helpers.h | 70 +------------------ 2 files changed, 58 insertions(+), 81 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp index dc1adead87c..0c5b752b539 100644 --- a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp +++ b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp @@ -22,6 +22,13 @@ namespace ErrorCodes namespace { +/** Due to a lack of proper code review, this code was contributed with a multiplication of template instantiations + * over all pairs of data types, and we deeply regret that. + * + * We cannot remove all combinations, because the binary representation of serialized data has to remain the same, + * but we can partially heal the wound by treating unsigned and signed data types in the same way. 
+ */
+
 template <typename ValueType, typename TimestampType>
 struct AggregationFunctionDeltaSumTimestampData
 {
@@ -37,23 +44,22 @@ template <typename ValueType, typename TimestampType>
 class AggregationFunctionDeltaSumTimestamp final
     : public IAggregateFunctionDataHelper<
         AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
-        AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
-    >
+        AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>
 {
 public:
     AggregationFunctionDeltaSumTimestamp(const DataTypes & arguments, const Array & params)
         : IAggregateFunctionDataHelper<
            AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
-            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
-        >{arguments, params, createResultType()}
-    {}
+            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{arguments, params, createResultType()}
+    {
+    }
 
     AggregationFunctionDeltaSumTimestamp()
         : IAggregateFunctionDataHelper<
            AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
-            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
-        >{}
-    {}
+            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{}
+    {
+    }
 
     bool allocatesMemoryInArena() const override { return false; }
 
@@ -63,8 +69,8 @@ public:
     void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
     {
-        auto value = assert_cast<const ColumnVector<ValueType> &>(*columns[0]).getData()[row_num];
-        auto ts = assert_cast<const ColumnVector<TimestampType> &>(*columns[1]).getData()[row_num];
+        auto value = unalignedLoad<ValueType>(columns[0]->getRawData().data() + row_num * sizeof(ValueType));
+        auto ts = unalignedLoad<TimestampType>(columns[1]->getRawData().data() + row_num * sizeof(TimestampType));
 
         auto & data = this->data(place);
@@ -172,10 +178,49 @@ public:
     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
     {
-        assert_cast<ColumnVector<ValueType> &>(to).getData().push_back(this->data(place).sum);
+        static_cast<ColumnFixedSizeHelper &>(to).template insertRawData<sizeof(ValueType)>(
+            reinterpret_cast<const char *>(&this->data(place).sum));
     }
 };
+
+
+template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
+static IAggregateFunction * createWithTwoTypesSecond(const IDataType & second_type, TArgs && ... args)
+{
+    WhichDataType which(second_type);
+
+    if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
+    if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate<FirstType, UInt64>(args...);
+    if (which.idx == TypeIndex::Int32) return new AggregateFunctionTemplate<FirstType, Int32>(args...);
+    if (which.idx == TypeIndex::Int64) return new AggregateFunctionTemplate<FirstType, Int64>(args...);
+    if (which.idx == TypeIndex::Float32) return new AggregateFunctionTemplate<FirstType, Float32>(args...);
+    if (which.idx == TypeIndex::Float64) return new AggregateFunctionTemplate<FirstType, Float64>(args...);
+    if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(args...);
+    if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
+
+    return nullptr;
+}
+
+template