From 77a5865a22d290033aec1894b2c79e688f713238 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 13 Jan 2024 22:30:30 +0100 Subject: [PATCH 001/267] Adding FP16 --- base/base/BFloat16.h | 9 ++++++++ base/base/DecomposedFloat.h | 10 +++++++++ base/base/TypeLists.h | 2 +- base/base/TypeName.h | 1 + base/base/extended_types.h | 20 ++++++++++++++--- base/base/wide_integer.h | 2 +- base/base/wide_integer_impl.h | 8 ++++++- .../AggregateFunctionGroupArray.cpp | 4 ++-- .../AggregateFunctionGroupArrayMoving.cpp | 2 +- .../AggregateFunctionIntervalLengthSum.cpp | 4 ++-- .../AggregateFunctionSparkbar.cpp | 6 ++--- src/AggregateFunctions/AggregateFunctionSum.h | 6 ++--- .../AggregateFunctionUniqCombined.h | 2 +- src/AggregateFunctions/QuantileTDigest.h | 2 +- src/AggregateFunctions/ReservoirSampler.h | 2 +- .../ReservoirSamplerDeterministic.h | 2 +- src/Columns/ColumnArray.cpp | 4 ++++ src/Columns/ColumnNullable.cpp | 2 ++ src/Columns/ColumnVector.cpp | 13 ++++++----- src/Columns/ColumnVector.h | 1 + src/Columns/ColumnsCommon.cpp | 1 + src/Columns/ColumnsNumber.h | 1 + src/Columns/MaskOperations.cpp | 2 ++ src/Columns/tests/gtest_column_vector.cpp | 1 + src/Columns/tests/gtest_low_cardinality.cpp | 1 + src/Common/FieldVisitorConvertToNumber.h | 4 ++-- src/Common/HashTable/Hash.h | 1 + src/Common/HashTable/HashTable.h | 2 +- src/Common/NaNUtils.h | 6 ++--- src/Common/findExtreme.h | 2 +- src/Common/transformEndianness.h | 2 +- src/Core/AccurateComparison.h | 18 +++++++-------- src/Core/DecimalFunctions.h | 2 +- src/Core/Field.h | 1 + src/Core/SortCursor.h | 1 + src/Core/TypeId.h | 2 ++ src/Core/Types_fwd.h | 7 +----- src/Core/callOnTypeIndex.h | 3 +++ src/DataTypes/DataTypeNumberBase.cpp | 1 + src/DataTypes/DataTypeNumberBase.h | 1 + src/DataTypes/DataTypesDecimal.h | 5 +++-- src/DataTypes/DataTypesNumber.cpp | 1 + src/DataTypes/DataTypesNumber.h | 1 + src/DataTypes/IDataType.h | 5 ++++- src/DataTypes/NumberTraits.h | 22 +++++++++---------- .../Serializations/SerializationNumber.cpp | 1 + src/DataTypes/Utils.cpp | 7 ++++++ src/DataTypes/getLeastSupertype.cpp | 6 ++++- src/DataTypes/getMostSubtype.cpp | 6 ++++- src/Formats/ProtobufSerializer.cpp | 2 +- src/Functions/DivisionUtils.h | 16 +++++++------- src/Functions/FunctionMathUnary.h | 4 ++-- src/Functions/FunctionsConversion.h | 12 +++++----- src/Functions/FunctionsRound.h | 2 +- src/Functions/factorial.cpp | 2 +- src/Functions/minus.cpp | 4 ++-- src/Functions/moduloOrZero.cpp | 2 +- src/Functions/multiply.cpp | 4 ++-- src/Functions/plus.cpp | 4 ++-- src/Functions/sign.cpp | 2 +- src/IO/ReadHelpers.h | 4 +++- src/IO/WriteHelpers.h | 22 +++++++++++-------- src/Interpreters/RowRefs.cpp | 2 +- 63 files changed, 192 insertions(+), 105 deletions(-) create mode 100644 base/base/BFloat16.h diff --git a/base/base/BFloat16.h b/base/base/BFloat16.h new file mode 100644 index 00000000000..17c3ebe9ef3 --- /dev/null +++ b/base/base/BFloat16.h @@ -0,0 +1,9 @@ +#pragma once + +using BFloat16 = __bf16; + +namespace std +{ + inline constexpr bool isfinite(BFloat16) { return true; } + inline constexpr bool signbit(BFloat16) { return false; } +} diff --git a/base/base/DecomposedFloat.h b/base/base/DecomposedFloat.h index f152637b94e..fda7ee8d3f4 100644 --- a/base/base/DecomposedFloat.h +++ b/base/base/DecomposedFloat.h @@ -10,6 +10,15 @@ template struct FloatTraits; +template <> +struct FloatTraits<__bf16> +{ + using UInt = uint16_t; + static constexpr size_t bits = 16; + static constexpr size_t exponent_bits = 8; + static constexpr size_t mantissa_bits = bits 
- exponent_bits - 1; +}; + template <> struct FloatTraits { @@ -217,3 +226,4 @@ struct DecomposedFloat using DecomposedFloat64 = DecomposedFloat; using DecomposedFloat32 = DecomposedFloat; +using DecomposedFloat16 = DecomposedFloat<__bf16>; diff --git a/base/base/TypeLists.h b/base/base/TypeLists.h index 6c1283d054c..ce3111b1da3 100644 --- a/base/base/TypeLists.h +++ b/base/base/TypeLists.h @@ -9,7 +9,7 @@ namespace DB { using TypeListNativeInt = TypeList; -using TypeListFloat = TypeList; +using TypeListFloat = TypeList; using TypeListNativeNumber = TypeListConcat; using TypeListWideInt = TypeList; using TypeListInt = TypeListConcat; diff --git a/base/base/TypeName.h b/base/base/TypeName.h index 9005b5a2bf4..1f4b475d653 100644 --- a/base/base/TypeName.h +++ b/base/base/TypeName.h @@ -32,6 +32,7 @@ TN_MAP(Int32) TN_MAP(Int64) TN_MAP(Int128) TN_MAP(Int256) +TN_MAP(BFloat16) TN_MAP(Float32) TN_MAP(Float64) TN_MAP(String) diff --git a/base/base/extended_types.h b/base/base/extended_types.h index b58df45a97e..39665784141 100644 --- a/base/base/extended_types.h +++ b/base/base/extended_types.h @@ -4,6 +4,8 @@ #include #include +#include + using Int128 = wide::integer<128, signed>; using UInt128 = wide::integer<128, unsigned>; @@ -24,6 +26,7 @@ struct is_signed // NOLINT(readability-identifier-naming) template <> struct is_signed { static constexpr bool value = true; }; template <> struct is_signed { static constexpr bool value = true; }; +template <> struct is_signed { static constexpr bool value = true; }; template inline constexpr bool is_signed_v = is_signed::value; @@ -47,8 +50,6 @@ template concept is_integer = || std::is_same_v || std::is_same_v; -template concept is_floating_point = std::is_floating_point_v; - template struct is_arithmetic // NOLINT(readability-identifier-naming) { @@ -59,11 +60,24 @@ template <> struct is_arithmetic { static constexpr bool value = true; } template <> struct is_arithmetic { static constexpr bool value = true; }; template <> struct is_arithmetic { static constexpr bool value = true; }; template <> struct is_arithmetic { static constexpr bool value = true; }; - +template <> struct is_arithmetic { static constexpr bool value = true; }; template inline constexpr bool is_arithmetic_v = is_arithmetic::value; + +template +struct is_floating_point // NOLINT(readability-identifier-naming) +{ + static constexpr bool value = std::is_floating_point_v; +}; + +template <> struct is_floating_point { static constexpr bool value = true; }; + +template +inline constexpr bool is_floating_point_v = is_floating_point::value; + + template struct make_unsigned // NOLINT(readability-identifier-naming) { diff --git a/base/base/wide_integer.h b/base/base/wide_integer.h index ffd30460c03..877ef5bd137 100644 --- a/base/base/wide_integer.h +++ b/base/base/wide_integer.h @@ -117,6 +117,7 @@ public: constexpr operator long double() const noexcept; constexpr operator double() const noexcept; constexpr operator float() const noexcept; + constexpr operator __bf16() const noexcept; struct _impl; @@ -262,4 +263,3 @@ struct hash>; // NOLINTEND(*) #include "wide_integer_impl.h" - diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h index c1fd7b69b7f..7b95164e44d 100644 --- a/base/base/wide_integer_impl.h +++ b/base/base/wide_integer_impl.h @@ -154,7 +154,7 @@ struct common_type, Arithmetic> static_assert(wide::ArithmeticConcept()); using type = std::conditional_t< - std::is_floating_point_v, + is_floating_point_v, Arithmetic, std::conditional_t< sizeof(Arithmetic) * 8 < 
            Bits,
@@ -1291,6 +1291,12 @@ constexpr integer<Bits, Signed>::operator float() const noexcept
     return static_cast<float>(static_cast<double>(*this));
 }
 
+template <size_t Bits, typename Signed>
+constexpr integer<Bits, Signed>::operator __bf16() const noexcept
+{
+    return static_cast<__bf16>(static_cast<float>(*this));
+}
+
 // Unary operators
 template <size_t Bits, typename Signed>
 constexpr integer<Bits, Signed> operator~(const integer<Bits, Signed> & lhs) noexcept
diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp
index 6c6397e35d5..bcefa6b93dc 100644
--- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp
+++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp
@@ -74,7 +74,7 @@ template <typename T>
 struct GroupArraySamplerData
 {
     /// For easy serialization.
-    static_assert(std::has_unique_object_representations_v<T> || std::is_floating_point_v<T>);
+    static_assert(std::has_unique_object_representations_v<T> || is_floating_point_v<T>);
 
     // Switch to ordinary Allocator after 4096 bytes to avoid fragmentation and trash in Arena
     using Allocator = MixedAlignedArenaAllocator<alignof(T), 4096>;
@@ -116,7 +116,7 @@ template <typename T>
 struct GroupArrayNumericData
 {
     /// For easy serialization.
- static_assert(std::has_unique_object_representations_v || std::is_floating_point_v); + static_assert(std::has_unique_object_representations_v || is_floating_point_v); using Accumulator = T; diff --git a/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp b/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp index eacd0596757..06156643aa0 100644 --- a/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp +++ b/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp @@ -187,7 +187,7 @@ public: static DataTypePtr createResultType() { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return std::make_shared(); return std::make_shared(); } @@ -227,7 +227,7 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) assert_cast(to).getData().push_back(getIntervalLengthSum(this->data(place))); else assert_cast(to).getData().push_back(getIntervalLengthSum(this->data(place))); diff --git a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp index b6e538520a8..f4214f3a133 100644 --- a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp +++ b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp @@ -50,7 +50,7 @@ struct AggregateFunctionSparkbarData auto [it, inserted] = points.insert({x, y}); if (!inserted) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { it->getMapped() += y; return it->getMapped(); @@ -197,7 +197,7 @@ private: Y res; bool has_overfllow = false; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) res = histogram[index] + point.getMapped(); else has_overfllow = common::addOverflow(histogram[index], point.getMapped(), res); @@ -246,7 +246,7 @@ private: } constexpr auto levels_num = static_cast(BAR_LEVELS - 1); - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { y = y / (y_max / levels_num) + 1; } diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index 5781ab69c6b..81df3244b38 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -69,7 +69,7 @@ struct AggregateFunctionSumData size_t count = end - start; const auto * end_ptr = ptr + count; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { /// Compiler cannot unroll this loop, do it manually. 
/// (at least for floats, most likely due to the lack of -fassociative-math) @@ -164,7 +164,7 @@ struct AggregateFunctionSumData return; } - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { /// For floating point we use a similar trick as above, except that now we reinterpret the floating point number as an unsigned /// integer of the same size and use a mask instead (0 to discard, 0xFF..FF to keep) @@ -277,7 +277,7 @@ struct AggregateFunctionSumData template struct AggregateFunctionSumKahanData { - static_assert(std::is_floating_point_v, + static_assert(is_floating_point_v, "It doesn't make sense to use Kahan Summation algorithm for non floating point types"); T sum{}; diff --git a/src/AggregateFunctions/AggregateFunctionUniqCombined.h b/src/AggregateFunctions/AggregateFunctionUniqCombined.h index 10774442610..19e2665f9af 100644 --- a/src/AggregateFunctions/AggregateFunctionUniqCombined.h +++ b/src/AggregateFunctions/AggregateFunctionUniqCombined.h @@ -114,7 +114,7 @@ public: /// Initially UInt128 was introduced only for UUID, and then the other big-integer types were added. hash = static_cast(sipHash64(value)); } - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { hash = static_cast(intHash64(bit_cast(value))); } diff --git a/src/AggregateFunctions/QuantileTDigest.h b/src/AggregateFunctions/QuantileTDigest.h index 979c3f2af15..1407b73e669 100644 --- a/src/AggregateFunctions/QuantileTDigest.h +++ b/src/AggregateFunctions/QuantileTDigest.h @@ -380,7 +380,7 @@ public: ResultType getImpl(Float64 level) { if (centroids.empty()) - return std::is_floating_point_v ? std::numeric_limits::quiet_NaN() : 0; + return is_floating_point_v ? std::numeric_limits::quiet_NaN() : 0; compress(); diff --git a/src/AggregateFunctions/ReservoirSampler.h b/src/AggregateFunctions/ReservoirSampler.h index 37fc05a2e4c..242540102b8 100644 --- a/src/AggregateFunctions/ReservoirSampler.h +++ b/src/AggregateFunctions/ReservoirSampler.h @@ -278,6 +278,6 @@ private: if (OnEmpty == ReservoirSamplerOnEmpty::THROW) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSampler"); else - return NanLikeValueConstructor>::getValue(); + return NanLikeValueConstructor>::getValue(); } }; diff --git a/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h index daed0b98ca3..75af6638183 100644 --- a/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ b/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -273,7 +273,7 @@ private: if (OnEmpty == ReservoirSamplerDeterministicOnEmpty::THROW) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSamplerDeterministic"); else - return NanLikeValueConstructor>::getValue(); + return NanLikeValueConstructor>::getValue(); } }; diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index 1cb8188bce6..4aaaf01e5ea 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -574,6 +574,8 @@ ColumnPtr ColumnArray::filter(const Filter & filt, ssize_t result_size_hint) con return filterNumber(filt, result_size_hint); if (typeid_cast(data.get())) return filterNumber(filt, result_size_hint); + if (typeid_cast(data.get())) + return filterNumber(filt, result_size_hint); if (typeid_cast(data.get())) return filterNumber(filt, result_size_hint); if (typeid_cast(data.get())) @@ -993,6 +995,8 @@ ColumnPtr ColumnArray::replicate(const Offsets & replicate_offsets) const return 
replicateNumber(replicate_offsets); if (typeid_cast(data.get())) return replicateNumber(replicate_offsets); + if (typeid_cast(data.get())) + return replicateNumber(replicate_offsets); if (typeid_cast(data.get())) return replicateNumber(replicate_offsets); if (typeid_cast(data.get())) diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index 4ee6bb3d586..3513ac06dcd 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -171,6 +171,8 @@ StringRef ColumnNullable::serializeValueIntoArena(size_t n, Arena & arena, char return static_cast(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]); case TypeIndex::Int256: return static_cast(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]); + case TypeIndex::BFloat16: + return static_cast(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]); case TypeIndex::Float32: return static_cast(nested_column.get())->serializeValueIntoArena(n, arena, begin, &arr[n]); case TypeIndex::Float64: diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp index b1cf449dfde..bad84e7147c 100644 --- a/src/Columns/ColumnVector.cpp +++ b/src/Columns/ColumnVector.cpp @@ -141,7 +141,7 @@ struct ColumnVector::less_stable if (unlikely(parent.data[lhs] == parent.data[rhs])) return lhs < rhs; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { if (unlikely(std::isnan(parent.data[lhs]) && std::isnan(parent.data[rhs]))) { @@ -173,7 +173,7 @@ struct ColumnVector::greater_stable if (unlikely(parent.data[lhs] == parent.data[rhs])) return lhs < rhs; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { if (unlikely(std::isnan(parent.data[lhs]) && std::isnan(parent.data[rhs]))) { @@ -259,7 +259,7 @@ void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction bool sort_is_stable = stability == IColumn::PermutationSortStability::Stable; /// TODO: LSD RadixSort is currently not stable if direction is descending, or value is floating point - bool use_radix_sort = (sort_is_stable && ascending && !std::is_floating_point_v) || !sort_is_stable; + bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point_v) || !sort_is_stable; /// Thresholds on size. Lower threshold is arbitrary. Upper threshold is chosen by the type for histogram counters. if (data_size >= 256 && data_size <= std::numeric_limits::max() && use_radix_sort) @@ -286,7 +286,7 @@ void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction /// Radix sort treats all NaNs to be greater than all numbers. /// If the user needs the opposite, we must move them accordingly. - if (std::is_floating_point_v && nan_direction_hint < 0) + if (is_floating_point_v && nan_direction_hint < 0) { size_t nans_to_move = 0; @@ -333,7 +333,7 @@ void ColumnVector::updatePermutation(IColumn::PermutationSortDirection direct if constexpr (is_arithmetic_v && !is_big_int_v) { /// TODO: LSD RadixSort is currently not stable if direction is descending, or value is floating point - bool use_radix_sort = (sort_is_stable && ascending && !std::is_floating_point_v) || !sort_is_stable; + bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point_v) || !sort_is_stable; size_t size = end - begin; /// Thresholds on size. Lower threshold is arbitrary. Upper threshold is chosen by the type for histogram counters. 
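// Note on the recurring s/std::is_floating_point_v/is_floating_point_v/ substitution in the
// hunks above and below: std::is_floating_point_v<__bf16> is not guaranteed to be true,
// because __bf16 is a compiler extension rather than one of the standard floating-point
// types, so `if constexpr` guards such as the NaN handling here would otherwise take the
// non-float branch for BFloat16 columns. A minimal self-contained sketch of the
// project-local trait, mirroring the extended_types.h hunk of this patch (illustration
// only, assuming a toolchain where __bf16 is available):

#include <type_traits>

using BFloat16 = __bf16;

template <typename T>
struct is_floating_point
{
    static constexpr bool value = std::is_floating_point_v<T>;
};

template <>
struct is_floating_point<BFloat16>
{
    static constexpr bool value = true;
};

template <typename T>
inline constexpr bool is_floating_point_v = is_floating_point<T>::value;

static_assert(is_floating_point_v<BFloat16> && is_floating_point_v<double>);
static_assert(!is_floating_point_v<int>);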
@@ -356,7 +356,7 @@ void ColumnVector::updatePermutation(IColumn::PermutationSortDirection direct /// Radix sort treats all NaNs to be greater than all numbers. /// If the user needs the opposite, we must move them accordingly. - if (std::is_floating_point_v && nan_direction_hint < 0) + if (is_floating_point_v && nan_direction_hint < 0) { size_t nans_to_move = 0; @@ -970,6 +970,7 @@ template class ColumnVector; template class ColumnVector; template class ColumnVector; template class ColumnVector; +template class ColumnVector; template class ColumnVector; template class ColumnVector; template class ColumnVector; diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index fab2d5f06aa..c976fac3bab 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -570,6 +570,7 @@ extern template class ColumnVector; extern template class ColumnVector; extern template class ColumnVector; extern template class ColumnVector; +extern template class ColumnVector; extern template class ColumnVector; extern template class ColumnVector; extern template class ColumnVector; diff --git a/src/Columns/ColumnsCommon.cpp b/src/Columns/ColumnsCommon.cpp index 4ac84e10750..444f5fae87a 100644 --- a/src/Columns/ColumnsCommon.cpp +++ b/src/Columns/ColumnsCommon.cpp @@ -328,6 +328,7 @@ INSTANTIATE(Int32) INSTANTIATE(Int64) INSTANTIATE(Int128) INSTANTIATE(Int256) +INSTANTIATE(BFloat16) INSTANTIATE(Float32) INSTANTIATE(Float64) INSTANTIATE(Decimal32) diff --git a/src/Columns/ColumnsNumber.h b/src/Columns/ColumnsNumber.h index ae7eddb0b22..2dce2269079 100644 --- a/src/Columns/ColumnsNumber.h +++ b/src/Columns/ColumnsNumber.h @@ -23,6 +23,7 @@ using ColumnInt64 = ColumnVector; using ColumnInt128 = ColumnVector; using ColumnInt256 = ColumnVector; +using ColumnBFloat16 = ColumnVector; using ColumnFloat32 = ColumnVector; using ColumnFloat64 = ColumnVector; diff --git a/src/Columns/MaskOperations.cpp b/src/Columns/MaskOperations.cpp index b84268356a7..ca4ca263811 100644 --- a/src/Columns/MaskOperations.cpp +++ b/src/Columns/MaskOperations.cpp @@ -63,6 +63,7 @@ INSTANTIATE(Int32) INSTANTIATE(Int64) INSTANTIATE(Int128) INSTANTIATE(Int256) +INSTANTIATE(BFloat16) INSTANTIATE(Float32) INSTANTIATE(Float64) INSTANTIATE(Decimal32) @@ -225,6 +226,7 @@ MaskInfo extractMaskImpl( || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info) || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info) || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info) + || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info) || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info) || extractMaskNumeric(mask, column, null_value, null_bytemap, nulls, mask_info))) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot convert column {} to mask.", column->getName()); diff --git a/src/Columns/tests/gtest_column_vector.cpp b/src/Columns/tests/gtest_column_vector.cpp index b71d4a095ab..3a084a89079 100644 --- a/src/Columns/tests/gtest_column_vector.cpp +++ b/src/Columns/tests/gtest_column_vector.cpp @@ -93,6 +93,7 @@ TEST(ColumnVector, Filter) testFilter(); testFilter(); testFilter(); + testFilter(); testFilter(); testFilter(); testFilter(); diff --git a/src/Columns/tests/gtest_low_cardinality.cpp b/src/Columns/tests/gtest_low_cardinality.cpp index 5e01279b7df..965c0d219b9 100644 --- a/src/Columns/tests/gtest_low_cardinality.cpp +++ b/src/Columns/tests/gtest_low_cardinality.cpp @@ -45,6 +45,7 @@ TEST(ColumnLowCardinality, 
Insert)
     testLowCardinalityNumberInsert<Int32>(std::make_shared<DataTypeInt32>());
     testLowCardinalityNumberInsert<Int64>(std::make_shared<DataTypeInt64>());
 
+    testLowCardinalityNumberInsert<BFloat16>(std::make_shared<DataTypeBFloat16>());
     testLowCardinalityNumberInsert<Float32>(std::make_shared<DataTypeFloat32>());
     testLowCardinalityNumberInsert<Float64>(std::make_shared<DataTypeFloat64>());
 }
diff --git a/src/Common/FieldVisitorConvertToNumber.h b/src/Common/FieldVisitorConvertToNumber.h
index bf8c8c8638e..38144650b97 100644
--- a/src/Common/FieldVisitorConvertToNumber.h
+++ b/src/Common/FieldVisitorConvertToNumber.h
@@ -58,7 +58,7 @@ public:
     T operator() (const Float64 & x) const
     {
-        if constexpr (!std::is_floating_point_v<T>)
+        if constexpr (!is_floating_point_v<T>)
         {
             if (!isFinite(x))
             {
@@ -88,7 +88,7 @@ public:
     template <typename U>
     T operator() (const DecimalField<U> & x) const
     {
-        if constexpr (std::is_floating_point_v<T>)
+        if constexpr (is_floating_point_v<T>)
             return x.getValue().template convertTo<T>() / x.getScaleMultiplier().template convertTo<T>();
         else
             return (x.getValue() / x.getScaleMultiplier()). template convertTo<T>();
diff --git a/src/Common/HashTable/Hash.h b/src/Common/HashTable/Hash.h
index fb6afcde133..b4bc6af1cef 100644
--- a/src/Common/HashTable/Hash.h
+++ b/src/Common/HashTable/Hash.h
@@ -322,6 +322,7 @@ DEFINE_HASH(Int32)
 DEFINE_HASH(Int64)
 DEFINE_HASH(Int128)
 DEFINE_HASH(Int256)
+DEFINE_HASH(BFloat16)
 DEFINE_HASH(Float32)
 DEFINE_HASH(Float64)
 DEFINE_HASH(DB::UUID)
diff --git a/src/Common/HashTable/HashTable.h b/src/Common/HashTable/HashTable.h
index f23c4ca15dd..e4d5d3868c8 100644
--- a/src/Common/HashTable/HashTable.h
+++ b/src/Common/HashTable/HashTable.h
@@ -91,7 +91,7 @@ inline bool bitEquals(T && a, T && b)
 {
     using RealT = std::decay_t<T>;
 
-    if constexpr (std::is_floating_point_v<RealT>)
+    if constexpr (is_floating_point_v<RealT>)
         return 0 == memcmp(&a, &b, sizeof(RealT));  /// Note that memcmp with constant size is compiler builtin.
     else
         return a == b;
diff --git a/src/Common/NaNUtils.h b/src/Common/NaNUtils.h
index 1c5a619e919..6363e3e61a2 100644
--- a/src/Common/NaNUtils.h
+++ b/src/Common/NaNUtils.h
@@ -9,7 +9,7 @@ template <typename T>
 inline bool isNaN(T x)
 {
     /// To be sure, that this function is zero-cost for non-floating point types.
- if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return std::isnan(x); else return false; @@ -19,7 +19,7 @@ inline bool isNaN(T x) template inline bool isFinite(T x) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return std::isfinite(x); else return true; @@ -29,7 +29,7 @@ inline bool isFinite(T x) template T NaNOrZero() { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return std::numeric_limits::quiet_NaN(); else return {}; diff --git a/src/Common/findExtreme.h b/src/Common/findExtreme.h index b38c24697c0..611af023d33 100644 --- a/src/Common/findExtreme.h +++ b/src/Common/findExtreme.h @@ -11,7 +11,7 @@ namespace DB { template -concept is_any_native_number = (is_any_of); +concept is_any_native_number = (is_any_of); template std::optional findExtremeMin(const T * __restrict ptr, size_t start, size_t end); diff --git a/src/Common/transformEndianness.h b/src/Common/transformEndianness.h index 1657305acda..2a0c45efe38 100644 --- a/src/Common/transformEndianness.h +++ b/src/Common/transformEndianness.h @@ -38,7 +38,7 @@ inline void transformEndianness(T & x) } template -requires std::is_floating_point_v +requires is_floating_point_v inline void transformEndianness(T & value) { if constexpr (ToEndian != FromEndian) diff --git a/src/Core/AccurateComparison.h b/src/Core/AccurateComparison.h index a201c136e3a..82d06876fe3 100644 --- a/src/Core/AccurateComparison.h +++ b/src/Core/AccurateComparison.h @@ -25,7 +25,7 @@ bool lessOp(A a, B b) return a < b; /// float vs float - if constexpr (std::is_floating_point_v && std::is_floating_point_v) + if constexpr (is_floating_point_v && is_floating_point_v) return a < b; /// anything vs NaN @@ -49,7 +49,7 @@ bool lessOp(A a, B b) } /// int vs float - if constexpr (is_integer && std::is_floating_point_v) + if constexpr (is_integer && is_floating_point_v) { if constexpr (sizeof(A) <= 4) return static_cast(a) < static_cast(b); @@ -57,7 +57,7 @@ bool lessOp(A a, B b) return DecomposedFloat(b).greater(a); } - if constexpr (std::is_floating_point_v && is_integer) + if constexpr (is_floating_point_v && is_integer) { if constexpr (sizeof(B) <= 4) return static_cast(a) < static_cast(b); @@ -65,8 +65,8 @@ bool lessOp(A a, B b) return DecomposedFloat(a).less(b); } - static_assert(is_integer || std::is_floating_point_v); - static_assert(is_integer || std::is_floating_point_v); + static_assert(is_integer || is_floating_point_v); + static_assert(is_integer || is_floating_point_v); UNREACHABLE(); } @@ -101,7 +101,7 @@ bool equalsOp(A a, B b) return a == b; /// float vs float - if constexpr (std::is_floating_point_v && std::is_floating_point_v) + if constexpr (is_floating_point_v && is_floating_point_v) return a == b; /// anything vs NaN @@ -125,7 +125,7 @@ bool equalsOp(A a, B b) } /// int vs float - if constexpr (is_integer && std::is_floating_point_v) + if constexpr (is_integer && is_floating_point_v) { if constexpr (sizeof(A) <= 4) return static_cast(a) == static_cast(b); @@ -133,7 +133,7 @@ bool equalsOp(A a, B b) return DecomposedFloat(b).equals(a); } - if constexpr (std::is_floating_point_v && is_integer) + if constexpr (is_floating_point_v && is_integer) { if constexpr (sizeof(B) <= 4) return static_cast(a) == static_cast(b); @@ -163,7 +163,7 @@ inline bool NO_SANITIZE_UNDEFINED convertNumeric(From value, To & result) return true; } - if constexpr (std::is_floating_point_v && std::is_floating_point_v) + if constexpr (is_floating_point_v && is_floating_point_v) { /// 
Note that NaNs doesn't compare equal to anything, but they are still in range of any Float type. if (isNaN(value)) diff --git a/src/Core/DecimalFunctions.h b/src/Core/DecimalFunctions.h index 8dad00c3a1e..c5bc4ad70f6 100644 --- a/src/Core/DecimalFunctions.h +++ b/src/Core/DecimalFunctions.h @@ -310,7 +310,7 @@ ReturnType convertToImpl(const DecimalType & decimal, UInt32 scale, To & result) using DecimalNativeType = typename DecimalType::NativeType; static constexpr bool throw_exception = std::is_void_v; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { result = static_cast(decimal.value) / static_cast(scaleMultiplier(scale)); } diff --git a/src/Core/Field.h b/src/Core/Field.h index 6afa98ed9c0..be70eb1ea07 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -251,6 +251,7 @@ template <> struct NearestFieldTypeImpl> { using Type = template <> struct NearestFieldTypeImpl> { using Type = DecimalField; }; template <> struct NearestFieldTypeImpl> { using Type = DecimalField; }; template <> struct NearestFieldTypeImpl> { using Type = DecimalField; }; +template <> struct NearestFieldTypeImpl { using Type = Float64; }; template <> struct NearestFieldTypeImpl { using Type = Float64; }; template <> struct NearestFieldTypeImpl { using Type = Float64; }; template <> struct NearestFieldTypeImpl { using Type = String; }; diff --git a/src/Core/SortCursor.h b/src/Core/SortCursor.h index 3c412fa1f17..a9dc90a8fa1 100644 --- a/src/Core/SortCursor.h +++ b/src/Core/SortCursor.h @@ -687,6 +687,7 @@ private: SortingQueueImpl>, strategy>, SortingQueueImpl>, strategy>, + SortingQueueImpl>, strategy>, SortingQueueImpl>, strategy>, SortingQueueImpl>, strategy>, diff --git a/src/Core/TypeId.h b/src/Core/TypeId.h index 9c634d2321c..73fa7da37e2 100644 --- a/src/Core/TypeId.h +++ b/src/Core/TypeId.h @@ -21,6 +21,7 @@ enum class TypeIndex Int64, Int128, Int256, + BFloat16, Float32, Float64, Date, @@ -91,6 +92,7 @@ TYPEID_MAP(Int32) TYPEID_MAP(Int64) TYPEID_MAP(Int128) TYPEID_MAP(Int256) +TYPEID_MAP(BFloat16) TYPEID_MAP(Float32) TYPEID_MAP(Float64) TYPEID_MAP(UUID) diff --git a/src/Core/Types_fwd.h b/src/Core/Types_fwd.h index a59e4b6eab8..2dffc910f9b 100644 --- a/src/Core/Types_fwd.h +++ b/src/Core/Types_fwd.h @@ -21,6 +21,7 @@ using Int128 = wide::integer<128, signed>; using UInt128 = wide::integer<128, unsigned>; using Int256 = wide::integer<256, signed>; using UInt256 = wide::integer<256, unsigned>; +using BFloat16 = __bf16; namespace DB { @@ -28,16 +29,10 @@ namespace DB using UUID = StrongTypedef; struct IPv4; - struct IPv6; struct Null; -using UInt128 = ::UInt128; -using UInt256 = ::UInt256; -using Int128 = ::Int128; -using Int256 = ::Int256; - enum class TypeIndex; /// Not a data type in database, defined just for convenience. 
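// The BFloat16 = __bf16 alias above gives the new type the same footing in Core as
// Float32 and Float64. Numerically it is the upper half of a Float32: one sign bit, the
// same 8-bit exponent, and a 7-bit stored mantissa (8 significant bits counting the
// implicit one), which is why later hunks in getLeastSupertype.cpp and getMostSubtype.cpp
// count it as 8 mantissa bits next to 24 for Float32 and 53 for Float64. A small
// self-contained illustration of that precision (assumes a toolchain where __bf16
// arithmetic is enabled, e.g. a recent clang):

#include <cassert>
#include <cstdint>
#include <cstring>

int main()
{
    __bf16 x = static_cast<__bf16>(256.0f);   /// 2^8: exactly representable
    __bf16 y = static_cast<__bf16>(257.0f);   /// needs 9 significant bits

    /// Converting back to Float32 fills only the top 16 bits of the bit pattern.
    float fx = static_cast<float>(x);
    std::uint32_t bits;
    std::memcpy(&bits, &fx, sizeof(bits));
    assert((bits & 0xFFFFu) == 0);

    /// At this magnitude the step between representable values is 2, so 257 rounds away.
    assert(static_cast<float>(y) != 257.0f);
}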
diff --git a/src/Core/callOnTypeIndex.h b/src/Core/callOnTypeIndex.h index f5f67df563b..68aba2263c7 100644 --- a/src/Core/callOnTypeIndex.h +++ b/src/Core/callOnTypeIndex.h @@ -62,6 +62,7 @@ static bool callOnBasicType(TypeIndex number, F && f) { switch (number) { + case TypeIndex::BFloat16: return f(TypePair()); case TypeIndex::Float32: return f(TypePair()); case TypeIndex::Float64: return f(TypePair()); default: @@ -132,6 +133,7 @@ static inline bool callOnBasicTypes(TypeIndex type_num1, TypeIndex type_num2, F { switch (type_num1) { + case TypeIndex::BFloat16: return callOnBasicType(type_num2, std::forward(f)); case TypeIndex::Float32: return callOnBasicType(type_num2, std::forward(f)); case TypeIndex::Float64: return callOnBasicType(type_num2, std::forward(f)); default: @@ -189,6 +191,7 @@ static bool callOnIndexAndDataType(TypeIndex number, F && f, ExtraArgs && ... ar case TypeIndex::Int128: return f(TypePair, T>(), std::forward(args)...); case TypeIndex::Int256: return f(TypePair, T>(), std::forward(args)...); + case TypeIndex::BFloat16: return f(TypePair, T>(), std::forward(args)...); case TypeIndex::Float32: return f(TypePair, T>(), std::forward(args)...); case TypeIndex::Float64: return f(TypePair, T>(), std::forward(args)...); diff --git a/src/DataTypes/DataTypeNumberBase.cpp b/src/DataTypes/DataTypeNumberBase.cpp index be448fe1491..636d557f4d0 100644 --- a/src/DataTypes/DataTypeNumberBase.cpp +++ b/src/DataTypes/DataTypeNumberBase.cpp @@ -42,6 +42,7 @@ template class DataTypeNumberBase; template class DataTypeNumberBase; template class DataTypeNumberBase; template class DataTypeNumberBase; +template class DataTypeNumberBase; template class DataTypeNumberBase; template class DataTypeNumberBase; diff --git a/src/DataTypes/DataTypeNumberBase.h b/src/DataTypes/DataTypeNumberBase.h index 3a5b11c5124..11b9427a14d 100644 --- a/src/DataTypes/DataTypeNumberBase.h +++ b/src/DataTypes/DataTypeNumberBase.h @@ -68,6 +68,7 @@ extern template class DataTypeNumberBase; extern template class DataTypeNumberBase; extern template class DataTypeNumberBase; extern template class DataTypeNumberBase; +extern template class DataTypeNumberBase; extern template class DataTypeNumberBase; extern template class DataTypeNumberBase; diff --git a/src/DataTypes/DataTypesDecimal.h b/src/DataTypes/DataTypesDecimal.h index e2b433cbe2f..12d061b11e5 100644 --- a/src/DataTypes/DataTypesDecimal.h +++ b/src/DataTypes/DataTypesDecimal.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -209,9 +210,9 @@ inline ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & static constexpr bool throw_exception = std::is_same_v; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { - if (!std::isfinite(value)) + if (!isFinite(value)) { if constexpr (throw_exception) throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "{} convert overflow. 
Cannot convert infinity or NaN to decimal", ToDataType::family_name); diff --git a/src/DataTypes/DataTypesNumber.cpp b/src/DataTypes/DataTypesNumber.cpp index 1c0c418411b..81c64df9711 100644 --- a/src/DataTypes/DataTypesNumber.cpp +++ b/src/DataTypes/DataTypesNumber.cpp @@ -54,6 +54,7 @@ void registerDataTypeNumbers(DataTypeFactory & factory) factory.registerDataType("Int32", createNumericDataType); factory.registerDataType("Int64", createNumericDataType); + factory.registerDataType("BFloat16", createNumericDataType); factory.registerDataType("Float32", createNumericDataType); factory.registerDataType("Float64", createNumericDataType); diff --git a/src/DataTypes/DataTypesNumber.h b/src/DataTypes/DataTypesNumber.h index 0c1f88a7925..1fe95f58e99 100644 --- a/src/DataTypes/DataTypesNumber.h +++ b/src/DataTypes/DataTypesNumber.h @@ -63,6 +63,7 @@ using DataTypeInt8 = DataTypeNumber; using DataTypeInt16 = DataTypeNumber; using DataTypeInt32 = DataTypeNumber; using DataTypeInt64 = DataTypeNumber; +using DataTypeBFloat16 = DataTypeNumber; using DataTypeFloat32 = DataTypeNumber; using DataTypeFloat64 = DataTypeNumber; diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index eabf066bc3d..ac71a61683a 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -372,9 +372,10 @@ struct WhichDataType constexpr bool isDecimal256() const { return idx == TypeIndex::Decimal256; } constexpr bool isDecimal() const { return isDecimal32() || isDecimal64() || isDecimal128() || isDecimal256(); } + constexpr bool isBFloat16() const { return idx == TypeIndex::BFloat16; } constexpr bool isFloat32() const { return idx == TypeIndex::Float32; } constexpr bool isFloat64() const { return idx == TypeIndex::Float64; } - constexpr bool isFloat() const { return isFloat32() || isFloat64(); } + constexpr bool isFloat() const { return isBFloat16() || isFloat32() || isFloat64(); } constexpr bool isNativeNumber() const { return isNativeInteger() || isFloat(); } constexpr bool isNumber() const { return isInteger() || isFloat() || isDecimal(); } @@ -558,6 +559,7 @@ template inline constexpr bool IsDataTypeEnum> = tr M(Int16) \ M(Int32) \ M(Int64) \ + M(BFloat16) \ M(Float32) \ M(Float64) @@ -574,6 +576,7 @@ template inline constexpr bool IsDataTypeEnum> = tr M(Int64) \ M(Int128) \ M(Int256) \ + M(BFloat16) \ M(Float32) \ M(Float64) } diff --git a/src/DataTypes/NumberTraits.h b/src/DataTypes/NumberTraits.h index cf283d3358c..35a6238c71a 100644 --- a/src/DataTypes/NumberTraits.h +++ b/src/DataTypes/NumberTraits.h @@ -74,7 +74,7 @@ template struct ResultOfAdditionMultiplication { using Type = typename Construct< is_signed_v || is_signed_v, - std::is_floating_point_v || std::is_floating_point_v, + is_floating_point_v || is_floating_point_v, nextSize(max(sizeof(A), sizeof(B)))>::Type; }; @@ -82,7 +82,7 @@ template struct ResultOfSubtraction { using Type = typename Construct< true, - std::is_floating_point_v || std::is_floating_point_v, + is_floating_point_v || is_floating_point_v, nextSize(max(sizeof(A), sizeof(B)))>::Type; }; @@ -113,7 +113,7 @@ template struct ResultOfModulo /// Example: toInt32(-199) % toUInt8(200) will return -199 that does not fit in Int8, only in Int16. static constexpr size_t size_of_result = result_is_signed ? 
nextSize(sizeof(B)) : sizeof(B);
     using Type0 = typename Construct<result_is_signed, false, size_of_result>::Type;
-    using Type = std::conditional_t<std::is_floating_point_v<A> || std::is_floating_point_v<B>, Float64, Type0>;
+    using Type = std::conditional_t<is_floating_point_v<A> || is_floating_point_v<B>, Float64, Type0>;
 };
 
 template <typename A, typename B> struct ResultOfPositiveModulo
 {
     /// function positive_modulo always return non-negative number.
     static constexpr size_t size_of_result = sizeof(B);
     using Type0 = typename Construct<false, false, size_of_result>::Type;
-    using Type = std::conditional_t<std::is_floating_point_v<A> || std::is_floating_point_v<B>, Float64, Type0>;
+    using Type = std::conditional_t<is_floating_point_v<A> || is_floating_point_v<B>, Float64, Type0>;
 };
 
 template <typename A, typename B> struct ResultOfModuloLegacy
 {
     using Type0 = typename Construct<is_signed_v<A> || is_signed_v<B>, false, sizeof(B)>::Type;
-    using Type = std::conditional_t<std::is_floating_point_v<A> || std::is_floating_point_v<B>, Float64, Type0>;
+    using Type = std::conditional_t<is_floating_point_v<A> || is_floating_point_v<B>, Float64, Type0>;
 };
 
 template <typename A> struct ResultOfNegate
 {
     using Type = typename Construct<
         true,
-        std::is_floating_point_v<A>,
+        is_floating_point_v<A>,
         is_signed_v<A> ? sizeof(A) : nextSize(sizeof(A))>::Type;
 };
 
@@ -143,7 +143,7 @@ template <typename A> struct ResultOfAbs
 {
     using Type = typename Construct<
         false,
-        std::is_floating_point_v<A>,
+        is_floating_point_v<A>,
         sizeof(A)>::Type;
 };
 
@@ -154,7 +154,7 @@ template <typename A, typename B> struct ResultOfBit
 {
     using Type = typename Construct<
         is_signed_v<A> || is_signed_v<B>,
         false,
-        std::is_floating_point_v<A> || std::is_floating_point_v<B> ? 8 : max(sizeof(A), sizeof(B))>::Type;
+        is_floating_point_v<A> || is_floating_point_v<B> ? 8 : max(sizeof(A), sizeof(B))>::Type;
 };
 
 template <typename A> struct ResultOfBitNot
@@ -180,7 +180,7 @@ template <typename A, typename B>
 struct ResultOfIf
 {
-    static constexpr bool has_float = std::is_floating_point_v<A> || std::is_floating_point_v<B>;
+    static constexpr bool has_float = is_floating_point_v<A> || is_floating_point_v<B>;
     static constexpr bool has_integer = is_integer<A> || is_integer<B>;
     static constexpr bool has_signed = is_signed_v<A> || is_signed_v<B>;
     static constexpr bool has_unsigned = !is_signed_v<A> || !is_signed_v<B>;
 
     static constexpr size_t max_size_of_unsigned_integer = max(is_signed_v<A> ? 0 : sizeof(A), is_signed_v<B> ? 0 : sizeof(B));
     static constexpr size_t max_size_of_signed_integer = max(is_signed_v<A> ? sizeof(A) : 0, is_signed_v<B> ? sizeof(B) : 0);
     static constexpr size_t max_size_of_integer = max(is_integer<A> ? sizeof(A) : 0, is_integer<B> ? sizeof(B) : 0);
-    static constexpr size_t max_size_of_float = max(std::is_floating_point_v<A> ? sizeof(A) : 0, std::is_floating_point_v<B> ? sizeof(B) : 0);
+    static constexpr size_t max_size_of_float = max(is_floating_point_v<A> ? sizeof(A) : 0, is_floating_point_v<B> ? sizeof(B) : 0);
 
     using ConstructedType = typename Construct<has_signed, has_float,
         ((has_float && has_integer && max_size_of_integer >= max_size_of_float)
             || (has_signed && has_unsigned && max_size_of_unsigned_integer >= max_size_of_signed_integer))
                 ? max(sizeof(A), sizeof(B)) * 2
                 : max(sizeof(A), sizeof(B))>::Type;
@@ -211,7 +211,7 @@ template <typename A> struct ToInteger
 {
     using Type = typename Construct<
         is_signed_v<A>,
         false,
-        std::is_floating_point_v<A> ? 8 : sizeof(A)>::Type;
+        is_floating_point_v<A> ?
8 : sizeof(A)>::Type; }; diff --git a/src/DataTypes/Serializations/SerializationNumber.cpp b/src/DataTypes/Serializations/SerializationNumber.cpp index b6c7e4618b8..805253fccee 100644 --- a/src/DataTypes/Serializations/SerializationNumber.cpp +++ b/src/DataTypes/Serializations/SerializationNumber.cpp @@ -176,6 +176,7 @@ template class SerializationNumber; template class SerializationNumber; template class SerializationNumber; template class SerializationNumber; +template class SerializationNumber; template class SerializationNumber; template class SerializationNumber; diff --git a/src/DataTypes/Utils.cpp b/src/DataTypes/Utils.cpp index e58331a8bcb..d1e314e77dc 100644 --- a/src/DataTypes/Utils.cpp +++ b/src/DataTypes/Utils.cpp @@ -54,6 +54,13 @@ bool canBeSafelyCasted(const DataTypePtr & from_type, const DataTypePtr & to_typ return false; } + case TypeIndex::BFloat16: + { + if (to_which_type.isFloat32() || to_which_type.isFloat64() || to_which_type.isString()) + return true; + + return false; + } case TypeIndex::Float32: { if (to_which_type.isFloat64() || to_which_type.isString()) diff --git a/src/DataTypes/getLeastSupertype.cpp b/src/DataTypes/getLeastSupertype.cpp index e5bdb4b267f..0ed075563e2 100644 --- a/src/DataTypes/getLeastSupertype.cpp +++ b/src/DataTypes/getLeastSupertype.cpp @@ -108,6 +108,8 @@ DataTypePtr getNumericType(const TypeIndexSet & types) maximize(max_bits_of_signed_integer, 128); else if (type == TypeIndex::Int256) maximize(max_bits_of_signed_integer, 256); + else if (type == TypeIndex::BFloat16) + maximize(max_mantissa_bits_of_floating, 8); else if (type == TypeIndex::Float32) maximize(max_mantissa_bits_of_floating, 24); else if (type == TypeIndex::Float64) @@ -144,7 +146,9 @@ DataTypePtr getNumericType(const TypeIndexSet & types) if (max_mantissa_bits_of_floating) { size_t min_mantissa_bits = std::max(min_bit_width_of_integer, max_mantissa_bits_of_floating); - if (min_mantissa_bits <= 24) + if (min_mantissa_bits <= 8) + return std::make_shared(); + else if (min_mantissa_bits <= 24) return std::make_shared(); else if (min_mantissa_bits <= 53) return std::make_shared(); diff --git a/src/DataTypes/getMostSubtype.cpp b/src/DataTypes/getMostSubtype.cpp index 33b5735456e..d0ea716f2ff 100644 --- a/src/DataTypes/getMostSubtype.cpp +++ b/src/DataTypes/getMostSubtype.cpp @@ -297,6 +297,8 @@ DataTypePtr getMostSubtype(const DataTypes & types, bool throw_if_result_is_noth minimize(min_bits_of_signed_integer, 128); else if (typeid_cast(type.get())) minimize(min_bits_of_signed_integer, 256); + else if (typeid_cast(type.get())) + minimize(min_mantissa_bits_of_floating, 8); else if (typeid_cast(type.get())) minimize(min_mantissa_bits_of_floating, 24); else if (typeid_cast(type.get())) @@ -313,7 +315,9 @@ DataTypePtr getMostSubtype(const DataTypes & types, bool throw_if_result_is_noth /// If the result must be floating. 
if (!min_bits_of_signed_integer && !min_bits_of_unsigned_integer) { - if (min_mantissa_bits_of_floating <= 24) + if (min_mantissa_bits_of_floating <= 8) + return std::make_shared(); + else if (min_mantissa_bits_of_floating <= 24) return std::make_shared(); else if (min_mantissa_bits_of_floating <= 53) return std::make_shared(); diff --git a/src/Formats/ProtobufSerializer.cpp b/src/Formats/ProtobufSerializer.cpp index dd37c25719c..872991709af 100644 --- a/src/Formats/ProtobufSerializer.cpp +++ b/src/Formats/ProtobufSerializer.cpp @@ -540,7 +540,7 @@ namespace case FieldTypeId::TYPE_ENUM: { - if (std::is_floating_point_v) + if (is_floating_point_v) incompatibleColumnType(TypeName); write_function = [this](NumberType value) diff --git a/src/Functions/DivisionUtils.h b/src/Functions/DivisionUtils.h index ff07309e248..2508bd2b62b 100644 --- a/src/Functions/DivisionUtils.h +++ b/src/Functions/DivisionUtils.h @@ -47,9 +47,9 @@ inline auto checkedDivision(A a, B b) { throwIfDivisionLeadsToFPE(a, b); - if constexpr (is_big_int_v && std::is_floating_point_v) + if constexpr (is_big_int_v && is_floating_point_v) return static_cast(a) / b; - else if constexpr (is_big_int_v && std::is_floating_point_v) + else if constexpr (is_big_int_v && is_floating_point_v) return a / static_cast(b); else if constexpr (is_big_int_v && is_big_int_v) return static_cast(a / b); @@ -86,17 +86,17 @@ struct DivideIntegralImpl { /// Comparisons are not strict to avoid rounding issues when operand is implicitly casted to float. - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) if (isNaN(a) || a >= std::numeric_limits::max() || a <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) if (isNaN(b) || b >= std::numeric_limits::max() || b <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); auto res = checkedDivision(CastA(a), CastB(b)); - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) if (isNaN(res) || res >= static_cast(std::numeric_limits::max()) || res <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division, because it will produce infinite or too large number"); @@ -122,18 +122,18 @@ struct ModuloImpl template static inline Result apply(A a, B b) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { /// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance. 
return static_cast(a) - trunc(static_cast(a) / static_cast(b)) * static_cast(b); } else { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) if (isNaN(a) || a > std::numeric_limits::max() || a < std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) if (isNaN(b) || b > std::numeric_limits::max() || b < std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); diff --git a/src/Functions/FunctionMathUnary.h b/src/Functions/FunctionMathUnary.h index 9f400932356..8395855a564 100644 --- a/src/Functions/FunctionMathUnary.h +++ b/src/Functions/FunctionMathUnary.h @@ -66,7 +66,7 @@ private: /// Process all data as a whole and use FastOps implementation /// If the argument is integer, convert to Float64 beforehand - if constexpr (!std::is_floating_point_v) + if constexpr (!is_floating_point_v) { PODArray tmp_vec(size); for (size_t i = 0; i < size; ++i) @@ -150,7 +150,7 @@ private: { using Types = std::decay_t; using Type = typename Types::RightType; - using ReturnType = std::conditional_t, Float64, Type>; + using ReturnType = std::conditional_t, Float64, Type>; using ColVecType = ColumnVectorOrDecimal; const auto col_vec = checkAndGetColumn(col.column.get()); diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index eed75788fcd..fe4b14f5053 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -291,7 +291,7 @@ struct ConvertImpl else { /// If From Data is Nan or Inf and we convert to integer type, throw exception - if constexpr (std::is_floating_point_v && !std::is_floating_point_v) + if constexpr (is_floating_point_v && !is_floating_point_v) { if (!isFinite(vec_from[i])) { @@ -1314,7 +1314,7 @@ inline void convertFromTime(DataTypeDateTime::FieldType & x, t template void parseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool precise_float_parsing) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { if (precise_float_parsing) readFloatTextPrecise(x, rb); @@ -1378,7 +1378,7 @@ inline void parseImpl(DataTypeIPv6::FieldType & x, ReadBuffer & rb template bool tryParseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool precise_float_parsing) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { if (precise_float_parsing) return tryReadFloatTextPrecise(x, rb); @@ -2350,9 +2350,9 @@ private: using RightT = typename RightDataType::FieldType; static constexpr bool bad_left = - is_decimal || std::is_floating_point_v || is_big_int_v || is_signed_v; + is_decimal || is_floating_point_v || is_big_int_v || is_signed_v; static constexpr bool bad_right = - is_decimal || std::is_floating_point_v || is_big_int_v || is_signed_v; + is_decimal || is_floating_point_v || is_big_int_v || is_signed_v; /// Disallow int vs UUID conversion (but support int vs UInt128 conversion) if constexpr ((bad_left && std::is_same_v) || @@ -2678,7 +2678,7 @@ struct ToNumberMonotonicity /// Float cases. /// When converting to Float, the conversion is always monotonic. 
- if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) return { .is_monotonic = true, .is_always_monotonic = true }; const auto * low_cardinality = typeid_cast(&type); diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 3d1028c6d35..d775d616eb2 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -461,7 +461,7 @@ template - using FunctionRoundingImpl = std::conditional_t, + using FunctionRoundingImpl = std::conditional_t, FloatRoundingImpl, IntegerRoundingImpl>; diff --git a/src/Functions/factorial.cpp b/src/Functions/factorial.cpp index b814e8198e6..be545e398cd 100644 --- a/src/Functions/factorial.cpp +++ b/src/Functions/factorial.cpp @@ -21,7 +21,7 @@ struct FactorialImpl static inline NO_SANITIZE_UNDEFINED ResultType apply(A a) { - if constexpr (std::is_floating_point_v || is_over_big_int) + if constexpr (is_floating_point_v || is_over_big_int) throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type of argument of function factorial, should not be floating point or big int"); diff --git a/src/Functions/minus.cpp b/src/Functions/minus.cpp index 04877a42b18..109e5894f5e 100644 --- a/src/Functions/minus.cpp +++ b/src/Functions/minus.cpp @@ -17,8 +17,8 @@ struct MinusImpl { if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) - static_cast(static_cast(b)); } diff --git a/src/Functions/moduloOrZero.cpp b/src/Functions/moduloOrZero.cpp index 3551ae74c5f..bfd786940ce 100644 --- a/src/Functions/moduloOrZero.cpp +++ b/src/Functions/moduloOrZero.cpp @@ -17,7 +17,7 @@ struct ModuloOrZeroImpl template static inline Result apply(A a, B b) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { /// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance. return ResultType(a) - trunc(ResultType(a) / ResultType(b)) * ResultType(b); diff --git a/src/Functions/multiply.cpp b/src/Functions/multiply.cpp index 4dc8cd10f31..ef51fe6061e 100644 --- a/src/Functions/multiply.cpp +++ b/src/Functions/multiply.cpp @@ -18,8 +18,8 @@ struct MultiplyImpl { if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) * static_cast(static_cast(b)); } diff --git a/src/Functions/plus.cpp b/src/Functions/plus.cpp index cd9cf6cec5c..ea79fb4702a 100644 --- a/src/Functions/plus.cpp +++ b/src/Functions/plus.cpp @@ -19,8 +19,8 @@ struct PlusImpl /// Next everywhere, static_cast - so that there is no wrong result in expressions of the form Int64 c = UInt32(a) * Int32(-1). 
if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) + static_cast(static_cast(b)); } diff --git a/src/Functions/sign.cpp b/src/Functions/sign.cpp index 6c849760eed..59a307e43bb 100644 --- a/src/Functions/sign.cpp +++ b/src/Functions/sign.cpp @@ -13,7 +13,7 @@ struct SignImpl static inline NO_SANITIZE_UNDEFINED ResultType apply(A a) { - if constexpr (is_decimal || std::is_floating_point_v) + if constexpr (is_decimal || is_floating_point_v) return a < A(0) ? -1 : a == A(0) ? 0 : 1; else if constexpr (is_signed_v) return a < 0 ? -1 : a == 0 ? 0 : 1; diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 85584d63ee8..6068f49f5bf 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -1316,7 +1316,9 @@ inline bool tryReadText(UUID & x, ReadBuffer & buf) { return tryReadUUIDText(x, inline bool tryReadText(IPv4 & x, ReadBuffer & buf) { return tryReadIPv4Text(x, buf); } inline bool tryReadText(IPv6 & x, ReadBuffer & buf) { return tryReadIPv6Text(x, buf); } -inline void readText(is_floating_point auto & x, ReadBuffer & buf) { readFloatText(x, buf); } +template +requires is_floating_point_v +inline void readText(T & x, ReadBuffer & buf) { readFloatText(x, buf); } inline void readText(String & x, ReadBuffer & buf) { readEscapedString(x, buf); } diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index b4f8b476b11..c6a86b05f4d 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -153,6 +153,7 @@ inline void writeBoolText(bool x, WriteBuffer & buf) template +requires is_floating_point_v inline size_t writeFloatTextFastPath(T x, char * buffer) { Int64 result = 0; @@ -169,10 +170,13 @@ inline size_t writeFloatTextFastPath(T x, char * buffer) } else { - if (DecomposedFloat32(x).isIntegerInRepresentableRange()) - result = itoa(Int32(x), buffer) - buffer; + /// This will support 16-bit floats as well. 
+ float f32 = x; + + if (DecomposedFloat32(f32).isIntegerInRepresentableRange()) + result = itoa(Int32(f32), buffer) - buffer; else - result = jkj::dragonbox::to_chars_n(x, buffer) - buffer; + result = jkj::dragonbox::to_chars_n(f32, buffer) - buffer; } if (result <= 0) @@ -181,10 +185,9 @@ inline size_t writeFloatTextFastPath(T x, char * buffer) } template +requires is_floating_point_v inline void writeFloatText(T x, WriteBuffer & buf) { - static_assert(std::is_same_v || std::is_same_v, "Argument for writeFloatText must be float or double"); - using Converter = DoubleConverter; if (likely(buf.available() >= Converter::MAX_REPRESENTATION_LENGTH)) { @@ -530,7 +533,7 @@ void writeJSONNumber(T x, WriteBuffer & ostr, const FormatSettings & settings) bool is_finite = isFinite(x); const bool need_quote = (is_integer && (sizeof(T) >= 8) && settings.json.quote_64bit_integers) - || (settings.json.quote_denormals && !is_finite) || (is_floating_point && (sizeof(T) >= 8) && settings.json.quote_64bit_floats); + || (settings.json.quote_denormals && !is_finite) || (is_floating_point_v && (sizeof(T) >= 8) && settings.json.quote_64bit_floats); if (need_quote) writeChar('"', ostr); @@ -541,7 +544,7 @@ void writeJSONNumber(T x, WriteBuffer & ostr, const FormatSettings & settings) writeCString("null", ostr); else { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point_v) { if (std::signbit(x)) { @@ -800,7 +803,6 @@ inline void writeXMLStringForTextElement(std::string_view s, WriteBuffer & buf) } /// @brief Serialize `uuid` into an array of characters in big-endian byte order. -/// @param uuid UUID to serialize. /// @return Array of characters in big-endian byte order. std::array formatUUID(const UUID & uuid); @@ -1065,7 +1067,9 @@ inline void writeText(is_integer auto x, WriteBuffer & buf) writeIntText(x, buf); } -inline void writeText(is_floating_point auto x, WriteBuffer & buf) { writeFloatText(x, buf); } +template +requires is_floating_point_v +inline void writeText(T x, WriteBuffer & buf) { writeFloatText(x, buf); } inline void writeText(is_enum auto x, WriteBuffer & buf) { writeText(magic_enum::enum_name(x), buf); } diff --git a/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp index 4335cde47f9..61caacd8346 100644 --- a/src/Interpreters/RowRefs.cpp +++ b/src/Interpreters/RowRefs.cpp @@ -181,7 +181,7 @@ private: if (!sorted.load(std::memory_order_relaxed)) { - if constexpr (std::is_arithmetic_v && !std::is_floating_point_v) + if constexpr (std::is_arithmetic_v && !is_floating_point_v) { if (likely(entries.size() > 256)) { From 2750f8ca1d9b7336fbfec6fc0e73a8fbf17eadee Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 2 Jun 2024 02:27:48 +0200 Subject: [PATCH 002/267] Whitespace --- src/Storages/StorageSet.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp index 205a90423bf..a8c8e81e23d 100644 --- a/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ -130,7 +130,6 @@ StorageSetOrJoinBase::StorageSetOrJoinBase( storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); - if (relative_path_.empty()) throw Exception(ErrorCodes::INCORRECT_FILE_NAME, "Join and Set storages require data path"); From 6e08f415c49afeac27ce08f97cde365dbf5940a2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 2 Jun 2024 04:26:14 +0200 Subject: [PATCH 003/267] Preparation --- base/base/DecomposedFloat.h | 9 ++++++++ base/base/EnumReflection.h | 2 +- base/base/extended_types.h | 14 ++++-------- 
base/base/wide_integer_impl.h | 2 +- .../AggregateFunctionGroupArray.cpp | 4 ++-- .../AggregateFunctionGroupArrayMoving.cpp | 2 +- .../AggregateFunctionIntervalLengthSum.cpp | 4 ++-- .../AggregateFunctionSparkbar.cpp | 6 ++--- src/AggregateFunctions/AggregateFunctionSum.h | 6 ++--- src/AggregateFunctions/QuantileTDigest.h | 2 +- src/AggregateFunctions/ReservoirSampler.h | 2 +- .../ReservoirSamplerDeterministic.h | 2 +- src/Columns/ColumnVector.cpp | 16 +++++++------- src/Common/FieldVisitorConvertToNumber.h | 4 ++-- src/Common/HashTable/HashTable.h | 2 +- src/Common/NaNUtils.h | 14 ++++++------ src/Common/findExtreme.cpp | 2 +- src/Common/transformEndianness.h | 2 +- src/Core/AccurateComparison.h | 18 +++++++-------- src/Core/DecimalFunctions.h | 2 +- src/DataTypes/DataTypesDecimal.cpp | 5 +++-- src/DataTypes/NumberTraits.h | 22 +++++++++---------- src/Formats/ProtobufSerializer.cpp | 2 +- src/Functions/DivisionUtils.h | 16 +++++++------- src/Functions/FunctionMathUnary.h | 4 ++-- src/Functions/FunctionsConversion.cpp | 12 +++++----- src/Functions/FunctionsJSON.h | 4 ++-- src/Functions/FunctionsRound.h | 2 +- src/Functions/FunctionsVisitParam.h | 2 +- src/Functions/abs.cpp | 2 +- src/Functions/array/arrayAggregation.cpp | 2 +- src/Functions/factorial.cpp | 2 +- src/Functions/if.cpp | 16 +++++++------- src/Functions/minus.cpp | 4 ++-- src/Functions/moduloOrZero.cpp | 2 +- src/Functions/multiply.cpp | 4 ++-- src/Functions/plus.cpp | 4 ++-- src/Functions/sign.cpp | 2 +- src/IO/ReadHelpers.h | 2 +- src/IO/WriteHelpers.h | 10 ++++----- src/Interpreters/RowRefs.cpp | 2 +- 41 files changed, 120 insertions(+), 116 deletions(-) diff --git a/base/base/DecomposedFloat.h b/base/base/DecomposedFloat.h index 0997c39db16..b5bc3f08357 100644 --- a/base/base/DecomposedFloat.h +++ b/base/base/DecomposedFloat.h @@ -96,6 +96,15 @@ struct DecomposedFloat && ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0)); } + bool isFinite() const + { + return exponent() != ((1ull << Traits::exponent_bits) - 1); + } + + bool isNaN() const + { + return !isFinite() && (mantissa() != 0); + } /// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic. /// This function is generic, big integers (128, 256 bit) are supported as well. 
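The isFinite()/isNaN() predicates added above test the IEEE-754 encoding directly: a value is non-finite exactly when its exponent field is all ones, and it is NaN when the mantissa is additionally non-zero. A self-contained binary32 version of the same test (helper names are ours, not part of the patch):

    #include <bit>
    #include <cstdint>

    /// binary32 layout: 1 sign bit, 8 exponent bits, 23 mantissa bits.
    static bool isFiniteBits(float x)
    {
        const uint32_t bits = std::bit_cast<uint32_t>(x);
        return ((bits >> 23) & 0xFF) != 0xFF;   /// exponent not all-ones => finite
    }

    static bool isNaNBits(float x)
    {
        const uint32_t bits = std::bit_cast<uint32_t>(x);
        return ((bits >> 23) & 0xFF) == 0xFF && (bits & 0x007FFFFF) != 0;   /// Inf has a zero mantissa
    }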
diff --git a/base/base/EnumReflection.h b/base/base/EnumReflection.h index 4a9de4d17a3..963c7e3f1b9 100644 --- a/base/base/EnumReflection.h +++ b/base/base/EnumReflection.h @@ -4,7 +4,7 @@ #include -template concept is_enum = std::is_enum_v; +template concept is_enum = std::is_enum_v; namespace detail { diff --git a/base/base/extended_types.h b/base/base/extended_types.h index de654152649..7ddf7de7e22 100644 --- a/base/base/extended_types.h +++ b/base/base/extended_types.h @@ -43,7 +43,7 @@ template <> struct is_unsigned { static constexpr bool value = true; }; template inline constexpr bool is_unsigned_v = is_unsigned::value; -template concept is_integer = +template concept is_integer = std::is_integral_v || std::is_same_v || std::is_same_v @@ -65,16 +65,10 @@ template <> struct is_arithmetic { static constexpr bool value = true; template inline constexpr bool is_arithmetic_v = is_arithmetic::value; -template -struct is_floating_point // NOLINT(readability-identifier-naming) -{ - static constexpr bool value = std::is_floating_point_v; -}; +template concept is_floating_point = + std::is_floating_point_v + || std::is_same_v; -template <> struct is_floating_point { static constexpr bool value = true; }; - -template -inline constexpr bool is_floating_point_v = is_floating_point::value; #define FOR_EACH_ARITHMETIC_TYPE(M) \ M(DataTypeDate) \ diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h index c950fd27fa3..d0bbd7df9d4 100644 --- a/base/base/wide_integer_impl.h +++ b/base/base/wide_integer_impl.h @@ -154,7 +154,7 @@ struct common_type, Arithmetic> static_assert(wide::ArithmeticConcept()); using type = std::conditional_t< - is_floating_point_v, + std::is_floating_point_v || std::is_same_v, Arithmetic, std::conditional_t< sizeof(Arithmetic) * 8 < Bits, diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp index 0b478fe3c04..3a0bbb001c3 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp @@ -73,7 +73,7 @@ template struct GroupArraySamplerData { /// For easy serialization. - static_assert(std::has_unique_object_representations_v || is_floating_point_v); + static_assert(std::has_unique_object_representations_v || is_floating_point); // Switch to ordinary Allocator after 4096 bytes to avoid fragmentation and trash in Arena using Allocator = MixedAlignedArenaAllocator; @@ -115,7 +115,7 @@ template struct GroupArrayNumericData { /// For easy serialization. - static_assert(std::has_unique_object_representations_v || is_floating_point_v); + static_assert(std::has_unique_object_representations_v || is_floating_point); // Switch to ordinary Allocator after 4096 bytes to avoid fragmentation and trash in Arena using Allocator = MixedAlignedArenaAllocator; diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp index ee6a82686c5..a9a09d7abd5 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp @@ -38,7 +38,7 @@ template struct MovingData { /// For easy serialization. 
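The extended_types.h hunk above replaces the is_floating_point trait with a C++20 concept so that BFloat16 is accepted everywhere ordinary floats are, and the serialization static_asserts that follow are among the first call sites switched over. A minimal standalone sketch of the pattern (BFloat16Stub is our stand-in for the real __bf16-based type):

    #include <type_traits>

    struct BFloat16Stub {};   /// stand-in for the real BFloat16

    template <typename T>
    concept is_floating_point_like = std::is_floating_point_v<T> || std::is_same_v<T, BFloat16Stub>;

    template <typename T>
    constexpr bool isFloatKind()
    {
        if constexpr (is_floating_point_like<T>)
            return true;
        else
            return false;
    }

    static_assert(isFloatKind<double>());
    static_assert(isFloatKind<BFloat16Stub>());
    static_assert(!isFloatKind<int>());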
- static_assert(std::has_unique_object_representations_v || is_floating_point_v); + static_assert(std::has_unique_object_representations_v || is_floating_point); using Accumulator = T; diff --git a/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp b/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp index 06156643aa0..e5404add820 100644 --- a/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp +++ b/src/AggregateFunctions/AggregateFunctionIntervalLengthSum.cpp @@ -187,7 +187,7 @@ public: static DataTypePtr createResultType() { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) return std::make_shared(); return std::make_shared(); } @@ -227,7 +227,7 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) assert_cast(to).getData().push_back(getIntervalLengthSum(this->data(place))); else assert_cast(to).getData().push_back(getIntervalLengthSum(this->data(place))); diff --git a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp index 5b6fc3b315c..33412d50b21 100644 --- a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp +++ b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp @@ -50,7 +50,7 @@ struct AggregateFunctionSparkbarData auto [it, inserted] = points.insert({x, y}); if (!inserted) { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { it->getMapped() += y; return it->getMapped(); @@ -197,7 +197,7 @@ private: Y res; bool has_overfllow = false; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) res = histogram[index] + point.getMapped(); else has_overfllow = common::addOverflow(histogram[index], point.getMapped(), res); @@ -246,7 +246,7 @@ private: } constexpr auto levels_num = static_cast(BAR_LEVELS - 1); - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { y = y / (y_max / levels_num) + 1; } diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index c663c632280..d0d600be70b 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -69,7 +69,7 @@ struct AggregateFunctionSumData size_t count = end - start; const auto * end_ptr = ptr + count; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { /// Compiler cannot unroll this loop, do it manually. 
/// (at least for floats, most likely due to the lack of -fassociative-math) @@ -193,7 +193,7 @@ struct AggregateFunctionSumData Impl::add(sum, local_sum); return; } - else if constexpr (is_floating_point_v) + else if constexpr (is_floating_point) { /// For floating point we use a similar trick as above, except that now we reinterpret the floating point number as an unsigned /// integer of the same size and use a mask instead (0 to discard, 0xFF..FF to keep) @@ -306,7 +306,7 @@ struct AggregateFunctionSumData template struct AggregateFunctionSumKahanData { - static_assert(is_floating_point_v, + static_assert(is_floating_point, "It doesn't make sense to use Kahan Summation algorithm for non floating point types"); T sum{}; diff --git a/src/AggregateFunctions/QuantileTDigest.h b/src/AggregateFunctions/QuantileTDigest.h index 408e500e941..a693c57e6d8 100644 --- a/src/AggregateFunctions/QuantileTDigest.h +++ b/src/AggregateFunctions/QuantileTDigest.h @@ -379,7 +379,7 @@ public: ResultType getImpl(Float64 level) { if (centroids.empty()) - return is_floating_point_v ? std::numeric_limits::quiet_NaN() : 0; + return is_floating_point ? std::numeric_limits::quiet_NaN() : 0; compress(); diff --git a/src/AggregateFunctions/ReservoirSampler.h b/src/AggregateFunctions/ReservoirSampler.h index 182a49af2ca..c21e76614c1 100644 --- a/src/AggregateFunctions/ReservoirSampler.h +++ b/src/AggregateFunctions/ReservoirSampler.h @@ -278,6 +278,6 @@ private: if (OnEmpty == ReservoirSamplerOnEmpty::THROW) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSampler"); else - return NanLikeValueConstructor>::getValue(); + return NanLikeValueConstructor>::getValue(); } }; diff --git a/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h index c9afcb21549..7fe5d23f4e4 100644 --- a/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ b/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -272,7 +272,7 @@ private: if (OnEmpty == ReservoirSamplerDeterministicOnEmpty::THROW) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSamplerDeterministic"); else - return NanLikeValueConstructor>::getValue(); + return NanLikeValueConstructor>::getValue(); } }; diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp index 19849b8a1c6..2b137231faa 100644 --- a/src/Columns/ColumnVector.cpp +++ b/src/Columns/ColumnVector.cpp @@ -118,7 +118,7 @@ struct ColumnVector::less_stable if (unlikely(parent.data[lhs] == parent.data[rhs])) return lhs < rhs; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { if (unlikely(std::isnan(parent.data[lhs]) && std::isnan(parent.data[rhs]))) { @@ -150,7 +150,7 @@ struct ColumnVector::greater_stable if (unlikely(parent.data[lhs] == parent.data[rhs])) return lhs < rhs; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { if (unlikely(std::isnan(parent.data[lhs]) && std::isnan(parent.data[rhs]))) { @@ -224,9 +224,9 @@ void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction iota(res.data(), data_size, IColumn::Permutation::value_type(0)); - if constexpr (has_find_extreme_implementation && !std::is_floating_point_v) + if constexpr (has_find_extreme_implementation && !is_floating_point) { - /// Disabled for:floating point + /// Disabled for floating point: /// * floating point: We don't deal with nan_direction_hint /// * stability::Stable: We might return any value, not the first if ((limit == 1) && (stability 
== IColumn::PermutationSortStability::Unstable)) @@ -256,7 +256,7 @@ void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction bool sort_is_stable = stability == IColumn::PermutationSortStability::Stable; /// TODO: LSD RadixSort is currently not stable if direction is descending, or value is floating point - bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point_v) || !sort_is_stable; + bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point) || !sort_is_stable; /// Thresholds on size. Lower threshold is arbitrary. Upper threshold is chosen by the type for histogram counters. if (data_size >= 256 && data_size <= std::numeric_limits::max() && use_radix_sort) @@ -283,7 +283,7 @@ void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction /// Radix sort treats all NaNs to be greater than all numbers. /// If the user needs the opposite, we must move them accordingly. - if (is_floating_point_v && nan_direction_hint < 0) + if (is_floating_point && nan_direction_hint < 0) { size_t nans_to_move = 0; @@ -330,7 +330,7 @@ void ColumnVector::updatePermutation(IColumn::PermutationSortDirection direct if constexpr (is_arithmetic_v && !is_big_int_v) { /// TODO: LSD RadixSort is currently not stable if direction is descending, or value is floating point - bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point_v) || !sort_is_stable; + bool use_radix_sort = (sort_is_stable && ascending && !is_floating_point) || !sort_is_stable; size_t size = end - begin; /// Thresholds on size. Lower threshold is arbitrary. Upper threshold is chosen by the type for histogram counters. @@ -353,7 +353,7 @@ void ColumnVector::updatePermutation(IColumn::PermutationSortDirection direct /// Radix sort treats all NaNs to be greater than all numbers. /// If the user needs the opposite, we must move them accordingly. - if (is_floating_point_v && nan_direction_hint < 0) + if (is_floating_point && nan_direction_hint < 0) { size_t nans_to_move = 0; diff --git a/src/Common/FieldVisitorConvertToNumber.h b/src/Common/FieldVisitorConvertToNumber.h index 646caadce35..ebd084df54d 100644 --- a/src/Common/FieldVisitorConvertToNumber.h +++ b/src/Common/FieldVisitorConvertToNumber.h @@ -58,7 +58,7 @@ public: T operator() (const Float64 & x) const { - if constexpr (!is_floating_point_v) + if constexpr (!is_floating_point) { if (!isFinite(x)) { @@ -88,7 +88,7 @@ public: template T operator() (const DecimalField & x) const { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) return x.getValue().template convertTo() / x.getScaleMultiplier().template convertTo(); else return (x.getValue() / x.getScaleMultiplier()).template convertTo(); diff --git a/src/Common/HashTable/HashTable.h b/src/Common/HashTable/HashTable.h index fd8832a56a3..8237c81461f 100644 --- a/src/Common/HashTable/HashTable.h +++ b/src/Common/HashTable/HashTable.h @@ -91,7 +91,7 @@ inline bool bitEquals(T && a, T && b) { using RealT = std::decay_t; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) /// Note that memcmp with constant size is compiler builtin. return 0 == memcmp(&a, &b, sizeof(RealT)); /// NOLINT else diff --git a/src/Common/NaNUtils.h b/src/Common/NaNUtils.h index 0e885541599..3e4af902104 100644 --- a/src/Common/NaNUtils.h +++ b/src/Common/NaNUtils.h @@ -3,24 +3,24 @@ #include #include #include +#include template inline bool isNaN(T x) { /// To be sure, that this function is zero-cost for non-floating point types. 
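The NaNUtils.h hunk that follows swaps std::isnan/std::isfinite for bit-level tests through DecomposedFloat, since the std:: overloads do not exist for BFloat16, and the if constexpr keeps the integer instantiations literally free. A hedged standalone sketch of that dispatch (binary32 branch only):

    #include <bit>
    #include <cstdint>
    #include <type_traits>

    template <typename T>
    bool isNaNSketch(T x)
    {
        if constexpr (std::is_same_v<T, float>)
        {
            const uint32_t bits = std::bit_cast<uint32_t>(x);
            return ((bits >> 23) & 0xFF) == 0xFF && (bits & 0x007FFFFF) != 0;
        }
        else
            return false;   /// integers are never NaN; this instantiation folds to a constant
    }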
- if constexpr (is_floating_point_v) - return std::isnan(x); + if constexpr (is_floating_point) + return DecomposedFloat(x).isNaN(); else return false; } - template inline bool isFinite(T x) { - if constexpr (is_floating_point_v) - return std::isfinite(x); + if constexpr (is_floating_point) + return DecomposedFloat(x).isFinite(); else return true; } @@ -28,7 +28,7 @@ inline bool isFinite(T x) template bool canConvertTo(Float64 x) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) return true; if (!isFinite(x)) return false; @@ -41,7 +41,7 @@ bool canConvertTo(Float64 x) template T NaNOrZero() { - if constexpr (is_floating_point_v) + if constexpr (std::is_floating_point_v) return std::numeric_limits::quiet_NaN(); else return {}; diff --git a/src/Common/findExtreme.cpp b/src/Common/findExtreme.cpp index ce3bbb86d7c..a29750b848a 100644 --- a/src/Common/findExtreme.cpp +++ b/src/Common/findExtreme.cpp @@ -47,7 +47,7 @@ MULTITARGET_FUNCTION_AVX2_SSE42( /// Unroll the loop manually for floating point, since the compiler doesn't do it without fastmath /// as it might change the return value - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { constexpr size_t unroll_block = 512 / sizeof(T); /// Chosen via benchmarks with AVX2 so YMMV size_t unrolled_end = i + (((count - i) / unroll_block) * unroll_block); diff --git a/src/Common/transformEndianness.h b/src/Common/transformEndianness.h index 2a0c45efe38..e6e04ec75af 100644 --- a/src/Common/transformEndianness.h +++ b/src/Common/transformEndianness.h @@ -38,7 +38,7 @@ inline void transformEndianness(T & x) } template -requires is_floating_point_v +requires is_floating_point inline void transformEndianness(T & value) { if constexpr (ToEndian != FromEndian) diff --git a/src/Core/AccurateComparison.h b/src/Core/AccurateComparison.h index c1e93b8055a..87ff14e40e7 100644 --- a/src/Core/AccurateComparison.h +++ b/src/Core/AccurateComparison.h @@ -25,7 +25,7 @@ bool lessOp(A a, B b) return a < b; /// float vs float - if constexpr (is_floating_point_v && is_floating_point_v) + if constexpr (is_floating_point && is_floating_point) return a < b; /// anything vs NaN @@ -49,7 +49,7 @@ bool lessOp(A a, B b) } /// int vs float - if constexpr (is_integer && is_floating_point_v) + if constexpr (is_integer && is_floating_point) { if constexpr (sizeof(A) <= 4) return static_cast(a) < static_cast(b); @@ -57,7 +57,7 @@ bool lessOp(A a, B b) return DecomposedFloat(b).greater(a); } - if constexpr (is_floating_point_v && is_integer) + if constexpr (is_floating_point && is_integer) { if constexpr (sizeof(B) <= 4) return static_cast(a) < static_cast(b); @@ -65,8 +65,8 @@ bool lessOp(A a, B b) return DecomposedFloat(a).less(b); } - static_assert(is_integer || is_floating_point_v); - static_assert(is_integer || is_floating_point_v); + static_assert(is_integer || is_floating_point); + static_assert(is_integer || is_floating_point); UNREACHABLE(); } @@ -101,7 +101,7 @@ bool equalsOp(A a, B b) return a == b; /// float vs float - if constexpr (is_floating_point_v && is_floating_point_v) + if constexpr (is_floating_point && is_floating_point) return a == b; /// anything vs NaN @@ -125,7 +125,7 @@ bool equalsOp(A a, B b) } /// int vs float - if constexpr (is_integer && is_floating_point_v) + if constexpr (is_integer && is_floating_point) { if constexpr (sizeof(A) <= 4) return static_cast(a) == static_cast(b); @@ -133,7 +133,7 @@ bool equalsOp(A a, B b) return DecomposedFloat(b).equals(a); } - if constexpr (is_floating_point_v 
&& is_integer) + if constexpr (is_floating_point && is_integer) { if constexpr (sizeof(B) <= 4) return static_cast(a) == static_cast(b); @@ -163,7 +163,7 @@ inline bool NO_SANITIZE_UNDEFINED convertNumeric(From value, To & result) return true; } - if constexpr (is_floating_point_v && is_floating_point_v) + if constexpr (is_floating_point && is_floating_point) { /// Note that NaNs doesn't compare equal to anything, but they are still in range of any Float type. if (isNaN(value)) diff --git a/src/Core/DecimalFunctions.h b/src/Core/DecimalFunctions.h index c5bc4ad70f6..435cef61145 100644 --- a/src/Core/DecimalFunctions.h +++ b/src/Core/DecimalFunctions.h @@ -310,7 +310,7 @@ ReturnType convertToImpl(const DecimalType & decimal, UInt32 scale, To & result) using DecimalNativeType = typename DecimalType::NativeType; static constexpr bool throw_exception = std::is_void_v; - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { result = static_cast(decimal.value) / static_cast(scaleMultiplier(scale)); } diff --git a/src/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp index 77a7a3e7237..d87eff97675 100644 --- a/src/DataTypes/DataTypesDecimal.cpp +++ b/src/DataTypes/DataTypesDecimal.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -269,9 +270,9 @@ ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & value, static constexpr bool throw_exception = std::is_same_v; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { - if (!std::isfinite(value)) + if (!isFinite(value)) { if constexpr (throw_exception) throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "{} convert overflow. Cannot convert infinity or NaN to decimal", ToDataType::family_name); diff --git a/src/DataTypes/NumberTraits.h b/src/DataTypes/NumberTraits.h index ad1e9eaa67b..ee0d9812097 100644 --- a/src/DataTypes/NumberTraits.h +++ b/src/DataTypes/NumberTraits.h @@ -74,7 +74,7 @@ template struct ResultOfAdditionMultiplication { using Type = typename Construct< is_signed_v || is_signed_v, - is_floating_point_v || is_floating_point_v, + is_floating_point || is_floating_point, nextSize(max(sizeof(A), sizeof(B)))>::Type; }; @@ -82,7 +82,7 @@ template struct ResultOfSubtraction { using Type = typename Construct< true, - is_floating_point_v || is_floating_point_v, + is_floating_point || is_floating_point, nextSize(max(sizeof(A), sizeof(B)))>::Type; }; @@ -113,7 +113,7 @@ template struct ResultOfModulo /// Example: toInt32(-199) % toUInt8(200) will return -199 that does not fit in Int8, only in Int16. static constexpr size_t size_of_result = result_is_signed ? nextSize(sizeof(B)) : sizeof(B); using Type0 = typename Construct::Type; - using Type = std::conditional_t || is_floating_point_v, Float64, Type0>; + using Type = std::conditional_t || is_floating_point, Float64, Type0>; }; template struct ResultOfPositiveModulo @@ -121,21 +121,21 @@ template struct ResultOfPositiveModulo /// function positive_modulo always return non-negative number. 
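As an aside on the AccurateComparison hunks above: the sizeof <= 4 branches may cast both operands to double because every 32-bit integer round-trips through a double exactly, while 64-bit integers do not, which is why the wide cases go through DecomposedFloat instead. A small demonstration of the failure the naive cast path would hit:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        int64_t a = 9007199254740993;          /// 2^53 + 1, not representable as double
        double b = 9007199254740992.0;         /// 2^53
        assert(static_cast<double>(a) == b);   /// the naive cast says "equal" ...
        assert(a > static_cast<int64_t>(b));   /// ... but the integers differ
    }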
static constexpr size_t size_of_result = sizeof(B); using Type0 = typename Construct::Type; - using Type = std::conditional_t || is_floating_point_v, Float64, Type0>; + using Type = std::conditional_t || is_floating_point, Float64, Type0>; }; template struct ResultOfModuloLegacy { using Type0 = typename Construct || is_signed_v, false, sizeof(B)>::Type; - using Type = std::conditional_t || is_floating_point_v, Float64, Type0>; + using Type = std::conditional_t || is_floating_point, Float64, Type0>; }; template struct ResultOfNegate { using Type = typename Construct< true, - is_floating_point_v, + is_floating_point, is_signed_v ? sizeof(A) : nextSize(sizeof(A))>::Type; }; @@ -143,7 +143,7 @@ template struct ResultOfAbs { using Type = typename Construct< false, - is_floating_point_v, + is_floating_point, sizeof(A)>::Type; }; @@ -154,7 +154,7 @@ template struct ResultOfBit using Type = typename Construct< is_signed_v || is_signed_v, false, - is_floating_point_v || is_floating_point_v ? 8 : max(sizeof(A), sizeof(B))>::Type; + is_floating_point || is_floating_point ? 8 : max(sizeof(A), sizeof(B))>::Type; }; template struct ResultOfBitNot @@ -180,7 +180,7 @@ template struct ResultOfBitNot template struct ResultOfIf { - static constexpr bool has_float = is_floating_point_v || is_floating_point_v; + static constexpr bool has_float = is_floating_point || is_floating_point; static constexpr bool has_integer = is_integer || is_integer; static constexpr bool has_signed = is_signed_v || is_signed_v; static constexpr bool has_unsigned = !is_signed_v || !is_signed_v; @@ -189,7 +189,7 @@ struct ResultOfIf static constexpr size_t max_size_of_unsigned_integer = max(is_signed_v ? 0 : sizeof(A), is_signed_v ? 0 : sizeof(B)); static constexpr size_t max_size_of_signed_integer = max(is_signed_v ? sizeof(A) : 0, is_signed_v ? sizeof(B) : 0); static constexpr size_t max_size_of_integer = max(is_integer ? sizeof(A) : 0, is_integer ? sizeof(B) : 0); - static constexpr size_t max_size_of_float = max(is_floating_point_v ? sizeof(A) : 0, is_floating_point_v ? sizeof(B) : 0); + static constexpr size_t max_size_of_float = max(is_floating_point ? sizeof(A) : 0, is_floating_point ? sizeof(B) : 0); using ConstructedType = typename Construct= max_size_of_float) @@ -211,7 +211,7 @@ template struct ToInteger using Type = typename Construct< is_signed_v, false, - is_floating_point_v ? 8 : sizeof(A)>::Type; + is_floating_point ? 
8 : sizeof(A)>::Type; }; diff --git a/src/Formats/ProtobufSerializer.cpp b/src/Formats/ProtobufSerializer.cpp index 7f03bdeb45d..86b11f45b72 100644 --- a/src/Formats/ProtobufSerializer.cpp +++ b/src/Formats/ProtobufSerializer.cpp @@ -541,7 +541,7 @@ namespace case FieldTypeId::TYPE_ENUM: { - if (is_floating_point_v) + if (is_floating_point) incompatibleColumnType(TypeName); write_function = [this](NumberType value) diff --git a/src/Functions/DivisionUtils.h b/src/Functions/DivisionUtils.h index 1a241c7171a..e8f5da342f8 100644 --- a/src/Functions/DivisionUtils.h +++ b/src/Functions/DivisionUtils.h @@ -47,9 +47,9 @@ inline auto checkedDivision(A a, B b) { throwIfDivisionLeadsToFPE(a, b); - if constexpr (is_big_int_v && is_floating_point_v) + if constexpr (is_big_int_v && is_floating_point) return static_cast(a) / b; - else if constexpr (is_big_int_v && is_floating_point_v) + else if constexpr (is_big_int_v && is_floating_point) return a / static_cast(b); else if constexpr (is_big_int_v && is_big_int_v) return static_cast(a / b); @@ -86,17 +86,17 @@ struct DivideIntegralImpl { /// Comparisons are not strict to avoid rounding issues when operand is implicitly casted to float. - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) if (isNaN(a) || a >= std::numeric_limits::max() || a <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) if (isNaN(b) || b >= std::numeric_limits::max() || b <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); auto res = checkedDivision(CastA(a), CastB(b)); - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) if (isNaN(res) || res >= static_cast(std::numeric_limits::max()) || res <= std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division, because it will produce infinite or too large number"); @@ -122,18 +122,18 @@ struct ModuloImpl template static Result apply(A a, B b) { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { /// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance. 
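The identity used by ModuloImpl on the next line, shown in isolation: for finite operands, a - trunc(a / b) * b agrees with fmod whenever the division itself is exact, and unlike the libm call it inlines:

    #include <cassert>
    #include <cmath>

    int main()
    {
        double a = 7.5, b = 2.0;
        double manual = a - std::trunc(a / b) * b;
        assert(manual == std::fmod(a, b));   /// both yield 1.5
    }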
return static_cast(a) - trunc(static_cast(a) / static_cast(b)) * static_cast(b); } else { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) if (isNaN(a) || a > std::numeric_limits::max() || a < std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) if (isNaN(b) || b > std::numeric_limits::max() || b < std::numeric_limits::lowest()) throw Exception(ErrorCodes::ILLEGAL_DIVISION, "Cannot perform integer division on infinite or too large floating point numbers"); diff --git a/src/Functions/FunctionMathUnary.h b/src/Functions/FunctionMathUnary.h index 8395855a564..2cbd9b2e03c 100644 --- a/src/Functions/FunctionMathUnary.h +++ b/src/Functions/FunctionMathUnary.h @@ -66,7 +66,7 @@ private: /// Process all data as a whole and use FastOps implementation /// If the argument is integer, convert to Float64 beforehand - if constexpr (!is_floating_point_v) + if constexpr (!is_floating_point) { PODArray tmp_vec(size); for (size_t i = 0; i < size; ++i) @@ -150,7 +150,7 @@ private: { using Types = std::decay_t; using Type = typename Types::RightType; - using ReturnType = std::conditional_t, Float64, Type>; + using ReturnType = std::conditional_t, Float64, Type>; using ColVecType = ColumnVectorOrDecimal; const auto col_vec = checkAndGetColumn(col.column.get()); diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 44d0b750af9..8512ea5726f 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -638,7 +638,7 @@ inline void convertFromTime(DataTypeDateTime::FieldType & x, t template void parseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool precise_float_parsing) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { if (precise_float_parsing) readFloatTextPrecise(x, rb); @@ -702,7 +702,7 @@ inline void parseImpl(DataTypeIPv6::FieldType & x, ReadBuffer & rb template bool tryParseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateLUTImpl *, bool precise_float_parsing) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { if (precise_float_parsing) return tryReadFloatTextPrecise(x, rb); @@ -1767,7 +1767,7 @@ struct ConvertImpl else { /// If From Data is Nan or Inf and we convert to integer type, throw exception - if constexpr (std::is_floating_point_v && !std::is_floating_point_v) + if constexpr (is_floating_point && !is_floating_point) { if (!isFinite(vec_from[i])) { @@ -2253,9 +2253,9 @@ private: using RightT = typename RightDataType::FieldType; static constexpr bool bad_left = - is_decimal || std::is_floating_point_v || is_big_int_v || is_signed_v; + is_decimal || is_floating_point || is_big_int_v || is_signed_v; static constexpr bool bad_right = - is_decimal || std::is_floating_point_v || is_big_int_v || is_signed_v; + is_decimal || is_floating_point || is_big_int_v || is_signed_v; /// Disallow int vs UUID conversion (but support int vs UInt128 conversion) if constexpr ((bad_left && std::is_same_v) || @@ -2578,7 +2578,7 @@ struct ToNumberMonotonicity /// Float cases. /// When converting to Float, the conversion is always monotonic. 
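Why the monotonicity claim above holds: rounding an integer to the nearest representable float can merge neighbouring values but never swaps their order, so the conversion is monotonic, just not strictly so, and that is all the monotonicity analysis needs:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        int64_t a = 9007199254740992;   /// 2^53
        int64_t b = a + 1;              /// rounds to the same double
        assert(static_cast<double>(a) == static_cast<double>(b));
        assert(static_cast<double>(a) <= static_cast<double>(b));   /// order preserved, non-strictly
    }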
- if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) return { .is_monotonic = true, .is_always_monotonic = true }; const auto * low_cardinality = typeid_cast(&type); diff --git a/src/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h index 8a2ad457d34..65c1a6fb2d2 100644 --- a/src/Functions/FunctionsJSON.h +++ b/src/Functions/FunctionsJSON.h @@ -741,7 +741,7 @@ public: switch (element.type()) { case ElementType::DOUBLE: - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { /// We permit inaccurate conversion of double to float. /// Example: double 0.1 from JSON is not representable in float. @@ -769,7 +769,7 @@ public: case ElementType::STRING: { auto rb = ReadBufferFromMemory{element.getString()}; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { if (!tryReadFloatText(value, rb) || !rb.eof()) return false; diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index ab62deed45d..46fbe70458d 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -453,7 +453,7 @@ template - using FunctionRoundingImpl = std::conditional_t, + using FunctionRoundingImpl = std::conditional_t, FloatRoundingImpl, IntegerRoundingImpl>; diff --git a/src/Functions/FunctionsVisitParam.h b/src/Functions/FunctionsVisitParam.h index 5e13fbbad5c..fd59ea3a9c1 100644 --- a/src/Functions/FunctionsVisitParam.h +++ b/src/Functions/FunctionsVisitParam.h @@ -57,7 +57,7 @@ struct ExtractNumericType ResultType x = 0; if (!in.eof()) { - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) tryReadFloatText(x, in); else tryReadIntText(x, in); diff --git a/src/Functions/abs.cpp b/src/Functions/abs.cpp index 9ac2363f765..3a618686b30 100644 --- a/src/Functions/abs.cpp +++ b/src/Functions/abs.cpp @@ -22,7 +22,7 @@ struct AbsImpl return a < 0 ? 
static_cast(~a) + 1 : static_cast(a); else if constexpr (is_integer && is_unsigned_v) return static_cast(a); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) return static_cast(std::abs(a)); } diff --git a/src/Functions/array/arrayAggregation.cpp b/src/Functions/array/arrayAggregation.cpp index 03aa5fb9086..9c17e1095c5 100644 --- a/src/Functions/array/arrayAggregation.cpp +++ b/src/Functions/array/arrayAggregation.cpp @@ -85,7 +85,7 @@ struct ArrayAggregateResultImpl std::conditional_t, Decimal128, std::conditional_t, Decimal256, std::conditional_t, Decimal128, - std::conditional_t, Float64, + std::conditional_t, Float64, std::conditional_t, Int64, UInt64>>>>>>>>>>>; }; diff --git a/src/Functions/factorial.cpp b/src/Functions/factorial.cpp index 3b46d9e867f..32bdc84b954 100644 --- a/src/Functions/factorial.cpp +++ b/src/Functions/factorial.cpp @@ -21,7 +21,7 @@ struct FactorialImpl static NO_SANITIZE_UNDEFINED ResultType apply(A a) { - if constexpr (is_floating_point_v || is_over_big_int) + if constexpr (is_floating_point || is_over_big_int) throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type of argument of function factorial, should not be floating point or big int"); diff --git a/src/Functions/if.cpp b/src/Functions/if.cpp index 7a6d37d810d..dded3d46652 100644 --- a/src/Functions/if.cpp +++ b/src/Functions/if.cpp @@ -87,7 +87,7 @@ inline void fillVectorVector(const ArrayCond & cond, const ArrayA & a, const Arr { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[a_index]) + (!cond[i]) * static_cast(b[b_index]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[a_index], b[b_index], res[i]) } @@ -105,7 +105,7 @@ inline void fillVectorVector(const ArrayCond & cond, const ArrayA & a, const Arr { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[a_index]) + (!cond[i]) * static_cast(b[i]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[a_index], b[i], res[i]) } @@ -122,7 +122,7 @@ inline void fillVectorVector(const ArrayCond & cond, const ArrayA & a, const Arr { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[i]) + (!cond[i]) * static_cast(b[b_index]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[i], b[b_index], res[i]) } @@ -138,7 +138,7 @@ inline void fillVectorVector(const ArrayCond & cond, const ArrayA & a, const Arr { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[i]) + (!cond[i]) * static_cast(b[i]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[i], b[i], res[i]) } @@ -162,7 +162,7 @@ inline void fillVectorConstant(const ArrayCond & cond, const ArrayA & a, B b, Ar { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[a_index]) + (!cond[i]) * static_cast(b); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[a_index], b, res[i]) } @@ -178,7 +178,7 @@ inline void fillVectorConstant(const ArrayCond & cond, const ArrayA & a, B b, Ar { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a[i]) + (!cond[i]) * static_cast(b); - else if constexpr 
(std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a[i], b, res[i]) } @@ -200,7 +200,7 @@ inline void fillConstantVector(const ArrayCond & cond, A a, const ArrayB & b, Ar { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a) + (!cond[i]) * static_cast(b[b_index]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a, b[b_index], res[i]) } @@ -216,7 +216,7 @@ inline void fillConstantVector(const ArrayCond & cond, A a, const ArrayB & b, Ar { if constexpr (is_native_int_or_decimal_v) res[i] = !!cond[i] * static_cast(a) + (!cond[i]) * static_cast(b[i]); - else if constexpr (std::is_floating_point_v) + else if constexpr (is_floating_point) { BRANCHFREE_IF_FLOAT(ResultType, cond[i], a, b[i], res[i]) } diff --git a/src/Functions/minus.cpp b/src/Functions/minus.cpp index 4d86442ad7e..cf318db805b 100644 --- a/src/Functions/minus.cpp +++ b/src/Functions/minus.cpp @@ -17,8 +17,8 @@ struct MinusImpl { if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) - static_cast(static_cast(b)); } diff --git a/src/Functions/moduloOrZero.cpp b/src/Functions/moduloOrZero.cpp index d233e4e4ce2..5a4d1539345 100644 --- a/src/Functions/moduloOrZero.cpp +++ b/src/Functions/moduloOrZero.cpp @@ -17,7 +17,7 @@ struct ModuloOrZeroImpl template static Result apply(A a, B b) { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { /// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance. return ResultType(a) - trunc(ResultType(a) / ResultType(b)) * ResultType(b); diff --git a/src/Functions/multiply.cpp b/src/Functions/multiply.cpp index 559143a43b4..740ab81d0d9 100644 --- a/src/Functions/multiply.cpp +++ b/src/Functions/multiply.cpp @@ -18,8 +18,8 @@ struct MultiplyImpl { if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) * static_cast(static_cast(b)); } diff --git a/src/Functions/plus.cpp b/src/Functions/plus.cpp index 00136e50c5b..26921713f78 100644 --- a/src/Functions/plus.cpp +++ b/src/Functions/plus.cpp @@ -19,8 +19,8 @@ struct PlusImpl /// Next everywhere, static_cast - so that there is no wrong result in expressions of the form Int64 c = UInt32(a) * Int32(-1). if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) + static_cast(static_cast(b)); } diff --git a/src/Functions/sign.cpp b/src/Functions/sign.cpp index 16f0efd2201..a6396a58c0c 100644 --- a/src/Functions/sign.cpp +++ b/src/Functions/sign.cpp @@ -13,7 +13,7 @@ struct SignImpl static NO_SANITIZE_UNDEFINED ResultType apply(A a) { - if constexpr (is_decimal || is_floating_point_v) + if constexpr (is_decimal || is_floating_point) return a < A(0) ? -1 : a == A(0) ? 0 : 1; else if constexpr (is_signed_v) return a < 0 ? -1 : a == 0 ? 
0 : 1; diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 6dda5a9b089..f1fcbb07af5 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -1382,7 +1382,7 @@ inline bool tryReadText(IPv4 & x, ReadBuffer & buf) { return tryReadIPv4Text(x, inline bool tryReadText(IPv6 & x, ReadBuffer & buf) { return tryReadIPv6Text(x, buf); } template -requires is_floating_point_v +requires is_floating_point inline void readText(T & x, ReadBuffer & buf) { readFloatText(x, buf); } inline void readText(String & x, ReadBuffer & buf) { readEscapedString(x, buf); } diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index cdeabfcf352..a4eefeaffe2 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -150,7 +150,7 @@ inline void writeBoolText(bool x, WriteBuffer & buf) template -requires is_floating_point_v +requires is_floating_point inline size_t writeFloatTextFastPath(T x, char * buffer) { Int64 result = 0; @@ -182,7 +182,7 @@ inline size_t writeFloatTextFastPath(T x, char * buffer) } template -requires is_floating_point_v +requires is_floating_point inline void writeFloatText(T x, WriteBuffer & buf) { using Converter = DoubleConverter; @@ -530,7 +530,7 @@ void writeJSONNumber(T x, WriteBuffer & ostr, const FormatSettings & settings) bool is_finite = isFinite(x); const bool need_quote = (is_integer && (sizeof(T) >= 8) && settings.json.quote_64bit_integers) - || (settings.json.quote_denormals && !is_finite) || (is_floating_point_v && (sizeof(T) >= 8) && settings.json.quote_64bit_floats); + || (settings.json.quote_denormals && !is_finite) || (is_floating_point && (sizeof(T) >= 8) && settings.json.quote_64bit_floats); if (need_quote) writeChar('"', ostr); @@ -541,7 +541,7 @@ void writeJSONNumber(T x, WriteBuffer & ostr, const FormatSettings & settings) writeCString("null", ostr); else { - if constexpr (is_floating_point_v) + if constexpr (is_floating_point) { if (std::signbit(x)) { @@ -1065,7 +1065,7 @@ inline void writeText(is_integer auto x, WriteBuffer & buf) } template -requires is_floating_point_v +requires is_floating_point inline void writeText(T x, WriteBuffer & buf) { writeFloatText(x, buf); } inline void writeText(is_enum auto x, WriteBuffer & buf) { writeText(magic_enum::enum_name(x), buf); } diff --git a/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp index 9785ba46dab..c5ffbb96d6f 100644 --- a/src/Interpreters/RowRefs.cpp +++ b/src/Interpreters/RowRefs.cpp @@ -183,7 +183,7 @@ private: if (sorted.load(std::memory_order_relaxed)) return; - if constexpr (std::is_arithmetic_v && !std::is_floating_point_v) + if constexpr (std::is_arithmetic_v && !std::is_floating_point) { if (likely(entries.size() > 256)) { From bf2a8f6a7f6eb8073b60468058f8259cf4a4f341 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 2 Jun 2024 20:43:02 +0200 Subject: [PATCH 004/267] Preparation --- base/base/BFloat16.h | 17 +- src/AggregateFunctions/AggregateFunctionSum.h | 13 +- .../AggregateFunctionUniq.h | 2 +- src/Core/DecimalFunctions.h | 10 +- src/Core/iostream_debug_helpers.cpp | 149 ------------------ src/Core/iostream_debug_helpers.h | 49 ------ src/DataTypes/DataTypesDecimal.cpp | 15 +- src/Dictionaries/RangeHashedDictionary.h | 3 +- src/Functions/FunctionsRound.h | 3 +- src/Functions/array/mapPopulateSeries.cpp | 32 ++-- src/Functions/exp.cpp | 9 +- src/Functions/log.cpp | 9 +- src/Functions/minus.cpp | 4 +- src/Functions/sigmoid.cpp | 10 +- src/Functions/tanh.cpp | 9 +- src/IO/WriteHelpers.h | 14 +- src/Interpreters/RowRefs.cpp | 2 +- 
src/Parsers/iostream_debug_helpers.cpp | 35 ---- src/Parsers/iostream_debug_helpers.h | 17 -- 19 files changed, 110 insertions(+), 292 deletions(-) delete mode 100644 src/Core/iostream_debug_helpers.cpp delete mode 100644 src/Core/iostream_debug_helpers.h delete mode 100644 src/Parsers/iostream_debug_helpers.cpp delete mode 100644 src/Parsers/iostream_debug_helpers.h diff --git a/base/base/BFloat16.h b/base/base/BFloat16.h index 17c3ebe9ef3..99eab5c67cb 100644 --- a/base/base/BFloat16.h +++ b/base/base/BFloat16.h @@ -1,9 +1,22 @@ #pragma once +#include + + using BFloat16 = __bf16; namespace std { - inline constexpr bool isfinite(BFloat16) { return true; } - inline constexpr bool signbit(BFloat16) { return false; } + inline constexpr bool isfinite(BFloat16 x) { return (bit_cast(x) & 0b0111111110000000) != 0b0111111110000000; } + inline constexpr bool signbit(BFloat16 x) { return bit_cast(x) & 0b1000000000000000; } +} + +inline Float32 BFloat16ToFloat32(BFloat16 x) +{ + return bit_cast(static_cast(bit_cast(x)) << 16); +} + +inline BFloat16 Float32ToBFloat16(Float32 x) +{ + return bit_cast(std::bit_cast(x) >> 16); } diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index d0d600be70b..f6c51241a5c 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -193,12 +193,11 @@ struct AggregateFunctionSumData Impl::add(sum, local_sum); return; } - else if constexpr (is_floating_point) + else if constexpr (is_floating_point && (sizeof(Value) == 4 || sizeof(Value) == 8)) { - /// For floating point we use a similar trick as above, except that now we reinterpret the floating point number as an unsigned + /// For floating point we use a similar trick as above, except that now we reinterpret the floating point number as an unsigned /// integer of the same size and use a mask instead (0 to discard, 0xFF..FF to keep) - static_assert(sizeof(Value) == 4 || sizeof(Value) == 8); - using equivalent_integer = typename std::conditional_t; + using EquivalentInteger = typename std::conditional_t; constexpr size_t unroll_count = 128 / sizeof(T); T partial_sums[unroll_count]{}; @@ -209,11 +208,11 @@ struct AggregateFunctionSumData { for (size_t i = 0; i < unroll_count; ++i) { - equivalent_integer value; - std::memcpy(&value, &ptr[i], sizeof(Value)); + EquivalentInteger value; + memcpy(&value, &ptr[i], sizeof(Value)); value &= (!condition_map[i] != add_if_zero) - 1; Value d; - std::memcpy(&d, &value, sizeof(Value)); + memcpy(&d, &value, sizeof(Value)); Impl::add(partial_sums[i], d); } ptr += unroll_count; diff --git a/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h index cef23f766c7..cd2d3c1eb18 100644 --- a/src/AggregateFunctions/AggregateFunctionUniq.h +++ b/src/AggregateFunctions/AggregateFunctionUniq.h @@ -257,7 +257,7 @@ template struct AggregateFunctionUniqTraits { static UInt64 hash(T x) { - if constexpr (std::is_same_v || std::is_same_v) + if constexpr (is_floating_point) { return bit_cast(x); } diff --git a/src/Core/DecimalFunctions.h b/src/Core/DecimalFunctions.h index 435cef61145..abd660a8a7f 100644 --- a/src/Core/DecimalFunctions.h +++ b/src/Core/DecimalFunctions.h @@ -17,6 +17,7 @@ class DataTypeNumber; namespace ErrorCodes { + extern const int NOT_IMPLEMENTED; extern const int DECIMAL_OVERFLOW; extern const int ARGUMENT_OUT_OF_BOUND; } @@ -310,7 +311,14 @@ ReturnType convertToImpl(const DecimalType & decimal, UInt32 scale, To & result) using 
DecimalNativeType = typename DecimalType::NativeType; static constexpr bool throw_exception = std::is_void_v; - if constexpr (is_floating_point) + if constexpr (std::is_same_v) + { + if constexpr (throw_exception) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from Decimal to BFloat16 is not implemented"); + else + return ReturnType(false); + } + else if constexpr (is_floating_point) { result = static_cast(decimal.value) / static_cast(scaleMultiplier(scale)); } diff --git a/src/Core/iostream_debug_helpers.cpp b/src/Core/iostream_debug_helpers.cpp deleted file mode 100644 index 38e61ac4fca..00000000000 --- a/src/Core/iostream_debug_helpers.cpp +++ /dev/null @@ -1,149 +0,0 @@ -#include "iostream_debug_helpers.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -template <> -std::ostream & operator<< (std::ostream & stream, const Field & what) -{ - stream << applyVisitor(FieldVisitorDump(), what); - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const NameAndTypePair & what) -{ - stream << "NameAndTypePair(name = " << what.name << ", type = " << what.type << ")"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const IDataType & what) -{ - stream << "IDataType(name = " << what.getName() << ", default = " << what.getDefault() << ")"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const IStorage & what) -{ - auto table_id = what.getStorageID(); - stream << "IStorage(name = " << what.getName() << ", tableName = " << table_id.table_name << ") {" - << what.getInMemoryMetadataPtr()->getColumns().getAllPhysical().toString() << "}"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const TableLockHolder &) -{ - stream << "TableStructureReadLock()"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const IFunctionOverloadResolver & what) -{ - stream << "IFunction(name = " << what.getName() << ", variadic = " << what.isVariadic() << ", args = " << what.getNumberOfArguments() - << ")"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const Block & what) -{ - stream << "Block(" - << "num_columns = " << what.columns() << "){" << what.dumpStructure() << "}"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const ColumnWithTypeAndName & what) -{ - stream << "ColumnWithTypeAndName(name = " << what.name << ", type = " << *what.type << ", column = "; - return dumpValue(stream, what.column) << ")"; -} - -std::ostream & operator<<(std::ostream & stream, const IColumn & what) -{ - stream << "IColumn(" << what.dumpStructure() << ")"; - stream << "{"; - for (size_t i = 0; i < what.size(); ++i) - { - if (i) - stream << ", "; - stream << applyVisitor(FieldVisitorDump(), what[i]); - } - stream << "}"; - - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const Packet & what) -{ - stream << "Packet(" - << "type = " << what.type; - // types description: Core/Protocol.h - if (what.exception) - stream << "exception = " << what.exception.get(); - // TODO: profile_info - stream << ") {" << what.block << "}"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const ExpressionActions & what) -{ - stream << "ExpressionActions(" << what.dumpActions() << ")"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const TreeRewriterResult & what) -{ - stream << 
"SyntaxAnalyzerResult{"; - stream << "storage=" << what.storage << "; "; - if (!what.source_columns.empty()) - { - stream << "source_columns="; - dumpValue(stream, what.source_columns); - stream << "; "; - } - if (!what.aliases.empty()) - { - stream << "aliases="; - dumpValue(stream, what.aliases); - stream << "; "; - } - if (!what.array_join_result_to_source.empty()) - { - stream << "array_join_result_to_source="; - dumpValue(stream, what.array_join_result_to_source); - stream << "; "; - } - if (!what.array_join_alias_to_name.empty()) - { - stream << "array_join_alias_to_name="; - dumpValue(stream, what.array_join_alias_to_name); - stream << "; "; - } - if (!what.array_join_name_to_alias.empty()) - { - stream << "array_join_name_to_alias="; - dumpValue(stream, what.array_join_name_to_alias); - stream << "; "; - } - stream << "rewrite_subqueries=" << what.rewrite_subqueries << "; "; - stream << "}"; - - return stream; -} - -} diff --git a/src/Core/iostream_debug_helpers.h b/src/Core/iostream_debug_helpers.h deleted file mode 100644 index e40bf74583e..00000000000 --- a/src/Core/iostream_debug_helpers.h +++ /dev/null @@ -1,49 +0,0 @@ -#pragma once -#include - -namespace DB -{ - -// Use template to disable implicit casting for certain overloaded types such as Field, which leads -// to overload resolution ambiguity. -class Field; -template -requires std::is_same_v -std::ostream & operator<<(std::ostream & stream, const T & what); - -struct NameAndTypePair; -std::ostream & operator<<(std::ostream & stream, const NameAndTypePair & what); - -class IDataType; -std::ostream & operator<<(std::ostream & stream, const IDataType & what); - -class IStorage; -std::ostream & operator<<(std::ostream & stream, const IStorage & what); - -class IFunctionOverloadResolver; -std::ostream & operator<<(std::ostream & stream, const IFunctionOverloadResolver & what); - -class IFunctionBase; -std::ostream & operator<<(std::ostream & stream, const IFunctionBase & what); - -class Block; -std::ostream & operator<<(std::ostream & stream, const Block & what); - -struct ColumnWithTypeAndName; -std::ostream & operator<<(std::ostream & stream, const ColumnWithTypeAndName & what); - -class IColumn; -std::ostream & operator<<(std::ostream & stream, const IColumn & what); - -struct Packet; -std::ostream & operator<<(std::ostream & stream, const Packet & what); - -class ExpressionActions; -std::ostream & operator<<(std::ostream & stream, const ExpressionActions & what); - -struct TreeRewriterResult; -std::ostream & operator<<(std::ostream & stream, const TreeRewriterResult & what); -} - -/// some operator<< should be declared before operator<<(... 
std::shared_ptr<>) -#include diff --git a/src/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp index d87eff97675..e0304e46b05 100644 --- a/src/DataTypes/DataTypesDecimal.cpp +++ b/src/DataTypes/DataTypesDecimal.cpp @@ -20,6 +20,7 @@ namespace ErrorCodes extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int DECIMAL_OVERFLOW; + extern const int NOT_IMPLEMENTED; } @@ -262,15 +263,19 @@ FOR_EACH_ARITHMETIC_TYPE(INVOKE); template requires (is_arithmetic_v && IsDataTypeDecimal) -ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & value, UInt32 scale, typename ToDataType::FieldType & result) +ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & /*value*/, UInt32 /*scale*/, typename ToDataType::FieldType & /*result*/) { - using FromFieldType = typename FromDataType::FieldType; +/* using FromFieldType = typename FromDataType::FieldType; using ToFieldType = typename ToDataType::FieldType; using ToNativeType = typename ToFieldType::NativeType; static constexpr bool throw_exception = std::is_same_v; - if constexpr (is_floating_point) + if constexpr (std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from BFloat16 to Decimal is not implemented"); + } + else if constexpr (is_floating_point) { if (!isFinite(value)) { @@ -302,7 +307,9 @@ ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & value, return ReturnType(convertDecimalsImpl, ToDataType, ReturnType>(static_cast(value), 0, scale, result)); else return ReturnType(convertDecimalsImpl, ToDataType, ReturnType>(static_cast(value), 0, scale, result)); - } + }*/ + + return ReturnType(); } #define DISPATCH(FROM_DATA_TYPE, TO_DATA_TYPE) \ diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index bf004dbe32b..4950e7c8ee6 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ b/src/Dictionaries/RangeHashedDictionary.h @@ -298,7 +298,8 @@ namespace impl using Types = std::decay_t; using DataType = typename Types::LeftType; - if constexpr (IsDataTypeDecimalOrNumber || IsDataTypeDateOrDateTime || IsDataTypeEnum) + if constexpr ((IsDataTypeDecimalOrNumber || IsDataTypeDateOrDateTime || IsDataTypeEnum) + && !std::is_same_v) { using ColumnType = typename DataType::ColumnType; func(TypePair()); diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 46fbe70458d..7eea0d74975 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -579,7 +579,8 @@ public: using Types = std::decay_t; using DataType = typename Types::LeftType; - if constexpr (IsDataTypeNumber || IsDataTypeDecimal) + if constexpr ((IsDataTypeNumber || IsDataTypeDecimal) + && !std::is_same_v) { using FieldType = typename DataType::FieldType; res = Dispatcher::apply(column.column.get(), scale_arg); diff --git a/src/Functions/array/mapPopulateSeries.cpp b/src/Functions/array/mapPopulateSeries.cpp index 0db71ab2cf8..759696147c3 100644 --- a/src/Functions/array/mapPopulateSeries.cpp +++ b/src/Functions/array/mapPopulateSeries.cpp @@ -453,23 +453,29 @@ private: using ValueType = typename Types::RightType; static constexpr bool key_and_value_are_numbers = IsDataTypeNumber && IsDataTypeNumber; - static constexpr bool key_is_float = std::is_same_v || std::is_same_v; - if constexpr (key_and_value_are_numbers && !key_is_float) + if constexpr (key_and_value_are_numbers) { - using KeyFieldType = typename KeyType::FieldType; - using ValueFieldType = 
typename ValueType::FieldType; + if constexpr (is_floating_point) + { + return false; + } + else + { + using KeyFieldType = typename KeyType::FieldType; + using ValueFieldType = typename ValueType::FieldType; - executeImplTyped( - input.key_column, - input.value_column, - input.offsets_column, - input.max_key_column, - std::move(result_columns.result_key_column), - std::move(result_columns.result_value_column), - std::move(result_columns.result_offset_column)); + executeImplTyped( + input.key_column, + input.value_column, + input.offsets_column, + input.max_key_column, + std::move(result_columns.result_key_column), + std::move(result_columns.result_value_column), + std::move(result_columns.result_offset_column)); - return true; + return true; + } } return false; diff --git a/src/Functions/exp.cpp b/src/Functions/exp.cpp index d352cda7460..9b8207afe30 100644 --- a/src/Functions/exp.cpp +++ b/src/Functions/exp.cpp @@ -21,7 +21,14 @@ namespace template static void execute(const T * src, size_t size, T * dst) { - NFastOps::Exp(src, size, dst); + if constexpr (std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function `{}` is not implemented for BFloat16", name); + } + else + { + NFastOps::Exp(src, size, dst); + } } }; } diff --git a/src/Functions/log.cpp b/src/Functions/log.cpp index 9096b8c6f22..d5e10c90c83 100644 --- a/src/Functions/log.cpp +++ b/src/Functions/log.cpp @@ -20,7 +20,14 @@ struct LogName { static constexpr auto name = "log"; }; template static void execute(const T * src, size_t size, T * dst) { - NFastOps::Log(src, size, dst); + if constexpr (std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function `{}` is not implemented for BFloat16", name); + } + else + { + NFastOps::Log(src, size, dst); + } } }; diff --git a/src/Functions/minus.cpp b/src/Functions/minus.cpp index cf318db805b..a372e8d5d78 100644 --- a/src/Functions/minus.cpp +++ b/src/Functions/minus.cpp @@ -17,8 +17,8 @@ struct MinusImpl { if constexpr (is_big_int_v || is_big_int_v) { - using CastA = std::conditional_t, B, A>; - using CastB = std::conditional_t, A, B>; + using CastA = std::conditional_t, B, A>; + using CastB = std::conditional_t, A, B>; return static_cast(static_cast(a)) - static_cast(static_cast(b)); } diff --git a/src/Functions/sigmoid.cpp b/src/Functions/sigmoid.cpp index d121bdc7389..1179329845d 100644 --- a/src/Functions/sigmoid.cpp +++ b/src/Functions/sigmoid.cpp @@ -21,7 +21,14 @@ namespace template static void execute(const T * src, size_t size, T * dst) { - NFastOps::Sigmoid<>(src, size, dst); + if constexpr (std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function `{}` is not implemented for BFloat16", name); + } + else + { + NFastOps::Sigmoid<>(src, size, dst); + } } }; } @@ -47,4 +54,3 @@ REGISTER_FUNCTION(Sigmoid) } } - diff --git a/src/Functions/tanh.cpp b/src/Functions/tanh.cpp index bdefa5263d7..49788b31970 100644 --- a/src/Functions/tanh.cpp +++ b/src/Functions/tanh.cpp @@ -19,7 +19,14 @@ struct TanhName { static constexpr auto name = "tanh"; }; template static void execute(const T * src, size_t size, T * dst) { - NFastOps::Tanh<>(src, size, dst); + if constexpr (std::is_same_v) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function `{}` is not implemented for BFloat16", name); + } + else + { + NFastOps::Tanh<>(src, size, dst); + } } }; diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index a4eefeaffe2..d2e2868b245 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -155,7 +155,7 @@ inline size_t 
writeFloatTextFastPath(T x, char * buffer) { Int64 result = 0; - if constexpr (std::is_same_v) + if constexpr (std::is_same_v) { /// The library Ryu has low performance on integers. /// This workaround improves performance 6..10 times. @@ -165,10 +165,16 @@ inline size_t writeFloatTextFastPath(T x, char * buffer) else result = jkj::dragonbox::to_chars_n(x, buffer) - buffer; } - else + else if constexpr (std::is_same_v) { - /// This will support 16-bit floats as well. - float f32 = x; + if (DecomposedFloat32(x).isIntegerInRepresentableRange()) + result = itoa(Int32(x), buffer) - buffer; + else + result = jkj::dragonbox::to_chars_n(x, buffer) - buffer; + } + else if constexpr (std::is_same_v) + { + Float32 f32 = BFloat16ToFloat32(x); if (DecomposedFloat32(f32).isIntegerInRepresentableRange()) result = itoa(Int32(f32), buffer) - buffer; diff --git a/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp index c5ffbb96d6f..a0fad8840e6 100644 --- a/src/Interpreters/RowRefs.cpp +++ b/src/Interpreters/RowRefs.cpp @@ -183,7 +183,7 @@ private: if (sorted.load(std::memory_order_relaxed)) return; - if constexpr (std::is_arithmetic_v && !std::is_floating_point) + if constexpr (std::is_arithmetic_v && !is_floating_point) { if (likely(entries.size() > 256)) { diff --git a/src/Parsers/iostream_debug_helpers.cpp b/src/Parsers/iostream_debug_helpers.cpp deleted file mode 100644 index b74d337b22d..00000000000 --- a/src/Parsers/iostream_debug_helpers.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include "iostream_debug_helpers.h" -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -std::ostream & operator<<(std::ostream & stream, const Token & what) -{ - stream << "Token (type="<< static_cast(what.type) <<"){"<< std::string{what.begin, what.end} << "}"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const Expected & what) -{ - stream << "Expected {variants="; - dumpValue(stream, what.variants) - << "; max_parsed_pos=" << what.max_parsed_pos << "}"; - return stream; -} - -std::ostream & operator<<(std::ostream & stream, const IAST & what) -{ - WriteBufferFromOStream buf(stream, 4096); - buf << "IAST{"; - what.dumpTree(buf); - buf << "}"; - return stream; -} - -} diff --git a/src/Parsers/iostream_debug_helpers.h b/src/Parsers/iostream_debug_helpers.h deleted file mode 100644 index 39f52ebcbc2..00000000000 --- a/src/Parsers/iostream_debug_helpers.h +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once -#include - -namespace DB -{ -struct Token; -std::ostream & operator<<(std::ostream & stream, const Token & what); - -struct Expected; -std::ostream & operator<<(std::ostream & stream, const Expected & what); - -class IAST; -std::ostream & operator<<(std::ostream & stream, const IAST & what); - -} - -#include From 874116a107918a182afd3e5e3c3c9aa2f898cafa Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 25 Jul 2024 14:53:48 +0100 Subject: [PATCH 005/267] impl --- src/Interpreters/ConcurrentHashJoin.cpp | 53 ++++---- src/Interpreters/ConcurrentHashJoin.h | 3 +- src/Interpreters/HashJoin/AddedColumns.cpp | 12 +- src/Interpreters/HashJoin/AddedColumns.h | 17 ++- src/Interpreters/HashJoin/HashJoin.cpp | 95 +++++++++++--- src/Interpreters/HashJoin/HashJoin.h | 132 +++++++++++++++++++ src/Interpreters/HashJoin/HashJoinMethods.h | 135 ++++++++++---------- 7 files changed, 329 insertions(+), 118 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index ac940c62a1a..08f4f422496 100644 --- 
a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -24,6 +24,8 @@ #include #include +using namespace DB; + namespace ProfileEvents { extern const Event HashJoinPreallocatedElementsInHashTables; @@ -53,6 +55,19 @@ void updateStatistics(const auto & hash_joins, const DB::StatsCollectingParams & DB::getHashTablesStatistics().update(sum_of_sizes, *median_size, params); } +Block concatenateBlocks(const HashJoin::ScatteredBlocks & blocks) +{ + Blocks inner_blocks; + for (const auto & block : blocks) + { + chassert(!block.wasScattered()); + if (block.wasScattered()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Not scattered block is expected here"); + inner_blocks.push_back(*block.block); + } + return concatenateBlocks(inner_blocks); +} + } namespace DB @@ -165,7 +180,7 @@ ConcurrentHashJoin::~ConcurrentHashJoin() bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block, bool check_limits) { - Blocks dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block); + auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block); size_t blocks_left = 0; for (const auto & block : dispatched_blocks) @@ -193,7 +208,7 @@ bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block, bool check_li bool limit_exceeded = !hash_join->data->addBlockToJoin(dispatched_block, check_limits); - dispatched_block = {}; + dispatched_block = Block{}; blocks_left--; if (limit_exceeded) @@ -209,7 +224,7 @@ bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block, bool check_li void ConcurrentHashJoin::joinBlock(Block & block, std::shared_ptr & /*not_processed*/) { - Blocks dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_left, block); + auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_left, block); block = {}; for (size_t i = 0; i < dispatched_blocks.size(); ++i) { @@ -221,7 +236,7 @@ void ConcurrentHashJoin::joinBlock(Block & block, std::shared_ptr & throw Exception(ErrorCodes::LOGICAL_ERROR, "not_processed should be empty"); } - block = concatenateBlocks(dispatched_blocks); + block = ::concatenateBlocks(dispatched_blocks); } void ConcurrentHashJoin::checkTypesOfKeys(const Block & block) const @@ -300,10 +315,9 @@ static ALWAYS_INLINE IColumn::Selector hashToSelector(const WeakHash32 & hash, s return selector; } -IColumn::Selector ConcurrentHashJoin::selectDispatchBlock(const Strings & key_columns_names, const Block & from_block) +IColumn::Selector selectDispatchBlock(size_t num_shards, const Strings & key_columns_names, const Block & from_block) { size_t num_rows = from_block.rows(); - size_t num_shards = hash_joins.size(); WeakHash32 hash(num_rows); for (const auto & key_name : key_columns_names) @@ -315,27 +329,22 @@ IColumn::Selector ConcurrentHashJoin::selectDispatchBlock(const Strings & key_co return hashToSelector(hash, num_shards); } -Blocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, const Block & from_block) +HashJoin::ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, const Block & from_block) { - /// TODO: use JoinCommon::scatterBlockByHash size_t num_shards = hash_joins.size(); - size_t num_cols = from_block.columns(); - - IColumn::Selector selector = selectDispatchBlock(key_columns_names, from_block); - - Blocks result(num_shards); + IColumn::Selector selector = selectDispatchBlock(num_shards, key_columns_names, from_block); + std::vector selectors(num_shards); for 
(size_t i = 0; i < num_shards; ++i) - result[i] = from_block.cloneEmpty(); - - for (size_t i = 0; i < num_cols; ++i) + selectors[i].reserve(selector.size() / num_shards + 1); + for (size_t i = 0; i < selector.size(); ++i) { - auto dispatched_columns = from_block.getByPosition(i).column->scatter(num_shards, selector); - assert(result.size() == dispatched_columns.size()); - for (size_t block_index = 0; block_index < num_shards; ++block_index) - { - result[block_index].getByPosition(i).column = std::move(dispatched_columns[block_index]); - } + const size_t shard = selector[i]; + selectors[shard].push_back(i); } + HashJoin::ScatteredBlocks result; + result.reserve(num_shards); + for (size_t i = 0; i < num_shards; ++i) + result.emplace_back(from_block, std::move(selectors[i])); return result; } diff --git a/src/Interpreters/ConcurrentHashJoin.h b/src/Interpreters/ConcurrentHashJoin.h index a911edaccc3..8e75bcd874b 100644 --- a/src/Interpreters/ConcurrentHashJoin.h +++ b/src/Interpreters/ConcurrentHashJoin.h @@ -78,8 +78,7 @@ private: std::mutex totals_mutex; Block totals; - IColumn::Selector selectDispatchBlock(const Strings & key_columns_names, const Block & from_block); - Blocks dispatchBlock(const Strings & key_columns_names, const Block & from_block); + HashJoin::ScatteredBlocks dispatchBlock(const Strings & key_columns_names, const Block & from_block); }; UInt64 calculateCacheKey(std::shared_ptr & table_join, const QueryTreeNodePtr & right_table_expression); diff --git a/src/Interpreters/HashJoin/AddedColumns.cpp b/src/Interpreters/HashJoin/AddedColumns.cpp index 930a352744d..45185021ded 100644 --- a/src/Interpreters/HashJoin/AddedColumns.cpp +++ b/src/Interpreters/HashJoin/AddedColumns.cpp @@ -3,14 +3,16 @@ namespace DB { -JoinOnKeyColumns::JoinOnKeyColumns(const Block & block, const Names & key_names_, const String & cond_column_name, const Sizes & key_sizes_) - : key_names(key_names_) - , materialized_keys_holder(JoinCommon::materializeColumns( - block, key_names)) /// Rare case, when keys are constant or low cardinality. To avoid code bloat, simply materialize them. +JoinOnKeyColumns::JoinOnKeyColumns( + const HashJoin::ScatteredBlock & block_, const Names & key_names_, const String & cond_column_name, const Sizes & key_sizes_) + : block(block_) + , key_names(key_names_) + /// Rare case, when keys are constant or low cardinality. To avoid code bloat, simply materialize them. 
+ , materialized_keys_holder(JoinCommon::materializeColumns(*block.block, key_names)) , key_columns(JoinCommon::getRawPointers(materialized_keys_holder)) , null_map(nullptr) , null_map_holder(extractNestedColumnsAndNullMap(key_columns, null_map)) - , join_mask_column(JoinCommon::getColumnAsMask(block, cond_column_name)) + , join_mask_column(JoinCommon::getColumnAsMask(*block.block, cond_column_name)) , key_sizes(key_sizes_) { } diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index 13a7df6f498..daba6c4f2e0 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -14,6 +14,8 @@ using ExpressionActionsPtr = std::shared_ptr; struct JoinOnKeyColumns { + const HashJoin::ScatteredBlock & block; + Names key_names; Columns materialized_keys_holder; @@ -27,9 +29,14 @@ struct JoinOnKeyColumns Sizes key_sizes; - explicit JoinOnKeyColumns(const Block & block, const Names & key_names_, const String & cond_column_name, const Sizes & key_sizes_); + JoinOnKeyColumns( + const HashJoin::ScatteredBlock & block, const Names & key_names_, const String & cond_column_name, const Sizes & key_sizes_); - bool isRowFiltered(size_t i) const { return join_mask_column.isRowFiltered(i); } + bool isRowFiltered(size_t i) const + { + chassert(std::ranges::find(block.selector, i) != block.selector.end(), fmt::format("Row {} is not in block", i)); + return join_mask_column.isRowFiltered(i); + } }; template @@ -55,7 +62,7 @@ public: }; AddedColumns( - const Block & left_block_, + const HashJoin::ScatteredBlock & left_block_, const Block & block_with_columns_to_add, const Block & saved_block_sample, const HashJoin & join, @@ -63,7 +70,8 @@ public: ExpressionActionsPtr additional_filter_expression_, bool is_asof_join, bool is_join_get_) - : left_block(left_block_) + : src_block(left_block_) + , left_block(*left_block_.block) , join_on_keys(join_on_keys_) , additional_filter_expression(additional_filter_expression_) , rows_to_add(left_block.rows()) @@ -134,6 +142,7 @@ public: const IColumn & leftAsofKey() const { return *left_asof_key; } + const HashJoin::ScatteredBlock & src_block; Block left_block; std::vector join_on_keys; ExpressionActionsPtr additional_filter_expression; diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 0c7cad4360d..e14b4d5bb01 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -441,15 +441,21 @@ Block HashJoin::prepareRightBlock(const Block & block) const return prepareRightBlock(block, savedBlockSample()); } -bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) +bool HashJoin::addBlockToJoin(const Block & source_block, bool check_limits) +{ + auto scattered_block = ScatteredBlock{source_block}; + return addBlockToJoin(scattered_block, check_limits); +} + +bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) { if (!data) throw Exception(ErrorCodes::LOGICAL_ERROR, "Join data was released"); /// RowRef::SizeT is uint32_t (not size_t) for hash table Cell memory efficiency. /// It's possible to split bigger blocks and insert them by parts here. But it would be a dead code. 
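/// A minimal sketch of the selector idea behind the dispatchBlock rewrite above,
/// assuming a plain row-to-shard mapping; buildSelectors is an illustrative name,
/// not patch code. Instead of physically scattering every column into num_shards
/// blocks, each shard records only the indices of its rows, and all shards share
/// the single source block.
#include <cstddef>
#include <vector>

std::vector<std::vector<size_t>> buildSelectors(const std::vector<size_t> & row_to_shard, size_t num_shards)
{
    std::vector<std::vector<size_t>> selectors(num_shards);
    for (auto & selector : selectors)
        selector.reserve(row_to_shard.size() / num_shards + 1);
    for (size_t row = 0; row < row_to_shard.size(); ++row)
        selectors[row_to_shard[row]].push_back(row); /// row indices stay sorted within each shard
    return selectors;
}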
- if (unlikely(source_block_.rows() > std::numeric_limits::max())) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Too many rows in right table block for HashJoin: {}", source_block_.rows()); + if (unlikely(source_block.rows() > std::numeric_limits::max())) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Too many rows in right table block for HashJoin: {}", source_block.rows()); /** We do not allocate memory for stored blocks inside HashJoin, only for hash table. * In case when we have all the blocks allocated before the first `addBlockToJoin` call, will already be quite high. @@ -458,7 +464,6 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) if (!memory_usage_before_adding_blocks) memory_usage_before_adding_blocks = getCurrentQueryMemoryUsage(); - Block source_block = source_block_; if (strictness == JoinStrictness::Asof) { chassert(kind == JoinKind::Left || kind == JoinKind::Inner); @@ -485,8 +490,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) for (size_t i = 0; i < asof_column_nullable.size(); ++i) negative_null_map[i] = !asof_column_nullable[i]; - for (auto & column : source_block) - column.column = column.column->filter(negative_null_map, -1); + source_block.filter(negative_null_map); } } } @@ -498,10 +502,11 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) for (const auto & column_name : right_key_names) { const auto & column = source_block.getByName(column_name).column; + /// TODO: do it once for the original block before splitting all_key_columns[column_name] = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); } - Block block_to_save = prepareRightBlock(source_block); + Block block_to_save = prepareRightBlock(*source_block.block); if (shrink_blocks) block_to_save = block_to_save.shrinkToFit(); @@ -512,6 +517,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) && (tmp_stream || (max_bytes_in_join && getTotalByteCount() + block_to_save.allocatedBytes() >= max_bytes_in_join) || (max_rows_in_join && getTotalRowCount() + block_to_save.rows() >= max_rows_in_join))) { + chassert(!source_block.wasScattered()); /// We don't run parallel_hash for cross join if (tmp_stream == nullptr) { tmp_stream = &tmp_data->createStream(right_sample_block); @@ -535,11 +541,14 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) && ((min_bytes_to_compress && getTotalByteCount() >= min_bytes_to_compress) || (min_rows_to_compress && getTotalRowCount() >= min_rows_to_compress))) { + chassert(!source_block.wasScattered()); /// We don't run parallel_hash for cross join block_to_save = block_to_save.compress(); have_compressed = true; } - data->blocks_allocated_size += block_to_save.allocatedBytes(); + /// In case of scattered block we account proportional share of the source block bytes. + /// For not scattered columns it will be trivial (bytes * N / N) calculation. + data->blocks_allocated_size += block_to_save.rows() ? 
block_to_save.allocatedBytes() * rows / block_to_save.rows() : 0; data->blocks.emplace_back(std::move(block_to_save)); Block * stored_block = &data->blocks.back(); @@ -567,7 +576,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) save_nullmap |= (*null_map)[i]; } - auto join_mask_col = JoinCommon::getColumnAsMask(source_block, onexprs[onexpr_idx].condColumnNames().second); + auto join_mask_col = JoinCommon::getColumnAsMask(*source_block.block, onexprs[onexpr_idx].condColumnNames().second); /// Save blocks that do not hold conditions in ON section ColumnUInt8::MutablePtr not_joined_map = nullptr; if (!flag_per_row && isRightOrFull(kind) && join_mask_col.hasData()) @@ -592,27 +601,31 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) bool is_inserted = false; if (kind != JoinKind::Cross) { - joinDispatch(kind, strictness, data->maps[onexpr_idx], [&](auto kind_, auto strictness_, auto & map) - { - size_t size = HashJoinMethods>::insertFromBlockImpl( + joinDispatch( + kind, + strictness, + data->maps[onexpr_idx], + [&](auto kind_, auto strictness_, auto & map) + { + size_t size = HashJoinMethods>::insertFromBlockImpl( *this, data->type, map, - rows, key_columns, key_sizes[onexpr_idx], stored_block, + source_block.selector, null_map, join_mask_col.getData(), data->pool, is_inserted); - if (flag_per_row) - used_flags->reinit(stored_block); - else if (is_inserted) - /// Number of buckets + 1 value from zero storage - used_flags->reinit(size + 1); - }); + if (flag_per_row) + used_flags->reinit(stored_block); + else if (is_inserted) + /// Number of buckets + 1 value from zero storage + used_flags->reinit(size + 1); + }); } if (!flag_per_row && save_nullmap && is_inserted) @@ -933,6 +946,50 @@ void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) } } +void HashJoin::joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed) +{ + if (!data) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot join after data has been released"); + + for (const auto & onexpr : table_join->getClauses()) + { + auto cond_column_name = onexpr.condColumnNames(); + JoinCommon::checkTypesOfKeys( + *block.block, + onexpr.key_names_left, + cond_column_name.first, + right_sample_block, + onexpr.key_names_right, + cond_column_name.second); + } + + chassert(kind == JoinKind::Left || kind == JoinKind::Inner); + + std::vectormaps[0])> *> maps_vector; + for (size_t i = 0; i < table_join->getClauses().size(); ++i) + maps_vector.push_back(&data->maps[i]); + + if (joinDispatch( + kind, + strictness, + maps_vector, + [&](auto kind_, auto strictness_, auto & maps_vector_) + { + using MapType = typename MapGetter::Map; + ScatteredBlock remaining_block = HashJoinMethods::joinBlockImpl( + *this, block, sample_block_with_columns_to_add, maps_vector_); + if (remaining_block.rows()) + not_processed = std::make_shared(ExtraBlock{std::move(*remaining_block.block)}); + else + not_processed.reset(); + })) + { + /// Joined + } + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong JOIN combination: {} {}", strictness, kind); +} + HashJoin::~HashJoin() { if (!data) diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 0b115b9fdbb..008a1425c4e 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -29,6 +29,8 @@ #include #include +#include + namespace DB { @@ -137,11 +139,138 @@ public: return std::make_shared(table_join_, right_sample_block_, any_take_last_row, reserve_num, 
instance_id); } + struct ScatteredBlock : private boost::noncopyable + { + BlockPtr block; // TODO: we don't need shared_ptr here since if any changes are made to block, they're supposed to be private + IColumn::Selector selector; + + ScatteredBlock(const Block & block_) : block(std::make_shared(block_)), selector(createTrivialSelector(block->rows())) { } + + ScatteredBlock(const Block & block_, IColumn::Selector && selector_) + : block(std::make_shared(block_)), selector(std::move(selector_)) + { + } + + ScatteredBlock(ScatteredBlock && other) noexcept : block(std::move(other.block)), selector(std::move(other.selector)) + { + other.block = nullptr; + other.selector.clear(); + } + + ScatteredBlock & operator=(ScatteredBlock && other) noexcept + { + if (this != &other) + { + block = std::move(other.block); + selector = std::move(other.selector); + + other.block = nullptr; + other.selector.clear(); + } + return *this; + } + + operator bool() const { return block && *block; } + + /// Accounts only selected rows + size_t rows() const { return selector.size(); } + + /// Whether block was scattered, i.e. has non-trivial selector + bool wasScattered() const + { + chassert(block); + return selector.size() != block->rows(); + } + + const ColumnWithTypeAndName & getByName(const std::string & name) const + { + chassert(block); + return block->getByName(name); + } + + /// Filters selector by mask discarding rows for which filter is false + void filter(const IColumn::Filter & filter) + { + chassert(block && block->rows() == filter.size()); + auto it = std::remove_if(selector.begin(), selector.end(), [&](size_t idx) { return !filter[idx]; }); + selector.resize(std::distance(selector.begin(), it)); + } + + /// Applies selector to block in place + void filterBySelector() + { + chassert(block); + auto columns = block->getColumns(); + for (auto & col : columns) + { + auto c = col->cloneEmpty(); + c->reserve(selector.size()); + /// TODO: create new method in IColumnHelper to devirtualize + for (const auto idx : selector) + c->insertFrom(*col, idx); + col = std::move(c); + } + + *this = ScatteredBlock{block->cloneWithColumns(std::move(columns))}; + } + + /// Cut first num_rows rows from block in place and returns block with remaining rows + ScatteredBlock cut(size_t num_rows) + { + SCOPE_EXIT(filterBySelector()); + + if (num_rows >= rows()) + return Block{}; + + chassert(block); + + IColumn::Selector remaining_selector(selector.begin() + num_rows, selector.end()); + auto remaining = ScatteredBlock{*block, std::move(remaining_selector)}; + + selector.erase(selector.begin() + num_rows, selector.end()); + + return remaining; + } + + void replicate(const IColumn::Offsets & offsets, size_t existing_columns, const std::vector & right_keys_to_replicate) + { + chassert(block); + chassert(offsets.size() == rows()); + + auto columns = block->getColumns(); + for (size_t i = 0; i < existing_columns; ++i) + { + auto c = columns[i]->replicate(offsets); + columns[i] = std::move(c); + } + for (size_t pos : right_keys_to_replicate) + { + auto c = columns[pos]->replicate(offsets); + columns[pos] = std::move(c); + } + + *this = ScatteredBlock{block->cloneWithColumns(std::move(columns))}; + } + + // private: + IColumn::Selector createTrivialSelector(size_t size) + { + IColumn::Selector res(size); + std::iota(res.begin(), res.end(), 0); + return res; + } + }; + + using ScatteredBlocks = std::vector; + /** Add block of data from right hand of JOIN to the map. 
* Returns false, if some limit was exceeded and you should not insert more data. */ bool addBlockToJoin(const Block & source_block_, bool check_limits) override; + /// Called directly from ConcurrentJoin::addBlockToJoin + bool addBlockToJoin(ScatteredBlock & source_block_, bool check_limits); + void checkTypesOfKeys(const Block & block) const override; /** Join data from the map (that was previously built by calls to addBlockToJoin) to the block with data from "left" table. @@ -149,6 +278,9 @@ public: */ void joinBlock(Block & block, ExtraBlockPtr & not_processed) override; + /// Called directly from ConcurrentJoin::joinBlock + void joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed); + /// Check joinGet arguments and infer the return type. DataTypePtr joinGetCheckAndGetReturnType(const DataTypes & data_types, const String & column_name, bool or_null) const; diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 0dfafa94efc..317dde83314 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -74,10 +74,10 @@ public: HashJoin & join, HashJoin::Type type, MapsTemplate & maps, - size_t rows, const ColumnRawPtrs & key_columns, const Sizes & key_sizes, Block * stored_block, + const IColumn::Selector & selector, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, @@ -92,14 +92,15 @@ public: is_inserted = true; return 0; - #define M(TYPE) \ - case HashJoin::Type::TYPE: \ - return insertFromBlockImplTypeCase>::Type>(\ - join, *maps.TYPE, rows, key_columns, key_sizes, stored_block, null_map, join_mask, pool, is_inserted); \ - break; +#define M(TYPE) \ + case HashJoin::Type::TYPE: \ + return insertFromBlockImplTypeCase< \ + typename KeyGetterForType>::Type>( \ + join, *maps.TYPE, key_columns, key_sizes, stored_block, selector, null_map, join_mask, pool, is_inserted); \ + break; - APPLY_FOR_JOIN_VARIANTS(M) - #undef M + APPLY_FOR_JOIN_VARIANTS(M) +#undef M } } @@ -111,6 +112,21 @@ public: const Block & block_with_columns_to_add, const MapsTemplateVector & maps_, bool is_join_get = false) + { + HashJoin::ScatteredBlock scattered_block{block}; + auto ret = joinBlockImpl(join, scattered_block, block_with_columns_to_add, maps_, is_join_get); + ret.filterBySelector(); + scattered_block.filterBySelector(); + block = std::move(*scattered_block.block); + return *ret.block; + } + + static HashJoin::ScatteredBlock joinBlockImpl( + const HashJoin & join, + HashJoin::ScatteredBlock & block, + const Block & block_with_columns_to_add, + const MapsTemplateVector & maps_, + bool is_join_get = false) { constexpr JoinFeatures join_features; @@ -121,7 +137,7 @@ public: const auto & key_names = !is_join_get ? onexprs[i].key_names_left : onexprs[i].key_names_right; join_on_keys.emplace_back(block, key_names, onexprs[i].condColumnNames().first, join.key_sizes[i]); } - size_t existing_columns = block.columns(); + size_t existing_columns = block.block->columns(); /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. * Because if they are constants, then in the "not joined" rows, they may have different values @@ -129,7 +145,8 @@ public: */ if constexpr (join_features.right || join_features.full) { - materializeBlockInplace(block); + /// TODO: do materialization once before scattering the source block by hash + materializeBlockInplace(*block.block); } /** For LEFT/INNER JOIN, the saved blocks do not contain keys. 
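/// A minimal sketch of what filterBySelector amounts to for a single column,
/// assuming a plain std::vector column; gather is an illustrative name, not
/// patch code. The selector is applied once, producing a dense column that
/// contains only the selected rows, after which the selector is trivial again.
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> gather(const std::vector<T> & column, const std::vector<size_t> & selector)
{
    std::vector<T> result;
    result.reserve(selector.size());
    for (size_t idx : selector)
        result.push_back(column[idx]); /// keep the selected rows, in selector order
    return result;
}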
@@ -155,23 +172,24 @@ public: else added_columns.reserve(join_features.need_replication); - size_t num_joined = switchJoinRightColumns(maps_, added_columns, join.data->type, *join.used_flags); + const size_t num_joined = switchJoinRightColumns(maps_, added_columns, join.data->type, *join.used_flags); /// Do not hold memory for join_on_keys anymore added_columns.join_on_keys.clear(); - Block remaining_block = sliceBlock(block, num_joined); - + auto remaining_block = block.cut(num_joined); added_columns.buildOutput(); + + if constexpr (join_features.need_filter) + block.filter(added_columns.filter); + + block.filterBySelector(); + for (size_t i = 0; i < added_columns.size(); ++i) - block.insert(added_columns.moveColumn(i)); + block.block->insert(added_columns.moveColumn(i)); std::vector right_keys_to_replicate [[maybe_unused]]; if constexpr (join_features.need_filter) { - /// If ANY INNER | RIGHT JOIN - filter all the columns except the new ones. - for (size_t i = 0; i < existing_columns; ++i) - block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(added_columns.filter, -1); - /// Add join key columns from right block if needed using value from left table because of equality for (size_t i = 0; i < join.required_right_keys.columns(); ++i) { @@ -183,7 +201,7 @@ public: const auto & left_column = block.getByName(join.required_right_keys_sources[i]); const auto & right_col_name = join.getTableJoin().renamedRightColumnName(right_key.name); auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column); - block.insert(std::move(right_col)); + block.block->insert(std::move(right_col)); } } else if (has_required_right_keys) @@ -199,28 +217,17 @@ public: const auto & left_column = block.getByName(join.required_right_keys_sources[i]); auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column, &added_columns.filter); - block.insert(std::move(right_col)); + block.block->insert(std::move(right_col)); if constexpr (join_features.need_replication) - right_keys_to_replicate.push_back(block.getPositionByName(right_col_name)); + right_keys_to_replicate.push_back(block.block->getPositionByName(right_col_name)); } } if constexpr (join_features.need_replication) { std::unique_ptr & offsets_to_replicate = added_columns.offsets_to_replicate; - - /// If ALL ... JOIN - we replicate all the columns except the new ones. 
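/// A minimal sketch of the replication that these removed lines performed,
/// assuming a plain std::vector column; replicateColumn is an illustrative name,
/// not patch code. Offsets are cumulative match counts per source row: values
/// [a, b, c] with offsets [2, 2, 5] expand to [a, a, c, c, c].
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> replicateColumn(const std::vector<T> & column, const std::vector<size_t> & offsets)
{
    std::vector<T> result;
    result.reserve(offsets.empty() ? 0 : offsets.back());
    size_t prev = 0;
    for (size_t row = 0; row < column.size(); ++row)
    {
        for (size_t j = prev; j < offsets[row]; ++j)
            result.push_back(column[row]); /// repeat each row by its match count
        prev = offsets[row];
    }
    return result;
}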
- for (size_t i = 0; i < existing_columns; ++i) - { - block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->replicate(*offsets_to_replicate); - } - - /// Replicate additional right keys - for (size_t pos : right_keys_to_replicate) - { - block.safeGetByPosition(pos).column = block.safeGetByPosition(pos).column->replicate(*offsets_to_replicate); - } + block.replicate(*offsets_to_replicate, existing_columns, right_keys_to_replicate); } return remaining_block; @@ -244,8 +251,16 @@ private: template static size_t NO_INLINE insertFromBlockImplTypeCase( - HashJoin & join, HashMap & map, size_t rows, const ColumnRawPtrs & key_columns, - const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, bool & is_inserted) + HashJoin & join, + HashMap & map, + const ColumnRawPtrs & key_columns, + const Sizes & key_sizes, + Block * stored_block, + const IColumn::Selector & selector, + ConstNullMapPtr null_map, + UInt8ColumnDataPtr join_mask, + Arena & pool, + bool & is_inserted) { [[maybe_unused]] constexpr bool mapped_one = std::is_same_v; constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; @@ -259,9 +274,10 @@ private: /// For ALL and ASOF join always insert values is_inserted = !mapped_one || is_asof_join; - for (size_t i = 0; i < rows; ++i) + for (size_t ind : selector) { - if (null_map && (*null_map)[i]) + chassert(!null_map || ind < null_map->size()); + if (null_map && (*null_map)[ind]) { /// nulls are not inserted into hash table, /// keep them for RIGHT and FULL joins @@ -270,15 +286,16 @@ private: } /// Check condition for right table from ON section - if (join_mask && !(*join_mask)[i]) + chassert(!join_mask || ind < join_mask->size()); + if (join_mask && !(*join_mask)[ind]) continue; if constexpr (is_asof_join) - Inserter::insertAsof(join, map, key_getter, stored_block, i, pool, *asof_column); + Inserter::insertAsof(join, map, key_getter, stored_block, ind, pool, *asof_column); else if constexpr (mapped_one) - is_inserted |= Inserter::insertOne(join, map, key_getter, stored_block, i, pool); + is_inserted |= Inserter::insertOne(join, map, key_getter, stored_block, ind, pool); else - Inserter::insertAll(join, map, key_getter, stored_block, i, pool); + Inserter::insertAll(join, map, key_getter, stored_block, ind, pool); } return map.getBufferSizeInCells(); } @@ -396,7 +413,8 @@ private: { constexpr JoinFeatures join_features; - size_t rows = added_columns.rows_to_add; + auto & block = added_columns.src_block; + size_t rows = block.rows(); if constexpr (need_filter) added_columns.filter = IColumn::Filter(rows, 0); @@ -410,12 +428,14 @@ private: size_t i = 0; for (; i < rows; ++i) { + const auto ind = block.selector[i]; + if constexpr (join_features.need_replication) { if (unlikely(current_offset >= max_joined_block_rows)) { - added_columns.offsets_to_replicate->resize_assume_reserved(i); - added_columns.filter.resize_assume_reserved(i); + added_columns.offsets_to_replicate->resize_assume_reserved(ind); + added_columns.filter.resize_assume_reserved(ind); break; } } @@ -426,12 +446,12 @@ private: for (size_t onexpr_idx = 0; onexpr_idx < added_columns.join_on_keys.size(); ++onexpr_idx) { const auto & join_keys = added_columns.join_on_keys[onexpr_idx]; - if (join_keys.null_map && (*join_keys.null_map)[i]) - continue; + if (join_keys.null_map && (*join_keys.null_map)[ind]) + continue; - bool row_acceptable = !join_keys.isRowFiltered(i); + bool row_acceptable = !join_keys.isRowFiltered(ind); using FindResult = typename 
KeyGetter::FindResult; - auto find_result = row_acceptable ? key_getter_vector[onexpr_idx].findKey(*(mapv[onexpr_idx]), i, pool) : FindResult(); + auto find_result = row_acceptable ? key_getter_vector[onexpr_idx].findKey(*(mapv[onexpr_idx]), ind, pool) : FindResult(); if (find_result.isFound()) { @@ -441,7 +461,7 @@ private: { const IColumn & left_asof_key = added_columns.leftAsofKey(); - auto row_ref = mapped->findAsof(left_asof_key, i); + auto row_ref = mapped->findAsof(left_asof_key, ind); if (row_ref.block) { setUsed(added_columns.filter, i); @@ -834,23 +854,6 @@ private: return left_row_iter; } - /// Cut first num_rows rows from block in place and returns block with remaining rows - static Block sliceBlock(Block & block, size_t num_rows) - { - size_t total_rows = block.rows(); - if (num_rows >= total_rows) - return {}; - size_t remaining_rows = total_rows - num_rows; - Block remaining_block = block.cloneEmpty(); - for (size_t i = 0; i < block.columns(); ++i) - { - auto & col = block.getByPosition(i); - remaining_block.getByPosition(i).column = col.column->cut(num_rows, remaining_rows); - col.column = col.column->cut(0, num_rows); - } - return remaining_block; - } - /** Since we do not store right key columns, * this function is used to copy left key columns to right key columns. * If the user requests some right columns, we just copy left key columns to right, since they are equal. From 54dd6aa7ee9fd11dbc981f48c40836e2edbaf7c9 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 25 Jul 2024 14:57:24 +0100 Subject: [PATCH 006/267] stash --- src/Interpreters/HashJoin/HashJoin.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 008a1425c4e..91239d1fa14 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -252,7 +252,7 @@ public: *this = ScatteredBlock{block->cloneWithColumns(std::move(columns))}; } - // private: + private: IColumn::Selector createTrivialSelector(size_t size) { IColumn::Selector res(size); From 2850f7aaa3cb5f60eecc4bf0452359cd962a4ee4 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 25 Jul 2024 17:58:22 +0100 Subject: [PATCH 007/267] stash 2 --- src/Interpreters/ConcurrentHashJoin.cpp | 6 +-- src/Interpreters/HashJoin/AddedColumns.cpp | 4 +- src/Interpreters/HashJoin/AddedColumns.h | 4 +- src/Interpreters/HashJoin/HashJoin.cpp | 10 ++-- src/Interpreters/HashJoin/HashJoin.h | 55 +++++++++++++-------- src/Interpreters/HashJoin/HashJoinMethods.h | 19 +++---- 6 files changed, 55 insertions(+), 43 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 08f4f422496..add0b08666a 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -60,10 +60,8 @@ Block concatenateBlocks(const HashJoin::ScatteredBlocks & blocks) Blocks inner_blocks; for (const auto & block : blocks) { - chassert(!block.wasScattered()); - if (block.wasScattered()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Not scattered block is expected here"); - inner_blocks.push_back(*block.block); + chassert(!block.wasScattered(), "Not scattered block is expected here"); + inner_blocks.push_back(block.getSourceBlock()); } return concatenateBlocks(inner_blocks); } diff --git a/src/Interpreters/HashJoin/AddedColumns.cpp b/src/Interpreters/HashJoin/AddedColumns.cpp index 45185021ded..78b8602ce0e 100644 --- a/src/Interpreters/HashJoin/AddedColumns.cpp +++ 
b/src/Interpreters/HashJoin/AddedColumns.cpp @@ -8,11 +8,11 @@ JoinOnKeyColumns::JoinOnKeyColumns( : block(block_) , key_names(key_names_) /// Rare case, when keys are constant or low cardinality. To avoid code bloat, simply materialize them. - , materialized_keys_holder(JoinCommon::materializeColumns(*block.block, key_names)) + , materialized_keys_holder(JoinCommon::materializeColumns(block.getSourceBlock(), key_names)) , key_columns(JoinCommon::getRawPointers(materialized_keys_holder)) , null_map(nullptr) , null_map_holder(extractNestedColumnsAndNullMap(key_columns, null_map)) - , join_mask_column(JoinCommon::getColumnAsMask(*block.block, cond_column_name)) + , join_mask_column(JoinCommon::getColumnAsMask(block.getSourceBlock(), cond_column_name)) , key_sizes(key_sizes_) { } diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index daba6c4f2e0..fb6e32efbb8 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -34,7 +34,7 @@ struct JoinOnKeyColumns bool isRowFiltered(size_t i) const { - chassert(std::ranges::find(block.selector, i) != block.selector.end(), fmt::format("Row {} is not in block", i)); + chassert(std::ranges::find(block.getSelector(), i) != block.getSelector().end(), fmt::format("Row {} is not in block", i)); return join_mask_column.isRowFiltered(i); } }; @@ -71,7 +71,7 @@ public: bool is_asof_join, bool is_join_get_) : src_block(left_block_) - , left_block(*left_block_.block) + , left_block(left_block_.getSourceBlock()) , join_on_keys(join_on_keys_) , additional_filter_expression(additional_filter_expression_) , rows_to_add(left_block.rows()) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index e14b4d5bb01..b45dc3a08ef 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -506,7 +506,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) all_key_columns[column_name] = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); } - Block block_to_save = prepareRightBlock(*source_block.block); + Block block_to_save = prepareRightBlock(source_block.getSourceBlock()); if (shrink_blocks) block_to_save = block_to_save.shrinkToFit(); @@ -576,7 +576,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) save_nullmap |= (*null_map)[i]; } - auto join_mask_col = JoinCommon::getColumnAsMask(*source_block.block, onexprs[onexpr_idx].condColumnNames().second); + auto join_mask_col = JoinCommon::getColumnAsMask(source_block.getSourceBlock(), onexprs[onexpr_idx].condColumnNames().second); /// Save blocks that do not hold conditions in ON section ColumnUInt8::MutablePtr not_joined_map = nullptr; if (!flag_per_row && isRightOrFull(kind) && join_mask_col.hasData()) @@ -614,7 +614,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) key_columns, key_sizes[onexpr_idx], stored_block, - source_block.selector, + source_block.getSelector(), null_map, join_mask_col.getData(), data->pool, @@ -955,7 +955,7 @@ void HashJoin::joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed) { auto cond_column_name = onexpr.condColumnNames(); JoinCommon::checkTypesOfKeys( - *block.block, + block.getSourceBlock(), onexpr.key_names_left, cond_column_name.first, right_sample_block, @@ -979,7 +979,7 @@ void HashJoin::joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed) ScatteredBlock 
remaining_block = HashJoinMethods::joinBlockImpl( *this, block, sample_block_with_columns_to_add, maps_vector_); if (remaining_block.rows()) - not_processed = std::make_shared(ExtraBlock{std::move(*remaining_block.block)}); + not_processed = std::make_shared(std::move(remaining_block).getSourceBlock()); else not_processed.reset(); })) diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 91239d1fa14..85137c390f0 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -1,9 +1,10 @@ #pragma once -#include -#include -#include #include +#include +#include +#include +#include #include #include @@ -29,6 +30,7 @@ #include #include +#include <__ranges/ref_view.h> #include namespace DB @@ -141,19 +143,17 @@ public: struct ScatteredBlock : private boost::noncopyable { - BlockPtr block; // TODO: we don't need shared_ptr here since if any changes are made to block, they're supposed to be private - IColumn::Selector selector; + private: + Block block; // TODO: we don't need shared_ptr here since if any changes are made to block, they're supposed to be private - ScatteredBlock(const Block & block_) : block(std::make_shared(block_)), selector(createTrivialSelector(block->rows())) { } + public: + ScatteredBlock(const Block & block_) : block(block_), selector(createTrivialSelector(block.rows())) { } - ScatteredBlock(const Block & block_, IColumn::Selector && selector_) - : block(std::make_shared(block_)), selector(std::move(selector_)) - { - } + ScatteredBlock(const Block & block_, IColumn::Selector && selector_) : block(block_), selector(std::move(selector_)) { } ScatteredBlock(ScatteredBlock && other) noexcept : block(std::move(other.block)), selector(std::move(other.selector)) { - other.block = nullptr; + other.block.clear(); other.selector.clear(); } @@ -164,13 +164,20 @@ public: block = std::move(other.block); selector = std::move(other.selector); - other.block = nullptr; + other.block.clear(); other.selector.clear(); } return *this; } - operator bool() const { return block && *block; } + Block & getSourceBlock() & { return block; } + const Block & getSourceBlock() const & { return block; } + + Block && getSourceBlock() && { return std::move(block); } + + const auto & getSelector() const { return selector; } + + operator bool() const { return !!block; } /// Accounts only selected rows size_t rows() const { return selector.size(); } @@ -179,19 +186,19 @@ public: bool wasScattered() const { chassert(block); - return selector.size() != block->rows(); + return selector.size() != block.rows(); } const ColumnWithTypeAndName & getByName(const std::string & name) const { chassert(block); - return block->getByName(name); + return block.getByName(name); } /// Filters selector by mask discarding rows for which filter is false void filter(const IColumn::Filter & filter) { - chassert(block && block->rows() == filter.size()); + chassert(block && block.rows() == filter.size()); auto it = std::remove_if(selector.begin(), selector.end(), [&](size_t idx) { return !filter[idx]; }); selector.resize(std::distance(selector.begin(), it)); } @@ -200,7 +207,7 @@ public: void filterBySelector() { chassert(block); - auto columns = block->getColumns(); + auto columns = block.getColumns(); for (auto & col : columns) { auto c = col->cloneEmpty(); @@ -211,7 +218,9 @@ public: col = std::move(c); } - *this = ScatteredBlock{block->cloneWithColumns(std::move(columns))}; + /// We have to to id that way because references to the block should remain valid + 
block.setColumns(std::move(columns)); + selector = createTrivialSelector(block.rows()); } /// Cut first num_rows rows from block in place and returns block with remaining rows @@ -225,7 +234,7 @@ public: chassert(block); IColumn::Selector remaining_selector(selector.begin() + num_rows, selector.end()); - auto remaining = ScatteredBlock{*block, std::move(remaining_selector)}; + auto remaining = ScatteredBlock{block, std::move(remaining_selector)}; selector.erase(selector.begin() + num_rows, selector.end()); @@ -237,7 +246,7 @@ public: chassert(block); chassert(offsets.size() == rows()); - auto columns = block->getColumns(); + auto columns = block.getColumns(); for (size_t i = 0; i < existing_columns; ++i) { auto c = columns[i]->replicate(offsets); @@ -249,10 +258,14 @@ public: columns[pos] = std::move(c); } - *this = ScatteredBlock{block->cloneWithColumns(std::move(columns))}; + /// We have to to id that way because references to the block should remain valid + block.setColumns(std::move(columns)); + selector = createTrivialSelector(block.rows()); } private: + IColumn::Selector selector; + IColumn::Selector createTrivialSelector(size_t size) { IColumn::Selector res(size); diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 317dde83314..c6ce10b5bad 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -117,8 +117,8 @@ public: auto ret = joinBlockImpl(join, scattered_block, block_with_columns_to_add, maps_, is_join_get); ret.filterBySelector(); scattered_block.filterBySelector(); - block = std::move(*scattered_block.block); - return *ret.block; + block = std::move(scattered_block.getSourceBlock()); + return ret.getSourceBlock(); } static HashJoin::ScatteredBlock joinBlockImpl( @@ -137,7 +137,8 @@ public: const auto & key_names = !is_join_get ? onexprs[i].key_names_left : onexprs[i].key_names_right; join_on_keys.emplace_back(block, key_names, onexprs[i].condColumnNames().first, join.key_sizes[i]); } - size_t existing_columns = block.block->columns(); + auto & source_block = block.getSourceBlock(); + size_t existing_columns = source_block.columns(); /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. * Because if they are constants, then in the "not joined" rows, they may have different values @@ -146,7 +147,7 @@ public: if constexpr (join_features.right || join_features.full) { /// TODO: do materialization once before scattering the source block by hash - materializeBlockInplace(*block.block); + materializeBlockInplace(source_block); } /** For LEFT/INNER JOIN, the saved blocks do not contain keys. 
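/// A minimal sketch of ScatteredBlock::cut over a plain selector vector;
/// cutSelector is an illustrative name, not patch code. The first num_rows
/// selected rows stay in the current block, and the rest form the remaining
/// part that is returned (and later handed back as not_processed).
#include <cstddef>
#include <utility>
#include <vector>

std::pair<std::vector<size_t>, std::vector<size_t>> cutSelector(std::vector<size_t> selector, size_t num_rows)
{
    if (num_rows >= selector.size())
        return {std::move(selector), {}};
    std::vector<size_t> remaining(selector.begin() + num_rows, selector.end());
    selector.resize(num_rows);
    return {std::move(selector), std::move(remaining)};
}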
@@ -184,7 +185,7 @@ public: block.filterBySelector(); for (size_t i = 0; i < added_columns.size(); ++i) - block.block->insert(added_columns.moveColumn(i)); + source_block.insert(added_columns.moveColumn(i)); std::vector right_keys_to_replicate [[maybe_unused]]; @@ -201,7 +202,7 @@ public: const auto & left_column = block.getByName(join.required_right_keys_sources[i]); const auto & right_col_name = join.getTableJoin().renamedRightColumnName(right_key.name); auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column); - block.block->insert(std::move(right_col)); + source_block.insert(std::move(right_col)); } } else if (has_required_right_keys) @@ -217,10 +218,10 @@ public: const auto & left_column = block.getByName(join.required_right_keys_sources[i]); auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column, &added_columns.filter); - block.block->insert(std::move(right_col)); + source_block.insert(std::move(right_col)); if constexpr (join_features.need_replication) - right_keys_to_replicate.push_back(block.block->getPositionByName(right_col_name)); + right_keys_to_replicate.push_back(source_block.getPositionByName(right_col_name)); } } @@ -428,7 +429,7 @@ private: size_t i = 0; for (; i < rows; ++i) { - const auto ind = block.selector[i]; + const auto ind = block.getSelector()[i]; if constexpr (join_features.need_replication) { From a4ec9d074f6384b3c7022b0e3f824aad19bb7de1 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 25 Jul 2024 22:55:05 +0100 Subject: [PATCH 008/267] stash 3 --- src/Interpreters/ConcurrentHashJoin.cpp | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index add0b08666a..25681999d01 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -66,6 +66,27 @@ Block concatenateBlocks(const HashJoin::ScatteredBlocks & blocks) return concatenateBlocks(inner_blocks); } +Block materializeColumnsFromSampleBlock(const Block & block_, const Block & sample_block) +{ + Block block = block_; + for (const auto & sample_column : sample_block.getColumnsWithTypeAndName()) + { + auto & column = block.getByName(sample_column.name); + + /// There's no optimization for right side const columns. Remove constness if any. + column.column = recursiveRemoveSparse(column.column->convertToFullColumnIfConst()); + + if (column.column->lowCardinality() && !sample_column.column->lowCardinality()) + { + column.column = column.column->convertToFullColumnIfLowCardinality(); + column.type = removeLowCardinality(column.type); + } + + if (sample_column.column->isNullable()) + JoinCommon::convertColumnToNullable(column); + } + return block; +} } namespace DB @@ -176,8 +197,10 @@ ConcurrentHashJoin::~ConcurrentHashJoin() } } -bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block, bool check_limits) +bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_limits) { + /// We prematurely materialize columns here to avoid materializing columns multiple times on each thread. 
+ Block right_block = materializeColumnsFromSampleBlock(right_block_, hash_joins[0]->data->savedBlockSample()); auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block); size_t blocks_left = 0; From 6a099996d2d62b4d7ad05c9855cdb04ebd4f7705 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 25 Jul 2024 23:16:25 +0100 Subject: [PATCH 009/267] stash 4 --- src/Interpreters/ConcurrentHashJoin.cpp | 13 +++++++++++-- src/Interpreters/HashJoin/HashJoin.cpp | 1 - src/Interpreters/HashJoin/HashJoin.h | 9 +++------ 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 25681999d01..1394bfaf323 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -66,9 +66,10 @@ Block concatenateBlocks(const HashJoin::ScatteredBlocks & blocks) return concatenateBlocks(inner_blocks); } -Block materializeColumnsFromSampleBlock(const Block & block_, const Block & sample_block) +Block materializeColumnsFromSampleBlock(const Block & block_, const Block & sample_block, const Names & right_key_names) { Block block = block_; + for (const auto & sample_column : sample_block.getColumnsWithTypeAndName()) { auto & column = block.getByName(sample_column.name); @@ -85,6 +86,13 @@ Block materializeColumnsFromSampleBlock(const Block & block_, const Block & samp if (sample_column.column->isNullable()) JoinCommon::convertColumnToNullable(column); } + + for (const auto & column_name : right_key_names) + { + auto & column = block.getByName(column_name).column; + column = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); + } + return block; } } @@ -200,7 +208,8 @@ ConcurrentHashJoin::~ConcurrentHashJoin() bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_limits) { /// We prematurely materialize columns here to avoid materializing columns multiple times on each thread. 
- Block right_block = materializeColumnsFromSampleBlock(right_block_, hash_joins[0]->data->savedBlockSample()); + Block right_block = materializeColumnsFromSampleBlock( + right_block_, hash_joins[0]->data->savedBlockSample(), table_join->getAllNames(JoinTableSide::Right)); auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block); size_t blocks_left = 0; diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index b45dc3a08ef..8a9089f376a 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -502,7 +502,6 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) for (const auto & column_name : right_key_names) { const auto & column = source_block.getByName(column_name).column; - /// TODO: do it once for the original block before splitting all_key_columns[column_name] = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); } diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 85137c390f0..a8747ba8f3b 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -143,10 +143,6 @@ public: struct ScatteredBlock : private boost::noncopyable { - private: - Block block; // TODO: we don't need shared_ptr here since if any changes are made to block, they're supposed to be private - - public: ScatteredBlock(const Block & block_) : block(block_), selector(createTrivialSelector(block.rows())) { } ScatteredBlock(const Block & block_, IColumn::Selector && selector_) : block(block_), selector(std::move(selector_)) { } @@ -264,14 +260,15 @@ public: } private: - IColumn::Selector selector; - IColumn::Selector createTrivialSelector(size_t size) { IColumn::Selector res(size); std::iota(res.begin(), res.end(), 0); return res; } + + Block block; + IColumn::Selector selector; }; using ScatteredBlocks = std::vector; From 2fb3ec7abe4dc7883e1540166737c818bc0e458d Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 25 Jul 2024 23:41:24 +0100 Subject: [PATCH 010/267] stash 5 --- src/Interpreters/ConcurrentHashJoin.cpp | 5 ++++- src/Interpreters/HashJoin/HashJoinMethods.h | 1 - 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 1394bfaf323..7fad00bbd32 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -210,8 +210,8 @@ bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_l /// We prematurely materialize columns here to avoid materializing columns multiple times on each thread. 
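/// Materialization here means converting Const/Sparse/LowCardinality columns to
/// full ones (see materializeColumnsFromSampleBlock above). After selector-based
/// dispatch all shards share the same source block, so without this step each of
/// the worker threads would repeat the same conversion on identical data.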
Block right_block = materializeColumnsFromSampleBlock( right_block_, hash_joins[0]->data->savedBlockSample(), table_join->getAllNames(JoinTableSide::Right)); - auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block); + auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block); size_t blocks_left = 0; for (const auto & block : dispatched_blocks) { @@ -254,6 +254,9 @@ bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_l void ConcurrentHashJoin::joinBlock(Block & block, std::shared_ptr & /*not_processed*/) { + if (hash_joins[0]->data->getKind() == JoinKind::Right || hash_joins[0]->data->getKind() == JoinKind::Full) + materializeBlockInplace(block); + auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_left, block); block = {}; for (size_t i = 0; i < dispatched_blocks.size(); ++i) diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index c6ce10b5bad..5acf0c51d3d 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -146,7 +146,6 @@ public: */ if constexpr (join_features.right || join_features.full) { - /// TODO: do materialization once before scattering the source block by hash materializeBlockInplace(source_block); } From 83b79fce832ef4bd3fa230ae8a66d23935724985 Mon Sep 17 00:00:00 2001 From: qhsong Date: Wed, 8 May 2024 16:18:13 +0800 Subject: [PATCH 011/267] Add repeatable uniq ID for processor and step --- src/Common/ThreadStatus.cpp | 10 +++++++ src/Common/ThreadStatus.h | 9 +++++++ src/Interpreters/Context.h | 2 ++ src/Interpreters/ProcessorsProfileLog.cpp | 4 +++ src/Interpreters/ProcessorsProfileLog.h | 4 ++- src/Interpreters/executeQuery.cpp | 2 ++ .../Executors/ExecutionThreadContext.cpp | 2 +- src/Processors/IProcessor.h | 26 ++++++++++++++++--- src/Processors/QueryPlan/IQueryPlanStep.h | 12 ++++++++- src/Processors/QueryPlan/QueryPlan.cpp | 1 + src/QueryPipeline/QueryPipelineBuilder.cpp | 4 +-- src/QueryPipeline/printPipeline.cpp | 4 +-- src/QueryPipeline/printPipeline.h | 2 +- .../01786_explain_merge_tree.reference | 2 ++ 14 files changed, 73 insertions(+), 11 deletions(-) diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index ad96018a17e..74b5475da77 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -221,6 +221,16 @@ bool ThreadStatus::isQueryCanceled() const return false; } +size_t ThreadStatus::incrStepIndex() +{ + return ++(*local_data.step_count); +} + +size_t ThreadStatus::incrProcessorIndex() +{ + return ++(*local_data.processor_count); +} + ThreadStatus::~ThreadStatus() { flushUntrackedMemory(); diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 0c02ab8fdb0..97b45c01e54 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -10,6 +10,7 @@ #include +#include #include #include #include @@ -90,6 +91,11 @@ public: String query_for_logs; UInt64 normalized_query_hash = 0; + //QueryPlan can not build parallel, but processor may build parallel in expand() function. 
+ //so we use atomic_size_t for processor_count + std::shared_ptr step_count = std::make_shared(0); + std::shared_ptr processor_count = std::make_shared(0); + QueryIsCanceledPredicate query_is_canceled_predicate = {}; }; @@ -309,6 +315,9 @@ public: void initGlobalProfiler(UInt64 global_profiler_real_time_period, UInt64 global_profiler_cpu_time_period); + size_t incrStepIndex(); + size_t incrProcessorIndex(); + private: void applyGlobalSettings(); void applyQuerySettings(); diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index d1ff5b4c2b2..692d71a3384 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -1336,7 +1336,9 @@ private: std::shared_ptr getClustersImpl(std::lock_guard & lock) const; /// Throttling + public: + ThrottlerPtr getReplicatedFetchesThrottler() const; ThrottlerPtr getReplicatedSendsThrottler() const; diff --git a/src/Interpreters/ProcessorsProfileLog.cpp b/src/Interpreters/ProcessorsProfileLog.cpp index 7dec2a3163a..8e1cf278c63 100644 --- a/src/Interpreters/ProcessorsProfileLog.cpp +++ b/src/Interpreters/ProcessorsProfileLog.cpp @@ -42,6 +42,8 @@ ColumnsDescription ProcessorProfileLogElement::getColumnsDescription() {"input_bytes", std::make_shared(), "The number of bytes consumed by processor."}, {"output_rows", std::make_shared(), "The number of rows generated by processor."}, {"output_bytes", std::make_shared(), "The number of bytes generated by processor."}, + {"processor_uniq_id", std::make_shared(), "The uniq processor id in pipeline."}, + {"step_uniq_id", std::make_shared(), "The uniq step id in plan."}, }; } @@ -75,6 +77,8 @@ void ProcessorProfileLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(input_bytes); columns[i++]->insert(output_rows); columns[i++]->insert(output_bytes); + columns[i++]->insert(processor_uniq_id); + columns[i++]->insert(step_uniq_id); } diff --git a/src/Interpreters/ProcessorsProfileLog.h b/src/Interpreters/ProcessorsProfileLog.h index 8319d373f39..1b2abaa8ede 100644 --- a/src/Interpreters/ProcessorsProfileLog.h +++ b/src/Interpreters/ProcessorsProfileLog.h @@ -17,12 +17,14 @@ struct ProcessorProfileLogElement UInt64 id{}; std::vector parent_ids; - UInt64 plan_step{}; + UInt64 plan_step; UInt64 plan_group{}; String initial_query_id; String query_id; String processor_name; + String processor_uniq_id; + String step_uniq_id; /// Milliseconds spend in IProcessor::work() UInt32 elapsed_us{}; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 33a4cf2a74c..59573e912e4 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -460,6 +460,8 @@ void logQueryFinish( processor_elem.plan_step = reinterpret_cast(processor->getQueryPlanStep()); processor_elem.plan_group = processor->getQueryPlanStepGroup(); + processor_elem.processor_uniq_id = processor->getUniqID(); + processor_elem.step_uniq_id = processor->getStepUniqID(); processor_elem.processor_name = processor->getName(); diff --git a/src/Processors/Executors/ExecutionThreadContext.cpp b/src/Processors/Executors/ExecutionThreadContext.cpp index 05669725f9a..06b4b53c817 100644 --- a/src/Processors/Executors/ExecutionThreadContext.cpp +++ b/src/Processors/Executors/ExecutionThreadContext.cpp @@ -79,7 +79,7 @@ bool ExecutionThreadContext::executeTask() if (trace_processors) { - span = std::make_unique(node->processor->getName()); + span = std::make_unique(node->processor->getUniqID()); span->addAttribute("thread_number", thread_number); } std::optional 
execution_time_watch; diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 56b4509fe00..b99ebeb5fa5 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -3,7 +3,9 @@ #include #include #include - +#include +#include +#include class EventCounter; @@ -121,7 +123,10 @@ protected: OutputPorts outputs; public: - IProcessor() = default; + IProcessor() + { + setProcessorIndex(); + } IProcessor(InputPorts inputs_, OutputPorts outputs_) : inputs(std::move(inputs_)), outputs(std::move(outputs_)) @@ -130,9 +135,16 @@ public: port.processor = this; for (auto & port : outputs) port.processor = this; + setProcessorIndex(); + } + + void setProcessorIndex() + { + processor_index = CurrentThread::get().incrProcessorIndex(); } virtual String getName() const = 0; + String getUniqID() const { return fmt::format("{}_{}", getName(), processor_index); } enum class Status { @@ -300,11 +312,16 @@ public: /// Step of QueryPlan from which processor was created. void setQueryPlanStep(IQueryPlanStep * step, size_t group = 0) { - query_plan_step = step; + if (step != nullptr) + { + query_plan_step = step; + step_uniq_id = step->getUniqID(); + } query_plan_step_group = group; } IQueryPlanStep * getQueryPlanStep() const { return query_plan_step; } + const String &getStepUniqID() const { return step_uniq_id; } size_t getQueryPlanStepGroup() const { return query_plan_step_group; } uint64_t getElapsedUs() const { return elapsed_us; } @@ -392,7 +409,10 @@ private: size_t stream_number = NO_STREAM; IQueryPlanStep * query_plan_step = nullptr; + String step_uniq_id; size_t query_plan_step_group = 0; + + size_t processor_index = 0; }; diff --git a/src/Processors/QueryPlan/IQueryPlanStep.h b/src/Processors/QueryPlan/IQueryPlanStep.h index ac5ea259d2e..ec5ac9ad4dc 100644 --- a/src/Processors/QueryPlan/IQueryPlanStep.h +++ b/src/Processors/QueryPlan/IQueryPlanStep.h @@ -2,6 +2,9 @@ #include #include #include +#include +#include +#include namespace DB { @@ -71,6 +74,10 @@ using QueryPlanRawPtrs = std::list; class IQueryPlanStep { public: + IQueryPlanStep() + { + step_index = CurrentThread::get().incrStepIndex(); + } virtual ~IQueryPlanStep() = default; virtual String getName() const = 0; @@ -138,7 +145,7 @@ public: } virtual bool canUpdateInputStream() const { return false; } - + String getUniqID() const { return fmt::format("{}_{}", getName(), step_index); } protected: virtual void updateOutputStream() { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented"); } @@ -153,6 +160,9 @@ protected: Processors processors; static void describePipeline(const Processors & processors, FormatSettings & settings); + +private: + size_t step_index = 0; }; using QueryPlanStepPtr = std::unique_ptr; diff --git a/src/Processors/QueryPlan/QueryPlan.cpp b/src/Processors/QueryPlan/QueryPlan.cpp index 0fae7e8df4d..f651870453b 100644 --- a/src/Processors/QueryPlan/QueryPlan.cpp +++ b/src/Processors/QueryPlan/QueryPlan.cpp @@ -206,6 +206,7 @@ QueryPipelineBuilderPtr QueryPlan::buildQueryPipeline( static void explainStep(const IQueryPlanStep & step, JSONBuilder::JSONMap & map, const QueryPlan::ExplainPlanOptions & options) { map.add("Node Type", step.getName()); + map.add("Node Id", step.getUniqID()); if (options.description) { diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 67a8fe5dcab..4b6f15905ce 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -400,10 +400,10 @@ std::unique_ptr 
QueryPipelineBuilder::joinPipelinesRightLe left->pipe.collected_processors = collected_processors; - /// Collect the NEW processors for the right pipeline. - QueryPipelineProcessorsCollector collector(*right); /// Remember the last step of the right pipeline. IQueryPlanStep * step = right->pipe.processors->back()->getQueryPlanStep(); + /// Collect the NEW processors for the right pipeline. + QueryPipelineProcessorsCollector collector(*right, step); /// In case joined subquery has totals, and we don't, add default chunk to totals. bool default_totals = false; diff --git a/src/QueryPipeline/printPipeline.cpp b/src/QueryPipeline/printPipeline.cpp index 40c88502ed0..1726d776921 100644 --- a/src/QueryPipeline/printPipeline.cpp +++ b/src/QueryPipeline/printPipeline.cpp @@ -113,7 +113,7 @@ void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool if (item.first != nullptr) { out << " subgraph cluster_" << next_step << " {\n"; - out << " label =\"" << item.first->getName() << "\";\n"; + out << " label =\"" << item.first->getUniqID() << "\";\n"; out << " style=filled;\n"; out << " color=lightgrey;\n"; out << " node [style=filled,color=white];\n"; @@ -125,7 +125,7 @@ void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool for (const auto & node : item.second) { const auto & processor = node->agents.front(); - out << " n" << node->id << " [label=\"" << processor->getName(); + out << " n" << node->id << " [label=\"" << processor->getUniqID(); if (node->agents.size() > 1) out << " × " << node->agents.size(); diff --git a/src/QueryPipeline/printPipeline.h b/src/QueryPipeline/printPipeline.h index 2bdbd8f7a07..e6799251851 100644 --- a/src/QueryPipeline/printPipeline.h +++ b/src/QueryPipeline/printPipeline.h @@ -30,7 +30,7 @@ void printPipeline(const Processors & processors, const Statuses & statuses, Wri for (const auto & processor : processors) { const auto & description = processor->getDescription(); - out << " n" << get_proc_id(*processor) << "[label=\"" << processor->getName() << (description.empty() ? "" : ":") << description; + out << " n" << get_proc_id(*processor) << "[label=\"" << processor->getUniqID() << (description.empty() ? 
"" : ":") << description; if (statuses_iter != statuses.end()) { diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 3a015d32539..36ebbe1a1da 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -29,6 +29,7 @@ Granules: 2/3 ----------------- "Node Type": "ReadFromMergeTree", + "Node Id": "ReadFromMergeTree_0", "Description": "default.test_index", "Indexes": [ { @@ -126,6 +127,7 @@ Granules: 3/6 ----------------- "Node Type": "ReadFromMergeTree", + "Node Id": "ReadFromMergeTree_0", "Description": "default.test_index", "Indexes": [ { From 220d32039c028affc1c7378a971529a6e839e811 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 30 Jul 2024 22:16:37 +0100 Subject: [PATCH 012/267] stash 6 --- src/Interpreters/HashJoin/HashJoin.cpp | 66 ++++++++++++++++++-------- src/Interpreters/HashJoin/HashJoin.h | 3 +- 2 files changed, 47 insertions(+), 22 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 8a9089f376a..ddc6abe9d0d 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -77,6 +77,44 @@ Int64 getCurrentQueryMemoryUsage() return 0; } +Block filterColumnsPresentInSampleBlock(const Block & block, const Block & sample_block) +{ + Block filtered_block; + for (const auto & sample_column : sample_block.getColumnsWithTypeAndName()) + { + ColumnWithTypeAndName column = block.getByName(sample_column.name); + filtered_block.insert(std::move(column)); + } + return filtered_block; +} + +Block materializeColumnsFromRightBlock(Block block, const Block & sample_block, const Names & right_key_names) +{ + for (const auto & sample_column : sample_block.getColumnsWithTypeAndName()) + { + auto & column = block.getByName(sample_column.name); + + /// There's no optimization for right side const columns. Remove constness if any. + column.column = recursiveRemoveSparse(column.column->convertToFullColumnIfConst()); + + if (column.column->lowCardinality() && !sample_column.column->lowCardinality()) + { + column.column = column.column->convertToFullColumnIfLowCardinality(); + column.type = removeLowCardinality(column.type); + } + + if (sample_column.column->isNullable()) + JoinCommon::convertColumnToNullable(column); + } + + for (const auto & column_name : right_key_names) + { + auto & column = block.getByName(column_name).column; + column = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); + } + + return block; +} } static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable) @@ -411,29 +449,15 @@ void HashJoin::initRightBlockStructure(Block & saved_block_sample) } } +Block HashJoin::materializeColumnsFromRightBlock(Block block) const +{ + return DB::materializeColumnsFromRightBlock(std::move(block), savedBlockSample(), table_join->getAllNames(JoinTableSide::Right)); +} + Block HashJoin::prepareRightBlock(const Block & block, const Block & saved_block_sample_) { - Block structured_block; - for (const auto & sample_column : saved_block_sample_.getColumnsWithTypeAndName()) - { - ColumnWithTypeAndName column = block.getByName(sample_column.name); - - /// There's no optimization for right side const columns. Remove constness if any. 
- column.column = recursiveRemoveSparse(column.column->convertToFullColumnIfConst()); - - if (column.column->lowCardinality() && !sample_column.column->lowCardinality()) - { - column.column = column.column->convertToFullColumnIfLowCardinality(); - column.type = removeLowCardinality(column.type); - } - - if (sample_column.column->isNullable()) - JoinCommon::convertColumnToNullable(column); - - structured_block.insert(std::move(column)); - } - - return structured_block; + Block structured_block = DB::materializeColumnsFromRightBlock(block, saved_block_sample_, {}); + return filterColumnsPresentInSampleBlock(structured_block, saved_block_sample_); } Block HashJoin::prepareRightBlock(const Block & block) const diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index a8747ba8f3b..8d4513e4349 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -518,6 +518,8 @@ public: void setMaxJoinedBlockRows(size_t value) { max_joined_block_rows = value; } + Block materializeColumnsFromRightBlock(Block block) const; + private: friend class NotJoinedHash; @@ -596,5 +598,4 @@ private: void validateAdditionalFilterExpression(std::shared_ptr additional_filter_expression); bool needUsedFlagsForPerRightTableRow(std::shared_ptr table_join_) const; }; - } From 33af77cda79a595eea4610cc879cdae193a253ec Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 30 Jul 2024 22:20:42 +0100 Subject: [PATCH 013/267] stash 7 --- src/Interpreters/ConcurrentHashJoin.cpp | 32 +------------------------ 1 file changed, 1 insertion(+), 31 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 7fad00bbd32..772e14a9867 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -66,35 +66,6 @@ Block concatenateBlocks(const HashJoin::ScatteredBlocks & blocks) return concatenateBlocks(inner_blocks); } -Block materializeColumnsFromSampleBlock(const Block & block_, const Block & sample_block, const Names & right_key_names) -{ - Block block = block_; - - for (const auto & sample_column : sample_block.getColumnsWithTypeAndName()) - { - auto & column = block.getByName(sample_column.name); - - /// There's no optimization for right side const columns. Remove constness if any. - column.column = recursiveRemoveSparse(column.column->convertToFullColumnIfConst()); - - if (column.column->lowCardinality() && !sample_column.column->lowCardinality()) - { - column.column = column.column->convertToFullColumnIfLowCardinality(); - column.type = removeLowCardinality(column.type); - } - - if (sample_column.column->isNullable()) - JoinCommon::convertColumnToNullable(column); - } - - for (const auto & column_name : right_key_names) - { - auto & column = block.getByName(column_name).column; - column = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); - } - - return block; -} } namespace DB @@ -208,8 +179,7 @@ ConcurrentHashJoin::~ConcurrentHashJoin() bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_limits) { /// We prematurely materialize columns here to avoid materializing columns multiple times on each thread. 
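/*
 * Sketch of the two steps this hunk reorders: materialize the right block once, then
 * scatter its rows across the N internal hash joins by a weak hash of the join keys.
 * The modulo routing below is a simplification of the real hashToSelector(); the
 * helper name and the plain loop are assumptions made for illustration.
 */
IColumn::Selector sketchDispatch(const Block & materialized, const Names & key_names, size_t num_shards)
{
    WeakHash32 hash(materialized.rows());
    for (const auto & name : key_names)
        materialized.getByName(name).column->updateWeakHash32(hash); /// combine all key columns

    IColumn::Selector selector(materialized.rows());
    for (size_t row = 0; row < materialized.rows(); ++row)
        selector[row] = hash.getData()[row] % num_shards; /// shard index per row
    return selector;
}
/// Each shard then receives a ScatteredBlock that shares the same source columns and
/// differs only in its selector, so nothing is copied until a shard filters rows.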
- Block right_block = materializeColumnsFromSampleBlock( - right_block_, hash_joins[0]->data->savedBlockSample(), table_join->getAllNames(JoinTableSide::Right)); + Block right_block = hash_joins[0]->data->materializeColumnsFromRightBlock(right_block_); auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block); size_t blocks_left = 0; From ffd1a6c3a54ca5f223a5559cc9333b446e7d7f04 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 30 Jul 2024 23:00:47 +0100 Subject: [PATCH 014/267] stash 8 --- src/Interpreters/HashJoin/HashJoin.cpp | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index ddc6abe9d0d..1c08a951c6c 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -88,7 +88,7 @@ Block filterColumnsPresentInSampleBlock(const Block & block, const Block & sampl return filtered_block; } -Block materializeColumnsFromRightBlock(Block block, const Block & sample_block, const Names & right_key_names) +Block materializeColumnsFromRightBlock(Block block, const Block & sample_block, const Names &) { for (const auto & sample_column : sample_block.getColumnsWithTypeAndName()) { @@ -107,11 +107,11 @@ Block materializeColumnsFromRightBlock(Block block, const Block & sample_block, JoinCommon::convertColumnToNullable(column); } - for (const auto & column_name : right_key_names) - { - auto & column = block.getByName(column_name).column; - column = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); - } + // for (const auto & column_name : right_key_names) + // { + // auto & column = block.getByName(column_name).column; + // column = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); + // } return block; } @@ -467,7 +467,8 @@ Block HashJoin::prepareRightBlock(const Block & block) const bool HashJoin::addBlockToJoin(const Block & source_block, bool check_limits) { - auto scattered_block = ScatteredBlock{source_block}; + auto materialized = materializeColumnsFromRightBlock(source_block); + auto scattered_block = ScatteredBlock{materialized}; return addBlockToJoin(scattered_block, check_limits); } @@ -529,7 +530,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) all_key_columns[column_name] = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); } - Block block_to_save = prepareRightBlock(source_block.getSourceBlock()); + Block block_to_save = filterColumnsPresentInSampleBlock(source_block.getSourceBlock(), savedBlockSample()); if (shrink_blocks) block_to_save = block_to_save.shrinkToFit(); From 59bd7447fcb1db44bc77d93339b36dae684d5daf Mon Sep 17 00:00:00 2001 From: qhsong Date: Tue, 30 Jul 2024 10:23:51 +0800 Subject: [PATCH 015/267] Fix testcase --- src/Common/ThreadStatus.h | 2 +- src/Interpreters/Context.h | 2 -- src/Processors/IProcessor.h | 5 ++++- src/Processors/QueryPlan/IQueryPlanStep.h | 5 ++++- .../0_stateless/01786_explain_merge_tree.reference | 4 ++-- .../0_stateless/01823_explain_json.reference | 13 +++++++++++-- .../03213_distributed_analyzer.reference | 2 +- 7 files changed, 23 insertions(+), 10 deletions(-) diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 97b45c01e54..fd384ad1603 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -93,7 +93,7 @@ public: //QueryPlan can not 
build parallel, but processor may build parallel in expand() function. //so we use atomic_size_t for processor_count - std::shared_ptr step_count = std::make_shared(0); + std::shared_ptr step_count = std::make_shared(0); std::shared_ptr processor_count = std::make_shared(0); QueryIsCanceledPredicate query_is_canceled_predicate = {}; diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 1f331f0d094..cb553d07513 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -1404,9 +1404,7 @@ private: std::shared_ptr getClustersImpl(std::lock_guard & lock) const; /// Throttling - public: - ThrottlerPtr getReplicatedFetchesThrottler() const; ThrottlerPtr getReplicatedSendsThrottler() const; diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 3933a79ab55..d426d5ef9ba 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -140,7 +140,10 @@ public: void setProcessorIndex() { - processor_index = CurrentThread::get().incrProcessorIndex(); + if (CurrentThread::isInitialized()) [[likely]] + { + processor_index = CurrentThread::get().incrProcessorIndex(); + } } virtual String getName() const = 0; diff --git a/src/Processors/QueryPlan/IQueryPlanStep.h b/src/Processors/QueryPlan/IQueryPlanStep.h index acd8857b9df..500e0812983 100644 --- a/src/Processors/QueryPlan/IQueryPlanStep.h +++ b/src/Processors/QueryPlan/IQueryPlanStep.h @@ -76,7 +76,10 @@ class IQueryPlanStep public: IQueryPlanStep() { - step_index = CurrentThread::get().incrStepIndex(); + if (CurrentThread::isInitialized()) [[likely]] + { + step_index = CurrentThread::get().incrStepIndex(); + } } virtual ~IQueryPlanStep() = default; diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 36ebbe1a1da..75736669905 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -29,7 +29,7 @@ Granules: 2/3 ----------------- "Node Type": "ReadFromMergeTree", - "Node Id": "ReadFromMergeTree_0", + "Node Id": "ReadFromMergeTree_1", "Description": "default.test_index", "Indexes": [ { @@ -127,7 +127,7 @@ Granules: 3/6 ----------------- "Node Type": "ReadFromMergeTree", - "Node Id": "ReadFromMergeTree_0", + "Node Id": "ReadFromMergeTree_1", "Description": "default.test_index", "Indexes": [ { diff --git a/tests/queries/0_stateless/01823_explain_json.reference b/tests/queries/0_stateless/01823_explain_json.reference index 23fb34c2192..1aa5aa134e9 100644 --- a/tests/queries/0_stateless/01823_explain_json.reference +++ b/tests/queries/0_stateless/01823_explain_json.reference @@ -2,20 +2,25 @@ { "Plan": { "Node Type": "Union", + "Node Id": "Union_11", "Plans": [ { "Node Type": "Expression", + "Node Id": "Expression_14", "Plans": [ { - "Node Type": "ReadFromStorage" + "Node Type": "ReadFromStorage", + "Node Id": "ReadFromStorage_1" } ] }, { "Node Type": "Expression", + "Node Id": "Expression_17", "Plans": [ { - "Node Type": "ReadFromStorage" + "Node Type": "ReadFromStorage", + "Node Id": "ReadFromStorage_5" } ] } @@ -35,6 +40,7 @@ } -------- "Node Type": "Aggregating", + "Node Id": "Aggregating_4", "Header": [ { "Name": "__table1.number", @@ -73,13 +79,16 @@ ], -------- "Node Type": "ArrayJoin", + "Node Id": "ArrayJoin_5", "Left": false, "Columns": ["__table1.x", "__table1.y"], -------- "Node Type": "Distinct", + "Node Id": "Distinct_5", "Columns": ["intDiv(__table1.number, 2_UInt8)", "intDiv(__table1.number, 
3_UInt8)"], -- "Node Type": "Distinct", + "Node Id": "Distinct_4", "Columns": ["intDiv(__table1.number, 2_UInt8)", "intDiv(__table1.number, 3_UInt8)"], -------- "Sort Description": [ diff --git a/tests/queries/0_stateless/03213_distributed_analyzer.reference b/tests/queries/0_stateless/03213_distributed_analyzer.reference index 9d63c0a7a5e..2456192ca9d 100644 --- a/tests/queries/0_stateless/03213_distributed_analyzer.reference +++ b/tests/queries/0_stateless/03213_distributed_analyzer.reference @@ -1 +1 @@ -['digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote"];',' }','}','digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote"];',' }','}'] +['digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote_4"];',' }','}','digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote_4"];',' }','}'] From 3ca66293cab71fbc76396dcfafa5592181246538 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 7 Aug 2024 22:58:53 +0100 Subject: [PATCH 016/267] fix --- src/Interpreters/HashJoin/HashJoin.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 8d4513e4349..96a325ec367 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -225,7 +225,7 @@ public: SCOPE_EXIT(filterBySelector()); if (num_rows >= rows()) - return Block{}; + return block.cloneEmpty(); chassert(block); From cc379ce941c0e172541b40675d5fefcead73d255 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 8 Aug 2024 13:06:14 +0100 Subject: [PATCH 017/267] fix tidy --- src/Interpreters/ConcurrentHashJoin.cpp | 2 +- src/Interpreters/HashJoin/HashJoin.h | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 772e14a9867..cb069d2fac7 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -208,7 +208,7 @@ bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_l bool limit_exceeded = !hash_join->data->addBlockToJoin(dispatched_block, check_limits); - dispatched_block = Block{}; + dispatched_block = {}; blocks_left--; if (limit_exceeded) diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 96a325ec367..45e6a739a5d 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -143,7 +143,9 @@ public: struct ScatteredBlock : private boost::noncopyable { - ScatteredBlock(const Block & block_) : block(block_), selector(createTrivialSelector(block.rows())) { } + ScatteredBlock() = default; + + explicit ScatteredBlock(const Block & block_) : block(block_), selector(createTrivialSelector(block.rows())) { } ScatteredBlock(const Block & block_, IColumn::Selector && selector_) : block(block_), selector(std::move(selector_)) { } @@ -173,7 +175,7 @@ public: const auto & getSelector() const { return selector; } - operator bool() const { return !!block; } + explicit operator bool() const { return !!block; } /// Accounts only selected rows size_t rows() const { return selector.size(); } @@ -195,7 +197,7 @@ public: void filter(const IColumn::Filter & filter) { chassert(block && block.rows() == filter.size()); - auto it = std::remove_if(selector.begin(), selector.end(), [&](size_t idx) { return !filter[idx]; }); + auto * it = std::remove_if(selector.begin(), selector.end(), [&](size_t idx) { 
return !filter[idx]; }); selector.resize(std::distance(selector.begin(), it)); } @@ -215,7 +217,7 @@ public: } /// We have to to id that way because references to the block should remain valid - block.setColumns(std::move(columns)); + block.setColumns(columns); selector = createTrivialSelector(block.rows()); } @@ -225,7 +227,7 @@ public: SCOPE_EXIT(filterBySelector()); if (num_rows >= rows()) - return block.cloneEmpty(); + return ScatteredBlock{block.cloneEmpty()}; chassert(block); @@ -255,7 +257,7 @@ public: } /// We have to to id that way because references to the block should remain valid - block.setColumns(std::move(columns)); + block.setColumns(columns); selector = createTrivialSelector(block.rows()); } From 01179a1eb4cf37227fc85a8e073c20f07f7428a9 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 8 Aug 2024 13:53:08 +0100 Subject: [PATCH 018/267] disable check --- src/Interpreters/HashJoin/HashJoin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 1c08a951c6c..20002225cbc 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -379,7 +379,7 @@ size_t HashJoin::getTotalByteCount() const if (!data) return 0; -#ifndef NDEBUG +#if !defined(NDEBUG) && 0 size_t debug_blocks_allocated_size = 0; for (const auto & block : data->blocks) debug_blocks_allocated_size += block.allocatedBytes(); From 7f69df63fdd80d43d1eb0a9be5d7fc17101b04af Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 8 Aug 2024 15:00:34 +0100 Subject: [PATCH 019/267] small opt --- src/Interpreters/HashJoin/HashJoin.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 45e6a739a5d..a25e6b6fb7c 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -205,6 +206,10 @@ public: void filterBySelector() { chassert(block); + + if (!wasScattered()) + return; + auto columns = block.getColumns(); for (auto & col : columns) { From 6b021c6896d95d86f2076a1e07ac7d0420b168a4 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 8 Aug 2024 16:18:17 +0100 Subject: [PATCH 020/267] stash --- src/Interpreters/ConcurrentHashJoin.cpp | 5 ++- src/Interpreters/HashJoin/AddedColumns.h | 2 +- src/Interpreters/HashJoin/HashJoin.cpp | 48 +++++++++++------------- src/Interpreters/HashJoin/HashJoin.h | 7 ++-- 4 files changed, 29 insertions(+), 33 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index cb069d2fac7..07fb6904f97 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -60,7 +60,7 @@ Block concatenateBlocks(const HashJoin::ScatteredBlocks & blocks) Blocks inner_blocks; for (const auto & block : blocks) { - chassert(!block.wasScattered(), "Not scattered block is expected here"); + chassert(!block.wasScattered(), "Not scattered blocks are expected in join result"); inner_blocks.push_back(block.getSourceBlock()); } return concatenateBlocks(inner_blocks); @@ -178,7 +178,8 @@ ConcurrentHashJoin::~ConcurrentHashJoin() bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_limits) { - /// We prematurely materialize columns here to avoid materializing columns multiple times on each thread. 
+ /// We materialize columns here to avoid materializing them multiple times on different threads + /// (inside different `hash_join`-s) because the block will be shared. Block right_block = hash_joins[0]->data->materializeColumnsFromRightBlock(right_block_); auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block); diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index fb6e32efbb8..91db042bdb5 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -34,7 +34,7 @@ struct JoinOnKeyColumns bool isRowFiltered(size_t i) const { - chassert(std::ranges::find(block.getSelector(), i) != block.getSelector().end(), fmt::format("Row {} is not in block", i)); + chassert(block.contains(i), fmt::format("Row {} is not in block", i)); return join_mask_column.isRowFiltered(i); } }; diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 20002225cbc..4852be98743 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -81,10 +81,7 @@ Block filterColumnsPresentInSampleBlock(const Block & block, const Block & sampl { Block filtered_block; for (const auto & sample_column : sample_block.getColumnsWithTypeAndName()) - { - ColumnWithTypeAndName column = block.getByName(sample_column.name); - filtered_block.insert(std::move(column)); - } + filtered_block.insert(block.getByName(sample_column.name)); return filtered_block; } @@ -456,8 +453,8 @@ Block HashJoin::materializeColumnsFromRightBlock(Block block) const Block HashJoin::prepareRightBlock(const Block & block, const Block & saved_block_sample_) { - Block structured_block = DB::materializeColumnsFromRightBlock(block, saved_block_sample_, {}); - return filterColumnsPresentInSampleBlock(structured_block, saved_block_sample_); + Block prepared_block = DB::materializeColumnsFromRightBlock(block, saved_block_sample_, {}); + return filterColumnsPresentInSampleBlock(prepared_block, saved_block_sample_); } Block HashJoin::prepareRightBlock(const Block & block) const @@ -975,6 +972,8 @@ void HashJoin::joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed) if (!data) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot join after data has been released"); + chassert(kind == JoinKind::Left || kind == JoinKind::Inner); + for (const auto & onexpr : table_join->getClauses()) { auto cond_column_name = onexpr.condColumnNames(); @@ -987,31 +986,26 @@ void HashJoin::joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed) cond_column_name.second); } - chassert(kind == JoinKind::Left || kind == JoinKind::Inner); - std::vectormaps[0])> *> maps_vector; for (size_t i = 0; i < table_join->getClauses().size(); ++i) maps_vector.push_back(&data->maps[i]); - if (joinDispatch( - kind, - strictness, - maps_vector, - [&](auto kind_, auto strictness_, auto & maps_vector_) - { - using MapType = typename MapGetter::Map; - ScatteredBlock remaining_block = HashJoinMethods::joinBlockImpl( - *this, block, sample_block_with_columns_to_add, maps_vector_); - if (remaining_block.rows()) - not_processed = std::make_shared(std::move(remaining_block).getSourceBlock()); - else - not_processed.reset(); - })) - { - /// Joined - } - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong JOIN combination: {} {}", strictness, kind); + const bool joined = joinDispatch( + kind, + strictness, + maps_vector, + [&](auto kind_, auto strictness_, auto & maps_vector_) + { + using 
MapType = typename MapGetter::Map; + ScatteredBlock remaining_block + = HashJoinMethods::joinBlockImpl(*this, block, sample_block_with_columns_to_add, maps_vector_); + if (remaining_block.rows()) + not_processed = std::make_shared(std::move(remaining_block).getSourceBlock()); + else + not_processed.reset(); + }); + + chassert(joined); } HashJoin::~HashJoin() diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index a25e6b6fb7c..2f8a12a4a01 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -146,9 +146,9 @@ public: { ScatteredBlock() = default; - explicit ScatteredBlock(const Block & block_) : block(block_), selector(createTrivialSelector(block.rows())) { } + explicit ScatteredBlock(Block block_) : block(std::move(block_)), selector(createTrivialSelector(block.rows())) { } - ScatteredBlock(const Block & block_, IColumn::Selector && selector_) : block(block_), selector(std::move(selector_)) { } + ScatteredBlock(Block block_, IColumn::Selector && selector_) : block(std::move(block_)), selector(std::move(selector_)) { } ScatteredBlock(ScatteredBlock && other) noexcept : block(std::move(other.block)), selector(std::move(other.selector)) { @@ -176,6 +176,8 @@ public: const auto & getSelector() const { return selector; } + bool contains(size_t idx) const { return std::find(selector.begin(), selector.end(), idx) != selector.end(); } + explicit operator bool() const { return !!block; } /// Accounts only selected rows @@ -261,7 +263,6 @@ public: columns[pos] = std::move(c); } - /// We have to to id that way because references to the block should remain valid block.setColumns(columns); selector = createTrivialSelector(block.rows()); } From e8585a3740641874302d4ec545c2aa58b3935ead Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 9 Aug 2024 17:47:55 +0100 Subject: [PATCH 021/267] stash --- src/Interpreters/ConcurrentHashJoin.cpp | 3 +-- src/Interpreters/HashJoin/HashJoin.cpp | 17 +++++++++++++---- src/Interpreters/HashJoin/HashJoin.h | 1 + src/Interpreters/HashJoin/HashJoinMethods.h | 9 --------- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 07fb6904f97..ccbcb00a6a8 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -225,8 +225,7 @@ bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_l void ConcurrentHashJoin::joinBlock(Block & block, std::shared_ptr & /*not_processed*/) { - if (hash_joins[0]->data->getKind() == JoinKind::Right || hash_joins[0]->data->getKind() == JoinKind::Full) - materializeBlockInplace(block); + hash_joins[0]->data->materializeColumnsFromLeftBlock(block); auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_left, block); block = {}; diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 558441fb05a..aa3cc914913 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -446,6 +446,18 @@ void HashJoin::initRightBlockStructure(Block & saved_block_sample) } } +void HashJoin::materializeColumnsFromLeftBlock(Block & block) const +{ + /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. + * Because if they are constants, then in the "not joined" rows, they may have different values + * - default values, which can differ from the values of these constants. 
+ */ + if (kind == JoinKind::Right || kind == JoinKind::Full) + { + materializeBlockInplace(block); + } +} + Block HashJoin::materializeColumnsFromRightBlock(Block block) const { return DB::materializeColumnsFromRightBlock(std::move(block), savedBlockSample(), table_join->getAllNames(JoinTableSide::Right)); @@ -943,10 +955,7 @@ void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) return; } - if (kind == JoinKind::Right || kind == JoinKind::Full) - { - materializeBlockInplace(block); - } + materializeColumnsFromLeftBlock(block); { std::vectormaps[0])> * > maps_vector; diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 524f9925d83..bc31c4a434e 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -526,6 +526,7 @@ public: void setMaxJoinedBlockRows(size_t value) { max_joined_block_rows = value; } + void materializeColumnsFromLeftBlock(Block & block) const; Block materializeColumnsFromRightBlock(Block block) const; private: diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 5acf0c51d3d..7a290087b5e 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -140,15 +140,6 @@ public: auto & source_block = block.getSourceBlock(); size_t existing_columns = source_block.columns(); - /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. - * Because if they are constants, then in the "not joined" rows, they may have different values - * - default values, which can differ from the values of these constants. - */ - if constexpr (join_features.right || join_features.full) - { - materializeBlockInplace(source_block); - } - /** For LEFT/INNER JOIN, the saved blocks do not contain keys. * For FULL/RIGHT JOIN, the saved blocks contain keys; * but they will not be used at this stage of joining (and will be in `AdderNonJoined`), and they need to be skipped. From 5b187b62f0bca6f7f931bcbed1ba95dbec553814 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 9 Aug 2024 18:16:19 +0100 Subject: [PATCH 022/267] fix tidy --- src/Interpreters/ConcurrentHashJoin.h | 2 +- src/Interpreters/HashJoin/HashJoin.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.h b/src/Interpreters/ConcurrentHashJoin.h index 8e75bcd874b..5c32ebb32f0 100644 --- a/src/Interpreters/ConcurrentHashJoin.h +++ b/src/Interpreters/ConcurrentHashJoin.h @@ -47,7 +47,7 @@ public: std::string getName() const override { return "ConcurrentHashJoin"; } const TableJoin & getTableJoin() const override { return *table_join; } - bool addBlockToJoin(const Block & block, bool check_limits) override; + bool addBlockToJoin(const Block & right_block_, bool check_limits) override; void checkTypesOfKeys(const Block & block) const override; void joinBlock(Block & block, std::shared_ptr & not_processed) override; void setTotals(const Block & block) override; diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index aa3cc914913..1285cdf5035 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -506,7 +506,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) /// We support only INNER/LEFT ASOF join, so rows with NULLs never return from the right joined table. /// So filter them out here not to handle in implementation. 
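/*
 * Sketch of that filtering, under the simplified shape below: when the ASOF key is
 * Nullable, keep only rows whose key is not NULL by narrowing the scattered block's
 * selector. checkAndGetColumn and ScatteredBlock::filter are used as they appear in
 * these patches; the surrounding glue is an assumption for illustration.
 */
if (const auto * nullable = checkAndGetColumn<ColumnNullable>(asof_column.column.get()))
{
    const auto & null_map = nullable->getNullMapData();
    IColumn::Filter keep(null_map.size());
    for (size_t row = 0; row < null_map.size(); ++row)
        keep[row] = !null_map[row]; /// 1 = keep: the ASOF key is not NULL
    source_block.filter(keep);      /// shrinks the selector; columns stay untouched
}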
const auto & asof_key_name = table_join->getOnlyClause().key_names_right.back(); - auto & asof_column = source_block.getByName(asof_key_name); + const auto & asof_column = source_block.getByName(asof_key_name); if (asof_column.type->isNullable()) { From eb8af558f8e52d296311eff45ec212ccf7f0232b Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 9 Aug 2024 20:21:27 +0100 Subject: [PATCH 023/267] stash --- src/Interpreters/HashJoin/HashJoin.h | 163 ++------------------- src/Interpreters/HashJoin/ScatteredBlock.h | 151 +++++++++++++++++++ 2 files changed, 160 insertions(+), 154 deletions(-) create mode 100644 src/Interpreters/HashJoin/ScatteredBlock.h diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index bc31c4a434e..99f2e58069d 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -14,25 +14,19 @@ #include #include -#include -#include -#include -#include -#include - -#include #include - -#include - +#include #include - -#include +#include #include #include - -#include <__ranges/ref_view.h> -#include +#include +#include +#include +#include +#include +#include +#include namespace DB { @@ -142,145 +136,6 @@ public: return std::make_shared(table_join_, right_sample_block_, any_take_last_row, reserve_num, instance_id); } - struct ScatteredBlock : private boost::noncopyable - { - ScatteredBlock() = default; - - explicit ScatteredBlock(Block block_) : block(std::move(block_)), selector(createTrivialSelector(block.rows())) { } - - ScatteredBlock(Block block_, IColumn::Selector && selector_) : block(std::move(block_)), selector(std::move(selector_)) { } - - ScatteredBlock(ScatteredBlock && other) noexcept : block(std::move(other.block)), selector(std::move(other.selector)) - { - other.block.clear(); - other.selector.clear(); - } - - ScatteredBlock & operator=(ScatteredBlock && other) noexcept - { - if (this != &other) - { - block = std::move(other.block); - selector = std::move(other.selector); - - other.block.clear(); - other.selector.clear(); - } - return *this; - } - - Block & getSourceBlock() & { return block; } - const Block & getSourceBlock() const & { return block; } - - Block && getSourceBlock() && { return std::move(block); } - - const auto & getSelector() const { return selector; } - - bool contains(size_t idx) const { return std::find(selector.begin(), selector.end(), idx) != selector.end(); } - - explicit operator bool() const { return !!block; } - - /// Accounts only selected rows - size_t rows() const { return selector.size(); } - - /// Whether block was scattered, i.e. 
has non-trivial selector - bool wasScattered() const - { - chassert(block); - return selector.size() != block.rows(); - } - - const ColumnWithTypeAndName & getByName(const std::string & name) const - { - chassert(block); - return block.getByName(name); - } - - /// Filters selector by mask discarding rows for which filter is false - void filter(const IColumn::Filter & filter) - { - chassert(block && block.rows() == filter.size()); - auto * it = std::remove_if(selector.begin(), selector.end(), [&](size_t idx) { return !filter[idx]; }); - selector.resize(std::distance(selector.begin(), it)); - } - - /// Applies selector to block in place - void filterBySelector() - { - chassert(block); - - if (!wasScattered()) - return; - - auto columns = block.getColumns(); - for (auto & col : columns) - { - auto c = col->cloneEmpty(); - c->reserve(selector.size()); - /// TODO: create new method in IColumnHelper to devirtualize - for (const auto idx : selector) - c->insertFrom(*col, idx); - col = std::move(c); - } - - /// We have to to id that way because references to the block should remain valid - block.setColumns(columns); - selector = createTrivialSelector(block.rows()); - } - - /// Cut first num_rows rows from block in place and returns block with remaining rows - ScatteredBlock cut(size_t num_rows) - { - SCOPE_EXIT(filterBySelector()); - - if (num_rows >= rows()) - return ScatteredBlock{block.cloneEmpty()}; - - chassert(block); - - IColumn::Selector remaining_selector(selector.begin() + num_rows, selector.end()); - auto remaining = ScatteredBlock{block, std::move(remaining_selector)}; - - selector.erase(selector.begin() + num_rows, selector.end()); - - return remaining; - } - - void replicate(const IColumn::Offsets & offsets, size_t existing_columns, const std::vector & right_keys_to_replicate) - { - chassert(block); - chassert(offsets.size() == rows()); - - auto columns = block.getColumns(); - for (size_t i = 0; i < existing_columns; ++i) - { - auto c = columns[i]->replicate(offsets); - columns[i] = std::move(c); - } - for (size_t pos : right_keys_to_replicate) - { - auto c = columns[pos]->replicate(offsets); - columns[pos] = std::move(c); - } - - block.setColumns(columns); - selector = createTrivialSelector(block.rows()); - } - - private: - IColumn::Selector createTrivialSelector(size_t size) - { - IColumn::Selector res(size); - std::iota(res.begin(), res.end(), 0); - return res; - } - - Block block; - IColumn::Selector selector; - }; - - using ScatteredBlocks = std::vector; - /** Add block of data from right hand of JOIN to the map. * Returns false, if some limit was exceeded and you should not insert more data. 
*/ diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h new file mode 100644 index 00000000000..8dc3c60eb07 --- /dev/null +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -0,0 +1,151 @@ +#pragma once + +#include +#include +#include + +#include + +namespace DB +{ + +struct ScatteredBlock : private boost::noncopyable +{ + ScatteredBlock() = default; + + explicit ScatteredBlock(Block block_) : block(std::move(block_)), selector(createTrivialSelector(block.rows())) { } + + ScatteredBlock(Block block_, IColumn::Selector && selector_) : block(std::move(block_)), selector(std::move(selector_)) { } + + ScatteredBlock(ScatteredBlock && other) noexcept : block(std::move(other.block)), selector(std::move(other.selector)) + { + other.block.clear(); + other.selector.clear(); + } + + ScatteredBlock & operator=(ScatteredBlock && other) noexcept + { + if (this != &other) + { + block = std::move(other.block); + selector = std::move(other.selector); + + other.block.clear(); + other.selector.clear(); + } + return *this; + } + + Block & getSourceBlock() & { return block; } + const Block & getSourceBlock() const & { return block; } + + Block && getSourceBlock() && { return std::move(block); } + + const auto & getSelector() const { return selector; } + + bool contains(size_t idx) const { return std::find(selector.begin(), selector.end(), idx) != selector.end(); } + + explicit operator bool() const { return !!block; } + + /// Accounts only selected rows + size_t rows() const { return selector.size(); } + + /// Whether block was scattered, i.e. has non-trivial selector + bool wasScattered() const + { + chassert(block); + return selector.size() != block.rows(); + } + + const ColumnWithTypeAndName & getByName(const std::string & name) const + { + chassert(block); + return block.getByName(name); + } + + /// Filters selector by mask discarding rows for which filter is false + void filter(const IColumn::Filter & filter) + { + chassert(block && block.rows() == filter.size()); + auto * it = std::remove_if(selector.begin(), selector.end(), [&](size_t idx) { return !filter[idx]; }); + selector.resize(std::distance(selector.begin(), it)); + } + + /// Applies selector to block in place + void filterBySelector() + { + chassert(block); + + if (!wasScattered()) + return; + + auto columns = block.getColumns(); + for (auto & col : columns) + { + auto c = col->cloneEmpty(); + c->reserve(selector.size()); + /// TODO: create new method in IColumnHelper to devirtualize + for (const auto idx : selector) + c->insertFrom(*col, idx); + col = std::move(c); + } + + /// We have to to id that way because references to the block should remain valid + block.setColumns(columns); + selector = createTrivialSelector(block.rows()); + } + + /// Cut first num_rows rows from block in place and returns block with remaining rows + ScatteredBlock cut(size_t num_rows) + { + SCOPE_EXIT(filterBySelector()); + + if (num_rows >= rows()) + return ScatteredBlock{block.cloneEmpty()}; + + chassert(block); + + IColumn::Selector remaining_selector(selector.begin() + num_rows, selector.end()); + auto remaining = ScatteredBlock{block, std::move(remaining_selector)}; + + selector.erase(selector.begin() + num_rows, selector.end()); + + return remaining; + } + + void replicate(const IColumn::Offsets & offsets, size_t existing_columns, const std::vector & right_keys_to_replicate) + { + chassert(block); + chassert(offsets.size() == rows()); + + auto columns = block.getColumns(); + for (size_t i = 0; i < existing_columns; 
++i) + { + auto c = columns[i]->replicate(offsets); + columns[i] = std::move(c); + } + for (size_t pos : right_keys_to_replicate) + { + auto c = columns[pos]->replicate(offsets); + columns[pos] = std::move(c); + } + + block.setColumns(columns); + selector = createTrivialSelector(block.rows()); + } + +private: + IColumn::Selector createTrivialSelector(size_t size) + { + IColumn::Selector res(size); + std::iota(res.begin(), res.end(), 0); + return res; + } + + Block block; + IColumn::Selector selector; +}; + +using ScatteredBlocks = std::vector; + +} From 662c67dcc0857dff92153a0ec6c81bfd564069ad Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 9 Aug 2024 22:07:29 +0100 Subject: [PATCH 024/267] stash doesn't work --- src/Interpreters/ConcurrentHashJoin.cpp | 6 +- src/Interpreters/ConcurrentHashJoin.h | 2 +- src/Interpreters/HashJoin/AddedColumns.cpp | 2 +- src/Interpreters/HashJoin/AddedColumns.h | 8 +- src/Interpreters/HashJoin/HashJoinMethods.h | 11 +- src/Interpreters/HashJoin/ScatteredBlock.h | 169 ++++++++++++++++++-- 6 files changed, 171 insertions(+), 27 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index ccbcb00a6a8..e393b0b6e50 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -55,7 +55,7 @@ void updateStatistics(const auto & hash_joins, const DB::StatsCollectingParams & DB::getHashTablesStatistics().update(sum_of_sizes, *median_size, params); } -Block concatenateBlocks(const HashJoin::ScatteredBlocks & blocks) +Block concatenateBlocks(const ScatteredBlocks & blocks) { Blocks inner_blocks; for (const auto & block : blocks) @@ -332,7 +332,7 @@ IColumn::Selector selectDispatchBlock(size_t num_shards, const Strings & key_col return hashToSelector(hash, num_shards); } -HashJoin::ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, const Block & from_block) +ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, const Block & from_block) { size_t num_shards = hash_joins.size(); IColumn::Selector selector = selectDispatchBlock(num_shards, key_columns_names, from_block); @@ -344,7 +344,7 @@ HashJoin::ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_ const size_t shard = selector[i]; selectors[shard].push_back(i); } - HashJoin::ScatteredBlocks result; + ScatteredBlocks result; result.reserve(num_shards); for (size_t i = 0; i < num_shards; ++i) result.emplace_back(from_block, std::move(selectors[i])); diff --git a/src/Interpreters/ConcurrentHashJoin.h b/src/Interpreters/ConcurrentHashJoin.h index 5c32ebb32f0..b6db783beb1 100644 --- a/src/Interpreters/ConcurrentHashJoin.h +++ b/src/Interpreters/ConcurrentHashJoin.h @@ -78,7 +78,7 @@ private: std::mutex totals_mutex; Block totals; - HashJoin::ScatteredBlocks dispatchBlock(const Strings & key_columns_names, const Block & from_block); + ScatteredBlocks dispatchBlock(const Strings & key_columns_names, const Block & from_block); }; UInt64 calculateCacheKey(std::shared_ptr & table_join, const QueryTreeNodePtr & right_table_expression); diff --git a/src/Interpreters/HashJoin/AddedColumns.cpp b/src/Interpreters/HashJoin/AddedColumns.cpp index 78b8602ce0e..19b59d86466 100644 --- a/src/Interpreters/HashJoin/AddedColumns.cpp +++ b/src/Interpreters/HashJoin/AddedColumns.cpp @@ -4,7 +4,7 @@ namespace DB { JoinOnKeyColumns::JoinOnKeyColumns( - const HashJoin::ScatteredBlock & block_, const Names & key_names_, const String & cond_column_name, const Sizes & 
key_sizes_) + const ScatteredBlock & block_, const Names & key_names_, const String & cond_column_name, const Sizes & key_sizes_) : block(block_) , key_names(key_names_) /// Rare case, when keys are constant or low cardinality. To avoid code bloat, simply materialize them. diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index 91db042bdb5..137d5febc46 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -14,7 +14,7 @@ using ExpressionActionsPtr = std::shared_ptr; struct JoinOnKeyColumns { - const HashJoin::ScatteredBlock & block; + const ScatteredBlock & block; Names key_names; @@ -30,7 +30,7 @@ struct JoinOnKeyColumns Sizes key_sizes; JoinOnKeyColumns( - const HashJoin::ScatteredBlock & block, const Names & key_names_, const String & cond_column_name, const Sizes & key_sizes_); + const ScatteredBlock & block, const Names & key_names_, const String & cond_column_name, const Sizes & key_sizes_); bool isRowFiltered(size_t i) const { @@ -62,7 +62,7 @@ public: }; AddedColumns( - const HashJoin::ScatteredBlock & left_block_, + const ScatteredBlock & left_block_, const Block & block_with_columns_to_add, const Block & saved_block_sample, const HashJoin & join, @@ -142,7 +142,7 @@ public: const IColumn & leftAsofKey() const { return *left_asof_key; } - const HashJoin::ScatteredBlock & src_block; + const ScatteredBlock & src_block; Block left_block; std::vector join_on_keys; ExpressionActionsPtr additional_filter_expression; diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 7a290087b5e..0c4bc02970b 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -11,6 +11,7 @@ #include #include +#include "Interpreters/HashJoin/ScatteredBlock.h" namespace DB @@ -77,7 +78,7 @@ public: const ColumnRawPtrs & key_columns, const Sizes & key_sizes, Block * stored_block, - const IColumn::Selector & selector, + const ScatteredBlock::Selector & selector, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, @@ -113,7 +114,7 @@ public: const MapsTemplateVector & maps_, bool is_join_get = false) { - HashJoin::ScatteredBlock scattered_block{block}; + ScatteredBlock scattered_block{block}; auto ret = joinBlockImpl(join, scattered_block, block_with_columns_to_add, maps_, is_join_get); ret.filterBySelector(); scattered_block.filterBySelector(); @@ -121,9 +122,9 @@ public: return ret.getSourceBlock(); } - static HashJoin::ScatteredBlock joinBlockImpl( + static ScatteredBlock joinBlockImpl( const HashJoin & join, - HashJoin::ScatteredBlock & block, + ScatteredBlock & block, const Block & block_with_columns_to_add, const MapsTemplateVector & maps_, bool is_join_get = false) @@ -247,7 +248,7 @@ private: const ColumnRawPtrs & key_columns, const Sizes & key_sizes, Block * stored_block, - const IColumn::Selector & selector, + const ScatteredBlock::Selector & selector, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 8dc3c60eb07..de86ca6659a 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -3,24 +3,161 @@ #include #include #include +#include "base/defines.h" +#include #include +#include +#include + namespace DB { +namespace detail +{ + +class Selector +{ +public: + using Range = std::pair; + + /// [begin, end) + 
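/*
 * Design note in code form: Selector::data is a variant over a dense [begin, end)
 * range and an explicit index list, so the common whole-block case costs two integers
 * instead of a vector of N indices. A toy split() under that layout, with the template
 * arguments spelled out (they are implied by the patch; the helper is illustrative):
 */
using ToyRange = std::pair<size_t, size_t>;
using ToyData = std::variant<ToyRange, IColumn::Selector>;

std::pair<ToyData, ToyData> toySplit(const ToyData & data, size_t n)
{
    if (const auto * range = std::get_if<ToyRange>(&data))
        return {ToyRange{range->first, range->first + n}, ToyRange{range->first + n, range->second}};

    const auto & indexes = std::get<IColumn::Selector>(data);
    return {IColumn::Selector(indexes.begin(), indexes.begin() + n),
            IColumn::Selector(indexes.begin() + n, indexes.end())};
}
/// The constructor below stores exactly the dense [begin, end) case.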
Selector(size_t begin, size_t end) : data(Range{begin, end}) { } + Selector() : Selector(0, 0) { } + + Selector(IColumn::Selector && selector_) : data(initializeFromSelector(std::move(selector_))) { } + + class Iterator + { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = size_t; + using difference_type = std::ptrdiff_t; + using pointer = size_t *; + using reference = size_t &; + + Iterator(const Selector & selector_, size_t idx_) : selector(selector_), idx(idx_) { } + + size_t operator*() const + { + chassert(idx < selector.size()); + if (idx >= selector.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Index {} out of range size {}", idx, selector.size()); + return selector[idx]; + } + + Iterator & operator++() + { + if (idx >= selector.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Index {} out of range size {}", idx, selector.size()); + ++idx; + return *this; + } + + bool operator!=(const Iterator & other) const { return idx != other.idx; } + + private: + const Selector & selector; + size_t idx; + }; + + Iterator begin() const { return Iterator(*this, 0); } + + Iterator end() const { return Iterator(*this, size()); } + + size_t operator[](size_t idx) const + { + chassert(idx < size()); + if (idx >= size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Index {} out of range size {}", idx, size()); + + if (std::holds_alternative(data)) + { + auto range = std::get(data); + return range.first + idx; + } + else + { + return std::get(data)[idx]; + } + } + + size_t size() const + { + if (std::holds_alternative(data)) + { + auto range = std::get(data); + return range.second - range.first; + } + else + { + return std::get(data).size(); + } + } + + std::pair split(size_t num_rows) + { + if (num_rows > size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Index {} out of range size {}", num_rows, size()); + + if (std::holds_alternative(data)) + { + auto range = std::get(data); + + if (num_rows == 0) + return {Selector(), Selector{range.first, range.second}}; + + if (num_rows == size()) + return {Selector{range.first, range.second}, Selector()}; + + return {Selector(range.first, range.first + num_rows), Selector(range.first + num_rows, range.second)}; + } + else + { + auto & selector = std::get(data); + return { + Selector(IColumn::Selector(selector.begin(), selector.begin() + num_rows)), + Selector(IColumn::Selector(selector.begin() + num_rows, selector.end()))}; + } + } + +private: + using Data = std::variant; + + Data initializeFromSelector(IColumn::Selector && selector) + { + if (selector.empty()) + return Range{0, 0}; + + /// selector represents continuous range + if (selector.back() == selector.front() + selector.size() - 1) + return Range{selector.front(), selector.front() + selector.size()}; + + return std::move(selector); + } + + Data data; +}; + +} + struct ScatteredBlock : private boost::noncopyable { + using Selector = detail::Selector; + ScatteredBlock() = default; explicit ScatteredBlock(Block block_) : block(std::move(block_)), selector(createTrivialSelector(block.rows())) { } ScatteredBlock(Block block_, IColumn::Selector && selector_) : block(std::move(block_)), selector(std::move(selector_)) { } + ScatteredBlock(Block block_, Selector selector_) : block(std::move(block_)), selector(std::move(selector_)) { } + ScatteredBlock(ScatteredBlock && other) noexcept : block(std::move(other.block)), selector(std::move(other.selector)) { other.block.clear(); - other.selector.clear(); + other.selector = {}; } ScatteredBlock & 
operator=(ScatteredBlock && other) noexcept @@ -31,7 +168,7 @@ struct ScatteredBlock : private boost::noncopyable selector = std::move(other.selector); other.block.clear(); - other.selector.clear(); + other.selector = {}; } return *this; } @@ -67,8 +204,10 @@ struct ScatteredBlock : private boost::noncopyable void filter(const IColumn::Filter & filter) { chassert(block && block.rows() == filter.size()); - auto * it = std::remove_if(selector.begin(), selector.end(), [&](size_t idx) { return !filter[idx]; }); - selector.resize(std::distance(selector.begin(), it)); + IColumn::Selector new_selector; + new_selector.reserve(selector.size()); + std::copy_if(selector.begin(), selector.end(), std::back_inserter(new_selector), [&](size_t idx) { return filter[idx]; }); + selector = std::move(new_selector); } /// Applies selector to block in place @@ -105,10 +244,19 @@ struct ScatteredBlock : private boost::noncopyable chassert(block); - IColumn::Selector remaining_selector(selector.begin() + num_rows, selector.end()); + LOG_DEBUG(&Poco::Logger::get("debug"), "selector=({})", fmt::join(selector, ",")); + + auto && [first_num_rows, remaining_selector] = selector.split(num_rows); + + LOG_DEBUG( + &Poco::Logger::get("debug"), + "first_num_rows=({}), remaining_selector=({})", + fmt::join(first_num_rows, ","), + fmt::join(remaining_selector, ",")); + auto remaining = ScatteredBlock{block, std::move(remaining_selector)}; - selector.erase(selector.begin() + num_rows, selector.end()); + selector = std::move(first_num_rows); return remaining; } @@ -135,15 +283,10 @@ struct ScatteredBlock : private boost::noncopyable } private: - IColumn::Selector createTrivialSelector(size_t size) - { - IColumn::Selector res(size); - std::iota(res.begin(), res.end(), 0); - return res; - } + Selector createTrivialSelector(size_t size) { return Selector(0, size - 1); } Block block; - IColumn::Selector selector; + Selector selector; }; using ScatteredBlocks = std::vector; From 309ee4351f84b17ffafad81d5ae95e98ab688336 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 9 Aug 2024 23:01:46 +0100 Subject: [PATCH 025/267] stash does work --- src/Interpreters/HashJoin/ScatteredBlock.h | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index de86ca6659a..7e606e91a1f 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -244,16 +244,8 @@ struct ScatteredBlock : private boost::noncopyable chassert(block); - LOG_DEBUG(&Poco::Logger::get("debug"), "selector=({})", fmt::join(selector, ",")); - auto && [first_num_rows, remaining_selector] = selector.split(num_rows); - LOG_DEBUG( - &Poco::Logger::get("debug"), - "first_num_rows=({}), remaining_selector=({})", - fmt::join(first_num_rows, ","), - fmt::join(remaining_selector, ",")); - auto remaining = ScatteredBlock{block, std::move(remaining_selector)}; selector = std::move(first_num_rows); @@ -283,7 +275,7 @@ struct ScatteredBlock : private boost::noncopyable } private: - Selector createTrivialSelector(size_t size) { return Selector(0, size - 1); } + Selector createTrivialSelector(size_t size) { return Selector(0, size); } Block block; Selector selector; From 7596d5a0cac1ca06aa6620d9c0857f542339f556 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sun, 11 Aug 2024 19:52:35 +0100 Subject: [PATCH 026/267] fix style --- src/Interpreters/HashJoin/ScatteredBlock.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git 
a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 7e606e91a1f..5e67075b91b 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -14,6 +14,11 @@ namespace DB { +namespace ErrorCodes +{ +extern const int LOGICAL_ERROR; +} + namespace detail { From 101eeae888e7db22b81096173f2ecd91bf9b2807 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 12 Aug 2024 19:23:02 +0100 Subject: [PATCH 027/267] optimize --- src/Interpreters/HashJoin/ScatteredBlock.h | 43 +++++++++++++++++++--- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 5e67075b91b..3668750f044 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -29,6 +29,7 @@ public: /// [begin, end) Selector(size_t begin, size_t end) : data(Range{begin, end}) { } + Selector(size_t size) : Selector(0, size) { } Selector() : Selector(0, 0) { } Selector(IColumn::Selector && selector_) : data(initializeFromSelector(std::move(selector_))) { } @@ -127,6 +128,28 @@ public: } } + bool isContinuousRange() const { return std::holds_alternative(data); } + + Range getRange() const + { + chassert(isContinuousRange()); + return std::get(data); + } + + std::string toString() const + { + if (std::holds_alternative(data)) + { + auto range = std::get(data); + return fmt::format("[{}, {})", range.first, range.second); + } + else + { + auto & selector = std::get(data); + return fmt::format("({})", fmt::join(selector, ",")); + } + } + private: using Data = std::variant; @@ -153,7 +176,7 @@ struct ScatteredBlock : private boost::noncopyable ScatteredBlock() = default; - explicit ScatteredBlock(Block block_) : block(std::move(block_)), selector(createTrivialSelector(block.rows())) { } + explicit ScatteredBlock(Block block_) : block(std::move(block_)), selector(block.rows()) { } ScatteredBlock(Block block_, IColumn::Selector && selector_) : block(std::move(block_)), selector(std::move(selector_)) { } @@ -223,6 +246,18 @@ struct ScatteredBlock : private boost::noncopyable if (!wasScattered()) return; + if (selector.isContinuousRange()) + { + const auto range = selector.getRange(); + for (size_t i = 0; i < block.columns(); ++i) + { + auto & col = block.getByPosition(i); + col.column = col.column->cut(range.first, range.second - range.first); + } + selector = Selector(block.rows()); + return; + } + auto columns = block.getColumns(); for (auto & col : columns) { @@ -236,7 +271,7 @@ struct ScatteredBlock : private boost::noncopyable /// We have to to id that way because references to the block should remain valid block.setColumns(columns); - selector = createTrivialSelector(block.rows()); + selector = Selector(block.rows()); } /// Cut first num_rows rows from block in place and returns block with remaining rows @@ -276,12 +311,10 @@ struct ScatteredBlock : private boost::noncopyable } block.setColumns(columns); - selector = createTrivialSelector(block.rows()); + selector = Selector(block.rows()); } private: - Selector createTrivialSelector(size_t size) { return Selector(0, size); } - Block block; Selector selector; }; From 532bda833444fb81dcea1a34d52a446dc3a1fbaf Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 13 Aug 2024 13:42:35 +0100 Subject: [PATCH 028/267] optimize --- src/Interpreters/HashJoin/ScatteredBlock.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 3668750f044..4952ce1808c 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -45,7 +45,7 @@ public: Iterator(const Selector & selector_, size_t idx_) : selector(selector_), idx(idx_) { } - size_t operator*() const + size_t ALWAYS_INLINE operator*() const { chassert(idx < selector.size()); if (idx >= selector.size()) @@ -53,7 +53,7 @@ public: return selector[idx]; } - Iterator & operator++() + Iterator & ALWAYS_INLINE operator++() { if (idx >= selector.size()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Index {} out of range size {}", idx, selector.size()); @@ -61,7 +61,7 @@ public: return *this; } - bool operator!=(const Iterator & other) const { return idx != other.idx; } + bool ALWAYS_INLINE operator!=(const Iterator & other) const { return idx != other.idx; } private: const Selector & selector; @@ -72,7 +72,7 @@ public: Iterator end() const { return Iterator(*this, size()); } - size_t operator[](size_t idx) const + size_t ALWAYS_INLINE operator[](size_t idx) const { chassert(idx < size()); if (idx >= size()) From 1c6eafbfcfeb75d6e01d8749687f1a03fc181d65 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 13 Aug 2024 14:21:25 +0100 Subject: [PATCH 029/267] stash --- src/Interpreters/HashJoin/ScatteredBlock.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 4952ce1808c..96ec29759e6 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -75,8 +75,6 @@ public: size_t ALWAYS_INLINE operator[](size_t idx) const { chassert(idx < size()); - if (idx >= size()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Index {} out of range size {}", idx, size()); if (std::holds_alternative(data)) { @@ -104,8 +102,7 @@ public: std::pair split(size_t num_rows) { - if (num_rows > size()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Index {} out of range size {}", num_rows, size()); + chassert(num_rows <= size()); if (std::holds_alternative(data)) { From e3caa59f839e345427359f4eae6661c0342fd6c3 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 13 Aug 2024 21:09:17 +0100 Subject: [PATCH 030/267] too expensive --- src/Interpreters/HashJoin/AddedColumns.h | 1 - src/Interpreters/HashJoin/ScatteredBlock.h | 2 -- 2 files changed, 3 deletions(-) diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index 137d5febc46..08b5f71f222 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -34,7 +34,6 @@ struct JoinOnKeyColumns bool isRowFiltered(size_t i) const { - chassert(block.contains(i), fmt::format("Row {} is not in block", i)); return join_mask_column.isRowFiltered(i); } }; diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 96ec29759e6..bde5796ae4c 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -205,8 +205,6 @@ struct ScatteredBlock : private boost::noncopyable const auto & getSelector() const { return selector; } - bool contains(size_t idx) const { return std::find(selector.begin(), selector.end(), idx) != selector.end(); } - explicit operator bool() const { return !!block; } /// Accounts only selected rows From 9c55ad799bd92d95c627a10ea895c20b73c47eca Mon Sep 17 00:00:00 2001 From: 
Nikita Taranov Date: Wed, 14 Aug 2024 16:41:37 +0100 Subject: [PATCH 031/267] stash half --- src/Interpreters/HashJoin/HashJoinMethods.h | 22 +++++++++++++++ .../HashJoin/HashJoinMethodsImpl.h | 28 ++++++++++++++++++- src/Interpreters/HashJoin/ScatteredBlock.h | 6 ++++ 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 298671b8271..6e741628234 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -108,6 +108,20 @@ private: Arena & pool, bool & is_inserted); + template + static size_t insertFromBlockImplTypeCase( + HashJoin & join, + HashMap & map, + const ColumnRawPtrs & key_columns, + const Sizes & key_sizes, + Block * stored_block, + const Selector & selector, + size_t rows, + ConstNullMapPtr null_map, + UInt8ColumnDataPtr join_mask, + Arena & pool, + bool & is_inserted); + template static size_t switchJoinRightColumns( const std::vector & mapv, @@ -138,6 +152,14 @@ private: AddedColumns & added_columns, JoinStuff::JoinUsedFlags & used_flags); + template + static size_t joinRightColumns( + std::vector && key_getter_vector, + const std::vector & mapv, + AddedColumns & added_columns, + JoinStuff::JoinUsedFlags & used_flags, + const Selector & selector); + template static void setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unused]]); diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index e3f57e5bf64..5c328c66f95 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -1,5 +1,8 @@ #pragma once +#include #include +#include "Columns/IColumn.h" +#include "Interpreters/HashJoin/ScatteredBlock.h" namespace DB { @@ -334,6 +337,24 @@ size_t HashJoinMethods::joinRightColumns( const std::vector & mapv, AddedColumns & added_columns, JoinStuff::JoinUsedFlags & used_flags) +{ + auto & block = added_columns.src_block; + if (block.getSelector().isContinuousRange()) + return joinRightColumns( + std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getRange()); + else + return joinRightColumns( + std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getSelector()); +} + +template +template +size_t HashJoinMethods::joinRightColumns( + std::vector && key_getter_vector, + const std::vector & mapv, + AddedColumns & added_columns, + JoinStuff::JoinUsedFlags & used_flags, + const Selector & selector) { constexpr JoinFeatures join_features; @@ -352,7 +373,12 @@ size_t HashJoinMethods::joinRightColumns( size_t i = 0; for (; i < rows; ++i) { - const auto ind = block.getSelector()[i]; + size_t ind = 0; + if constexpr (std::is_same_v, IColumn::Selector>) + ind = selector[i]; + else + ind = selector.first + i; + if constexpr (join_features.need_replication) { if (unlikely(current_offset >= max_joined_block_rows)) diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index bde5796ae4c..f28126212d7 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -133,6 +133,12 @@ public: return std::get(data); } + const IColumn::Selector & getSelector() const + { + chassert(!isContinuousRange()); + return std::get(data); + } + std::string toString() const { if (std::holds_alternative(data)) From 4f3ff0ecbd860e62cd8026811c06dcf64d1c25d8 Mon Sep 17 00:00:00 2001 From: 
Nikita Taranov Date: Wed, 14 Aug 2024 19:31:54 +0100 Subject: [PATCH 032/267] second half --- src/Interpreters/HashJoin/HashJoinMethods.h | 21 ------ .../HashJoin/HashJoinMethodsImpl.h | 69 +++++++++++-------- 2 files changed, 42 insertions(+), 48 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 6e741628234..c44a03aa15e 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -95,19 +95,6 @@ private: template static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes); - template - static size_t insertFromBlockImplTypeCase( - HashJoin & join, - HashMap & map, - const ColumnRawPtrs & key_columns, - const Sizes & key_sizes, - Block * stored_block, - const ScatteredBlock::Selector & selector, - ConstNullMapPtr null_map, - UInt8ColumnDataPtr join_mask, - Arena & pool, - bool & is_inserted); - template static size_t insertFromBlockImplTypeCase( HashJoin & join, @@ -116,7 +103,6 @@ private: const Sizes & key_sizes, Block * stored_block, const Selector & selector, - size_t rows, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, @@ -145,13 +131,6 @@ private: /// Joins right table columns which indexes are present in right_indexes using specified map. /// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS). - template - static size_t joinRightColumns( - std::vector && key_getter_vector, - const std::vector & mapv, - AddedColumns & added_columns, - JoinStuff::JoinUsedFlags & used_flags); - template static size_t joinRightColumns( std::vector && key_getter_vector, diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index 5c328c66f95..6292bce5425 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -36,9 +36,14 @@ size_t HashJoinMethods::insertFromBlockImpl( #define M(TYPE) \ case HashJoin::Type::TYPE: \ - return insertFromBlockImplTypeCase< \ - typename KeyGetterForType>::Type>( \ - join, *maps.TYPE, key_columns, key_sizes, stored_block, selector, null_map, join_mask, pool, is_inserted); \ + if (selector.isContinuousRange()) \ + return insertFromBlockImplTypeCase< \ + typename KeyGetterForType>::Type>( \ + join, *maps.TYPE, key_columns, key_sizes, stored_block, selector.getRange(), null_map, join_mask, pool, is_inserted); \ + else \ + return insertFromBlockImplTypeCase< \ + typename KeyGetterForType>::Type>( \ + join, *maps.TYPE, key_columns, key_sizes, stored_block, selector.getSelector(), null_map, join_mask, pool, is_inserted); \ break; APPLY_FOR_JOIN_VARIANTS(M) @@ -179,14 +184,14 @@ KeyGetter HashJoinMethods::createKeyGetter(const } template -template +template size_t HashJoinMethods::insertFromBlockImplTypeCase( HashJoin & join, HashMap & map, const ColumnRawPtrs & key_columns, const Sizes & key_sizes, Block * stored_block, - const ScatteredBlock::Selector & selector, + const Selector & selector, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, @@ -204,8 +209,20 @@ size_t HashJoinMethods::insertFromBlockImplTypeC /// For ALL and ASOF join always insert values is_inserted = !mapped_one || is_asof_join; - for (size_t ind : selector) + size_t rows = 0; + if constexpr (std::is_same_v, IColumn::Selector>) + rows = selector.size(); + else + rows = selector.second - selector.first; + + for (size_t i = 0; i < rows; ++i) { + size_t ind = 0; 
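+            /// `Selector` here is a template parameter: either IColumn::Selector (an explicit
+            /// list of row indexes) or a [begin, end) range pair. The `if constexpr` below
+            /// resolves the choice at compile time, so the per-row loop pays no runtime
+            /// dispatch on the selector kind.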
+ if constexpr (std::is_same_v, IColumn::Selector>) + ind = selector[i]; + else + ind = selector.first + i; + chassert(!null_map || ind < null_map->size()); if (null_map && (*null_map)[ind]) { @@ -321,32 +338,30 @@ size_t HashJoinMethods::joinRightColumnsSwitchMu if (added_columns.additional_filter_expression) throw Exception(ErrorCodes::LOGICAL_ERROR, "Additional filter expression is not supported for this JOIN"); - return mapv.size() > 1 ? joinRightColumns( - std::forward>(key_getter_vector), mapv, added_columns, used_flags) - : joinRightColumns( - std::forward>(key_getter_vector), mapv, added_columns, used_flags); + auto & block = added_columns.src_block; + if (block.getSelector().isContinuousRange()) + { + if (mapv.size() > 1) + return joinRightColumns( + std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getRange()); + else + return joinRightColumns( + std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getRange()); + } + else + { + if (mapv.size() > 1) + return joinRightColumns( + std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getSelector()); + else + return joinRightColumns( + std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getSelector()); + } } /// Joins right table columns which indexes are present in right_indexes using specified map. /// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS). -template -template -size_t HashJoinMethods::joinRightColumns( - std::vector && key_getter_vector, - const std::vector & mapv, - AddedColumns & added_columns, - JoinStuff::JoinUsedFlags & used_flags) -{ - auto & block = added_columns.src_block; - if (block.getSelector().isContinuousRange()) - return joinRightColumns( - std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getRange()); - else - return joinRightColumns( - std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getSelector()); -} - template template size_t HashJoinMethods::joinRightColumns( From 3f5f59242e3b93a673fa91b9cdbab26b7c5e1cbd Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 15 Aug 2024 20:06:14 +0100 Subject: [PATCH 033/267] fix build --- src/Interpreters/HashJoin/HashJoinMethodsImpl.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index a222fed0a6e..a6c11f60cf4 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -429,16 +429,8 @@ size_t HashJoinMethods::joinRightColumns( { const IColumn & left_asof_key = added_columns.leftAsofKey(); -<<<<<<< HEAD auto row_ref = mapped->findAsof(left_asof_key, ind); - if (row_ref.block) -||||||| 014c1650f8d - auto row_ref = mapped->findAsof(left_asof_key, i); - if (row_ref.block) -======= - auto row_ref = mapped->findAsof(left_asof_key, i); if (row_ref && row_ref->block) ->>>>>>> master { setUsed(added_columns.filter, i); if constexpr (flag_per_row) From 95928475873204e55d2cef950e6abb2363806178 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 13 Aug 2024 13:38:02 +0100 Subject: [PATCH 034/267] use index() --- src/Columns/ColumnVector.h | 1 + src/Interpreters/ConcurrentHashJoin.cpp | 10 ++-- .../HashJoin/HashJoinMethodsImpl.h | 18 +++---- src/Interpreters/HashJoin/ScatteredBlock.h | 54 +++++++++---------- 4 files changed, 44 insertions(+), 39 deletions(-) diff --git 
a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index a5e1ee4b462..6f998f804ec 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -52,6 +52,7 @@ private: explicit ColumnVector(const size_t n) : data(n) {} ColumnVector(const size_t n, const ValueType x) : data(n, x) {} ColumnVector(const ColumnVector & src) : data(src.data.begin(), src.data.end()) {} + ColumnVector(Container::iterator begin, Container::iterator end) : data(begin, end) { } /// Sugar constructor. ColumnVector(std::initializer_list il) : data{il} {} diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index e393b0b6e50..43f0eb9aa85 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -23,6 +23,7 @@ #include #include #include +#include "Interpreters/HashJoin/ScatteredBlock.h" using namespace DB; @@ -336,13 +337,16 @@ ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_na { size_t num_shards = hash_joins.size(); IColumn::Selector selector = selectDispatchBlock(num_shards, key_columns_names, from_block); - std::vector selectors(num_shards); + std::vector selectors(num_shards); for (size_t i = 0; i < num_shards; ++i) - selectors[i].reserve(selector.size() / num_shards + 1); + { + selectors[i] = ScatteredBlock::Indexes::create(); + selectors[i]->reserve(selector.size() / num_shards + 1); + } for (size_t i = 0; i < selector.size(); ++i) { const size_t shard = selector[i]; - selectors[shard].push_back(i); + selectors[shard]->getData().push_back(i); } ScatteredBlocks result; result.reserve(num_shards); diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index a6c11f60cf4..47e1fa49697 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -43,7 +43,7 @@ size_t HashJoinMethods::insertFromBlockImpl( else \ return insertFromBlockImplTypeCase< \ typename KeyGetterForType>::Type>( \ - join, *maps.TYPE, key_columns, key_sizes, stored_block, selector.getSelector(), null_map, join_mask, pool, is_inserted); \ + join, *maps.TYPE, key_columns, key_sizes, stored_block, selector.getIndexes(), null_map, join_mask, pool, is_inserted); \ break; APPLY_FOR_JOIN_VARIANTS(M) @@ -213,16 +213,16 @@ size_t HashJoinMethods::insertFromBlockImplTypeC is_inserted = !mapped_one || is_asof_join; size_t rows = 0; - if constexpr (std::is_same_v, IColumn::Selector>) - rows = selector.size(); + if constexpr (std::is_same_v, ScatteredBlock::Indexes>) + rows = selector.getData().size(); else rows = selector.second - selector.first; for (size_t i = 0; i < rows; ++i) { size_t ind = 0; - if constexpr (std::is_same_v, IColumn::Selector>) - ind = selector[i]; + if constexpr (std::is_same_v, ScatteredBlock::Indexes>) + ind = selector.getData()[i]; else ind = selector.first + i; @@ -355,10 +355,10 @@ size_t HashJoinMethods::joinRightColumnsSwitchMu { if (mapv.size() > 1) return joinRightColumns( - std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getSelector()); + std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getIndexes()); else return joinRightColumns( - std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getSelector()); + std::move(key_getter_vector), mapv, added_columns, used_flags, block.getSelector().getIndexes()); } } @@ -394,8 +394,8 @@ size_t HashJoinMethods::joinRightColumns( for (; i < rows; ++i) { 
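+        /// ScatteredBlock::Indexes is ColumnUInt64, so getData() below exposes the underlying
+        /// PaddedPODArray and each row index is read with a plain array access, avoiding any
+        /// per-row virtual call on IColumn; the same compile-time dispatch as on the build side.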
size_t ind = 0; - if constexpr (std::is_same_v, IColumn::Selector>) - ind = selector[i]; + if constexpr (std::is_same_v, ScatteredBlock::Indexes>) + ind = selector.getData()[i]; else ind = selector.first + i; diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index f28126212d7..69456898394 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -1,9 +1,11 @@ #pragma once +#include +#include #include #include +#include #include -#include "base/defines.h" #include #include @@ -26,13 +28,15 @@ class Selector { public: using Range = std::pair; + using Indexes = ColumnUInt64; + using IndexesPtr = ColumnUInt64::MutablePtr; /// [begin, end) Selector(size_t begin, size_t end) : data(Range{begin, end}) { } Selector(size_t size) : Selector(0, size) { } Selector() : Selector(0, 0) { } - Selector(IColumn::Selector && selector_) : data(initializeFromSelector(std::move(selector_))) { } + Selector(IndexesPtr && selector_) : data(initializeFromSelector(std::move(selector_))) { } class Iterator { @@ -83,7 +87,7 @@ public: } else { - return std::get(data)[idx]; + return std::get(data)->getData()[idx]; } } @@ -96,7 +100,7 @@ public: } else { - return std::get(data).size(); + return std::get(data)->size(); } } @@ -118,10 +122,10 @@ public: } else { - auto & selector = std::get(data); + auto & selector = std::get(data)->getData(); return { - Selector(IColumn::Selector(selector.begin(), selector.begin() + num_rows)), - Selector(IColumn::Selector(selector.begin() + num_rows, selector.end()))}; + Selector(Indexes::create(selector.begin(), selector.begin() + num_rows)), + Selector(Indexes::create(selector.begin() + num_rows, selector.end()))}; } } @@ -133,10 +137,10 @@ public: return std::get(data); } - const IColumn::Selector & getSelector() const + const Indexes & getIndexes() const { chassert(!isContinuousRange()); - return std::get(data); + return *std::get(data); } std::string toString() const @@ -148,16 +152,17 @@ public: } else { - auto & selector = std::get(data); + auto & selector = std::get(data)->getData(); return fmt::format("({})", fmt::join(selector, ",")); } } private: - using Data = std::variant; + using Data = std::variant; - Data initializeFromSelector(IColumn::Selector && selector) + Data initializeFromSelector(IndexesPtr && selector_) { + const auto & selector = selector_->getData(); if (selector.empty()) return Range{0, 0}; @@ -165,7 +170,7 @@ private: if (selector.back() == selector.front() + selector.size() - 1) return Range{selector.front(), selector.front() + selector.size()}; - return std::move(selector); + return std::move(selector_); } Data data; @@ -176,12 +181,14 @@ private: struct ScatteredBlock : private boost::noncopyable { using Selector = detail::Selector; + using Indexes = Selector::Indexes; + using IndexesPtr = Selector::IndexesPtr; ScatteredBlock() = default; explicit ScatteredBlock(Block block_) : block(std::move(block_)), selector(block.rows()) { } - ScatteredBlock(Block block_, IColumn::Selector && selector_) : block(std::move(block_)), selector(std::move(selector_)) { } + ScatteredBlock(Block block_, IndexesPtr && selector_) : block(std::move(block_)), selector(std::move(selector_)) { } ScatteredBlock(Block block_, Selector selector_) : block(std::move(block_)), selector(std::move(selector_)) { } @@ -233,9 +240,10 @@ struct ScatteredBlock : private boost::noncopyable void filter(const IColumn::Filter & filter) { chassert(block && block.rows() == filter.size()); - 
IColumn::Selector new_selector; - new_selector.reserve(selector.size()); - std::copy_if(selector.begin(), selector.end(), std::back_inserter(new_selector), [&](size_t idx) { return filter[idx]; }); + IndexesPtr new_selector = Indexes::create(); + new_selector->reserve(selector.size()); + std::copy_if( + selector.begin(), selector.end(), std::back_inserter(new_selector->getData()), [&](size_t idx) { return filter[idx]; }); selector = std::move(new_selector); } @@ -259,18 +267,10 @@ struct ScatteredBlock : private boost::noncopyable return; } + /// The general case when selector is non-trivial (likely the result of applying a filter) auto columns = block.getColumns(); for (auto & col : columns) - { - auto c = col->cloneEmpty(); - c->reserve(selector.size()); - /// TODO: create new method in IColumnHelper to devirtualize - for (const auto idx : selector) - c->insertFrom(*col, idx); - col = std::move(c); - } - - /// We have to to id that way because references to the block should remain valid + col = col->index(selector.getIndexes(), /*limit*/ 0); block.setColumns(columns); selector = Selector(block.rows()); } From 42f67904d64f860c18c2e1189ed75cdc046b4185 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 13 Aug 2024 21:05:19 +0100 Subject: [PATCH 035/267] better --- src/Columns/ColumnVector.h | 2 +- src/Interpreters/ConcurrentHashJoin.cpp | 2 +- src/Interpreters/HashJoin/ScatteredBlock.h | 43 ++++++++++------------ 3 files changed, 21 insertions(+), 26 deletions(-) diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index 6f998f804ec..415a83ad203 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -52,7 +52,7 @@ private: explicit ColumnVector(const size_t n) : data(n) {} ColumnVector(const size_t n, const ValueType x) : data(n, x) {} ColumnVector(const ColumnVector & src) : data(src.data.begin(), src.data.end()) {} - ColumnVector(Container::iterator begin, Container::iterator end) : data(begin, end) { } + ColumnVector(Container::const_iterator begin, Container::const_iterator end) : data(begin, end) { } /// Sugar constructor. 
ColumnVector(std::initializer_list il) : data{il} {} diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 43f0eb9aa85..77b3bec34dd 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -23,7 +24,6 @@ #include #include #include -#include "Interpreters/HashJoin/ScatteredBlock.h" using namespace DB; diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 69456898394..79d7e8bb7f0 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -31,12 +31,13 @@ public: using Indexes = ColumnUInt64; using IndexesPtr = ColumnUInt64::MutablePtr; - /// [begin, end) - Selector(size_t begin, size_t end) : data(Range{begin, end}) { } - Selector(size_t size) : Selector(0, size) { } Selector() : Selector(0, 0) { } - Selector(IndexesPtr && selector_) : data(initializeFromSelector(std::move(selector_))) { } + /// [begin, end) + Selector(size_t begin, size_t end) : data(Range{begin, end}) { } + + explicit Selector(size_t size) : Selector(0, size) { } + explicit Selector(IndexesPtr && selector_) : data(initializeFromSelector(std::move(selector_))) { } class Iterator { @@ -82,7 +83,7 @@ public: if (std::holds_alternative(data)) { - auto range = std::get(data); + const auto range = std::get(data); return range.first + idx; } else @@ -95,7 +96,7 @@ public: { if (std::holds_alternative(data)) { - auto range = std::get(data); + const auto range = std::get(data); return range.second - range.first; } else @@ -110,7 +111,7 @@ public: if (std::holds_alternative(data)) { - auto range = std::get(data); + const auto range = std::get(data); if (num_rows == 0) return {Selector(), Selector{range.first, range.second}}; @@ -122,10 +123,10 @@ public: } else { - auto & selector = std::get(data)->getData(); - return { - Selector(Indexes::create(selector.begin(), selector.begin() + num_rows)), - Selector(Indexes::create(selector.begin() + num_rows, selector.end()))}; + const auto & selector = std::get(data)->getData(); + auto && left = Selector(Indexes::create(selector.begin(), selector.begin() + num_rows)); + auto && right = Selector(Indexes::create(selector.begin() + num_rows, selector.end())); + return {std::move(left), std::move(right)}; } } @@ -147,12 +148,12 @@ public: { if (std::holds_alternative(data)) { - auto range = std::get(data); + const auto range = std::get(data); return fmt::format("[{}, {})", range.first, range.second); } else { - auto & selector = std::get(data)->getData(); + const auto & selector = std::get(data)->getData(); return fmt::format("({})", fmt::join(selector, ",")); } } @@ -244,7 +245,7 @@ struct ScatteredBlock : private boost::noncopyable new_selector->reserve(selector.size()); std::copy_if( selector.begin(), selector.end(), std::back_inserter(new_selector->getData()), [&](size_t idx) { return filter[idx]; }); - selector = std::move(new_selector); + selector = Selector(std::move(new_selector)); } /// Applies selector to block in place @@ -299,19 +300,13 @@ struct ScatteredBlock : private boost::noncopyable chassert(block); chassert(offsets.size() == rows()); - auto columns = block.getColumns(); + auto && columns = block.getColumns(); for (size_t i = 0; i < existing_columns; ++i) - { - auto c = columns[i]->replicate(offsets); - columns[i] = std::move(c); - } + columns[i] = columns[i]->replicate(offsets); for (size_t pos : 
right_keys_to_replicate) - { - auto c = columns[pos]->replicate(offsets); - columns[pos] = std::move(c); - } + columns[pos] = columns[pos]->replicate(offsets); - block.setColumns(columns); + block.setColumns(std::move(columns)); selector = Selector(block.rows()); } From ae6ff4cb6855c2d9b500ef92ff250057e94407b4 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 16 Aug 2024 13:11:12 +0100 Subject: [PATCH 036/267] stash --- src/Interpreters/HashJoin/ScatteredBlock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 79d7e8bb7f0..6c2c84e0893 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -282,7 +282,7 @@ struct ScatteredBlock : private boost::noncopyable SCOPE_EXIT(filterBySelector()); if (num_rows >= rows()) - return ScatteredBlock{block.cloneEmpty()}; + return ScatteredBlock{Block{}}; chassert(block); From 97c86b3237fc09456db93079633f2f07eb7fa2c0 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 20 Aug 2024 14:14:15 +0100 Subject: [PATCH 037/267] fix assert --- src/Interpreters/HashJoin/ScatteredBlock.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 6c2c84e0893..9847e8c0b7b 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -251,9 +251,7 @@ struct ScatteredBlock : private boost::noncopyable /// Applies selector to block in place void filterBySelector() { - chassert(block); - - if (!wasScattered()) + if (!block || !wasScattered()) return; if (selector.isContinuousRange()) From f62c7012d70f373b0767ff9123f817ef15c61f15 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 20 Aug 2024 16:40:35 +0100 Subject: [PATCH 038/267] fix tidy --- src/Interpreters/HashJoin/ScatteredBlock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 9847e8c0b7b..2fc115f8345 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -304,7 +304,7 @@ struct ScatteredBlock : private boost::noncopyable for (size_t pos : right_keys_to_replicate) columns[pos] = columns[pos]->replicate(offsets); - block.setColumns(std::move(columns)); + block.setColumns(columns); selector = Selector(block.rows()); } From 16ddf20c73aac6060e63ce7049e4d2f8d49d4915 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 2 Oct 2024 17:29:30 +0100 Subject: [PATCH 039/267] avoid squashing result blocks --- src/Interpreters/ConcurrentHashJoin.cpp | 25 +++++---- src/Interpreters/ConcurrentHashJoin.h | 3 + src/Interpreters/HashJoin/HashJoin.h | 2 + src/Interpreters/IJoin.h | 9 +++ .../Transforms/JoiningTransform.cpp | 55 ++++++++++++------- src/Processors/Transforms/JoiningTransform.h | 9 ++- 6 files changed, 67 insertions(+), 36 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index a1761911a6c..4b88ef14196 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -56,17 +56,6 @@ void updateStatistics(const auto & hash_joins, const DB::StatsCollectingParams & DB::getHashTablesStatistics().update(sum_of_sizes, *median_size, params); } -Block concatenateBlocks(const ScatteredBlocks & blocks) -{ - Blocks inner_blocks; - for (const auto & 
block : blocks) - { - chassert(!block.wasScattered(), "Not scattered blocks are expected in join result"); - inner_blocks.push_back(block.getSourceBlock()); - } - return concatenateBlocks(inner_blocks); -} - } namespace DB @@ -227,6 +216,14 @@ bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_l } void ConcurrentHashJoin::joinBlock(Block & block, std::shared_ptr & /*not_processed*/) +{ + Blocks res; + std::shared_ptr not_processed; + joinBlock(block, res, not_processed); + block = concatenateBlocks(res); +} + +void ConcurrentHashJoin::joinBlock(Block & block, std::vector & res, std::shared_ptr & /*not_processed*/) { hash_joins[0]->data->materializeColumnsFromLeftBlock(block); @@ -242,7 +239,11 @@ void ConcurrentHashJoin::joinBlock(Block & block, std::shared_ptr & throw Exception(ErrorCodes::LOGICAL_ERROR, "not_processed should be empty"); } - block = ::concatenateBlocks(dispatched_blocks); + chassert(res.empty()); + res.clear(); + res.reserve(dispatched_blocks.size()); + std::ranges::transform( + dispatched_blocks, std::back_inserter(res), [](ScatteredBlock & res_block) { return std::move(res_block).getSourceBlock(); }); } void ConcurrentHashJoin::checkTypesOfKeys(const Block & block) const diff --git a/src/Interpreters/ConcurrentHashJoin.h b/src/Interpreters/ConcurrentHashJoin.h index b6db783beb1..33407045c44 100644 --- a/src/Interpreters/ConcurrentHashJoin.h +++ b/src/Interpreters/ConcurrentHashJoin.h @@ -57,6 +57,9 @@ public: bool alwaysReturnsEmptySet() const override; bool supportParallelJoin() const override { return true; } + bool supportsJoinWithManyResultBlocks() const override { return true; } + void joinBlock(Block & block, std::vector & res, std::shared_ptr & not_processed) override; + IBlocksStreamPtr getNonJoinedBlocks(const Block & left_sample_block, const Block & result_sample_block, UInt64 max_block_size) const override; diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 7915788382d..a76c60aab5a 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -146,6 +146,8 @@ public: void checkTypesOfKeys(const Block & block) const override; + using IJoin::joinBlock; + /** Join data from the map (that was previously built by calls to addBlockToJoin) to the block with data from "left" table. * Could be called from different threads in parallel. */ diff --git a/src/Interpreters/IJoin.h b/src/Interpreters/IJoin.h index 8f648de2538..4bfd43598ec 100644 --- a/src/Interpreters/IJoin.h +++ b/src/Interpreters/IJoin.h @@ -90,6 +90,15 @@ public: /// Could be called from different threads in parallel. virtual void joinBlock(Block & block, std::shared_ptr & not_processed) = 0; + virtual bool supportsJoinWithManyResultBlocks() const { return false; } + virtual void joinBlock( + [[maybe_unused]] Block & block, + [[maybe_unused]] std::vector & res, + [[maybe_unused]] std::shared_ptr & not_processed) + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Clone method is not supported for {}", getName()); + } + /** Set/Get totals for right table * Keep "totals" (separate part of dataset, see WITH TOTALS) to use later. */ diff --git a/src/Processors/Transforms/JoiningTransform.cpp b/src/Processors/Transforms/JoiningTransform.cpp index f2fb6327129..ceb95cd7ad1 100644 --- a/src/Processors/Transforms/JoiningTransform.cpp +++ b/src/Processors/Transforms/JoiningTransform.cpp @@ -75,8 +75,9 @@ IProcessor::Status JoiningTransform::prepare() /// Output if has data. 
if (has_output) { - output.push(std::move(output_chunk)); - has_output = false; + output.push(std::move(output_chunks.front())); + output_chunks.pop_front(); + has_output = !output_chunks.empty(); return Status::PortFull; } @@ -122,10 +123,10 @@ void JoiningTransform::work() { if (has_input) { + chassert(output_chunks.empty()); transform(input_chunk); - output_chunk.swap(input_chunk); has_input = not_processed != nullptr; - has_output = !output_chunk.empty(); + has_output = !output_chunks.empty(); } else { @@ -153,8 +154,7 @@ void JoiningTransform::work() return; } - auto rows = block.rows(); - output_chunk.setColumns(block.getColumns(), rows); + output_chunks.emplace_back(block.getColumns(), block.rows()); has_output = true; } } @@ -173,7 +173,7 @@ void JoiningTransform::transform(Chunk & chunk) } } - Block block; + Blocks res; if (on_totals) { const auto & left_totals = inputs.front().getHeader().cloneWithColumns(chunk.detachColumns()); @@ -184,39 +184,52 @@ void JoiningTransform::transform(Chunk & chunk) if (default_totals && !right_totals) return; - block = outputs.front().getHeader().cloneEmpty(); - JoinCommon::joinTotals(left_totals, right_totals, join->getTableJoin(), block); + res.emplace_back(); + res.back() = outputs.front().getHeader().cloneEmpty(); + JoinCommon::joinTotals(left_totals, right_totals, join->getTableJoin(), res.back()); } else - block = readExecute(chunk); - auto num_rows = block.rows(); - chunk.setColumns(block.getColumns(), num_rows); + res = readExecute(chunk); + + std::ranges::for_each(res, [this](Block & block) { output_chunks.emplace_back(block.getColumns(), block.rows()); }); } -Block JoiningTransform::readExecute(Chunk & chunk) +Blocks JoiningTransform::readExecute(Chunk & chunk) { - Block res; + Blocks res; + Block block; + + auto join_block = [&]() + { + if (join->supportsJoinWithManyResultBlocks()) + join->joinBlock(block, res, not_processed); + else + { + join->joinBlock(block, not_processed); + res.push_back(std::move(block)); + } + }; if (!not_processed) { if (chunk.hasColumns()) - res = inputs.front().getHeader().cloneWithColumns(chunk.detachColumns()); + block = inputs.front().getHeader().cloneWithColumns(chunk.detachColumns()); - if (res) - join->joinBlock(res, not_processed); + if (block) + join_block(); } else if (not_processed->empty()) /// There's not processed data inside expression. { if (chunk.hasColumns()) - res = inputs.front().getHeader().cloneWithColumns(chunk.detachColumns()); + block = inputs.front().getHeader().cloneWithColumns(chunk.detachColumns()); not_processed.reset(); - join->joinBlock(res, not_processed); + join_block(); } else { - res = std::move(not_processed->block); - join->joinBlock(res, not_processed); + block = std::move(not_processed->block); + join_block(); } return res; diff --git a/src/Processors/Transforms/JoiningTransform.h b/src/Processors/Transforms/JoiningTransform.h index 5f6d9d6fff2..c3445cf3e36 100644 --- a/src/Processors/Transforms/JoiningTransform.h +++ b/src/Processors/Transforms/JoiningTransform.h @@ -1,6 +1,9 @@ #pragma once -#include + #include +#include + +#include #include namespace DB @@ -66,7 +69,7 @@ protected: private: Chunk input_chunk; - Chunk output_chunk; + std::deque output_chunks; bool has_input = false; bool has_output = false; bool stop_reading = false; @@ -86,7 +89,7 @@ private: IBlocksStreamPtr non_joined_blocks; size_t max_block_size; - Block readExecute(Chunk & chunk); + Blocks readExecute(Chunk & chunk); }; /// Fills Join with block from right table. 
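The readExecute() rewrite above is the consumer side of the new many-result-blocks interface. A minimal sketch of the calling convention it relies on, with hypothetical `join`, `block`, and `process` names and error handling omitted (simplified from the diff above, not the verbatim source):

    Blocks results;
    std::shared_ptr<ExtraBlock> not_processed;
    if (join->supportsJoinWithManyResultBlocks())
        join->joinBlock(block, results, not_processed);   /// may emit several result blocks at once
    else
    {
        join->joinBlock(block, not_processed);            /// legacy single-block path
        results.push_back(std::move(block));
    }
    for (auto & result : results)
        process(result);                                  /// e.g. one output Chunk per block

Keeping the results as separate blocks is what lets JoiningTransform push one Chunk at a time from its deque instead of concatenating everything into a single wide block first.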
From ceeb522b3713041d6b6ab00cedf218e1ef9fe245 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 3 Oct 2024 17:35:09 +0100 Subject: [PATCH 040/267] use old approach for thin blocks --- src/Interpreters/ConcurrentHashJoin.cpp | 53 +++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 4b88ef14196..f2d1d1418a4 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -5,6 +5,8 @@ #include #include #include +#include +#include #include #include #include @@ -25,6 +27,8 @@ #include #include +#include + using namespace DB; namespace ProfileEvents @@ -336,10 +340,31 @@ IColumn::Selector selectDispatchBlock(size_t num_shards, const Strings & key_col return hashToSelector(hash, num_shards); } -ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, const Block & from_block) +ScatteredBlocks scatterBlocksByCopying(size_t num_shards, const IColumn::Selector & selector, const Block & from_block) +{ + Blocks blocks(num_shards); + for (size_t i = 0; i < num_shards; ++i) + blocks[i] = from_block.cloneEmpty(); + + for (size_t i = 0; i < from_block.columns(); ++i) + { + auto dispatched_columns = from_block.getByPosition(i).column->scatter(num_shards, selector); + chassert(blocks.size() == dispatched_columns.size()); + for (size_t block_index = 0; block_index < num_shards; ++block_index) + { + blocks[block_index].getByPosition(i).column = std::move(dispatched_columns[block_index]); + } + } + + ScatteredBlocks result; + result.reserve(num_shards); + for (size_t i = 0; i < num_shards; ++i) + result.emplace_back(std::move(blocks[i])); + return result; +} + +ScatteredBlocks scatterBlocksWithSelector(size_t num_shards, const IColumn::Selector & selector, const Block & from_block) { - size_t num_shards = hash_joins.size(); - IColumn::Selector selector = selectDispatchBlock(num_shards, key_columns_names, from_block); std::vector selectors(num_shards); for (size_t i = 0; i < num_shards; ++i) { @@ -358,6 +383,28 @@ ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_na return result; } +ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, const Block & from_block) +{ + size_t num_shards = hash_joins.size(); + IColumn::Selector selector = selectDispatchBlock(num_shards, key_columns_names, from_block); + + /// With zero-copy approach we won't copy the source columns, but will create a new one with indices. + /// This is not beneficial when the whole set of columns is e.g. a single small column. + constexpr auto threshold = sizeof(IColumn::Selector::value_type); + const auto & data_types = from_block.getDataTypes(); + const bool use_zero_copy_approach + = std::accumulate( + data_types.begin(), + data_types.end(), + 0u, + [](size_t sum, const DataTypePtr & type) + { return sum + (type->haveMaximumSizeOfValue() ? type->getMaximumSizeOfValueInMemory() : threshold + 1); }) + > threshold; + + return use_zero_copy_approach ? 
scatterBlocksWithSelector(num_shards, selector, from_block)
                                    : scatterBlocksByCopying(num_shards, selector, from_block);
}

UInt64 calculateCacheKey(std::shared_ptr & table_join, const QueryTreeNodePtr & right_table_expression)
{
    IQueryTreeNode::HashState hash;

From 5cf92fe96404909307759d35fa5f412cd1d5a717 Mon Sep 17 00:00:00 2001
From: Nikita Taranov
Date: Mon, 14 Oct 2024 14:22:17 +0100
Subject: [PATCH 041/267] better

---
 .../HashJoin/HashJoinMethodsImpl.h         | 15 ++++++-
 src/Interpreters/HashJoin/ScatteredBlock.h | 40 ++++++-------
 2 files changed, 26 insertions(+), 29 deletions(-)

diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h
index 350b4c7cfc6..17c8c821bef 100644
--- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h
+++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h
@@ -164,8 +164,19 @@ ScatteredBlock HashJoinMethods::joinBlockImpl(
 
     if constexpr (join_features.need_replication)
     {
-        std::unique_ptr & offsets_to_replicate = added_columns.offsets_to_replicate;
-        block.replicate(*offsets_to_replicate, existing_columns, right_keys_to_replicate);
+        IColumn::Offsets & offsets = *added_columns.offsets_to_replicate;
+
+        chassert(block);
+        chassert(offsets.size() == block.rows());
+
+        auto && columns = block.getSourceBlock().getColumns();
+        for (size_t i = 0; i < existing_columns; ++i)
+            columns[i] = columns[i]->replicate(offsets);
+        for (size_t pos : right_keys_to_replicate)
+            columns[pos] = columns[pos]->replicate(offsets);
+
+        block.getSourceBlock().setColumns(columns);
+        block = ScatteredBlock(std::move(block).getSourceBlock());
     }
     return remaining_block;
 }
diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h
index 2fc115f8345..c114bb957c2 100644
--- a/src/Interpreters/HashJoin/ScatteredBlock.h
+++ b/src/Interpreters/HashJoin/ScatteredBlock.h
@@ -1,18 +1,13 @@
 #pragma once
 
-#include
 #include
 #include
 #include
 #include
 #include
-#include
 
 #include
 
-#include
-#include
-
 namespace DB
 {
 
@@ -24,6 +19,11 @@ extern const int LOGICAL_ERROR;
 namespace detail
 {
 
+/// Previously ConcurrentHashJoin used IColumn::scatter method to split input blocks to sub-blocks by hash.
+/// To avoid copying of columns, we introduce a new class ScatteredBlock that holds a block and a selector.
+/// So now each thread gets a copy of the source input block and a selector that tells which rows are meant for the given thread.
+/// Selector can be seen as just a list of indexes of rows that belong to the given thread.
+/// One optimization is to use a continuous range instead of an explicit list of indexes when selector contains all indexes from [L, R).
 class Selector
 {
 public:
@@ -31,12 +31,11 @@ public:
     using Indexes = ColumnUInt64;
     using IndexesPtr = ColumnUInt64::MutablePtr;
 
-    Selector() : Selector(0, 0) { }
-
     /// [begin, end)
     Selector(size_t begin, size_t end) : data(Range{begin, end}) { }
-
+    Selector() : Selector(0, 0) { }
     explicit Selector(size_t size) : Selector(0, size) { }
+
     explicit Selector(IndexesPtr && selector_) : data(initializeFromSelector(std::move(selector_))) { }
 
     class Iterator
@@ -105,6 +104,7 @@ public:
         }
     }
 
+    /// First selector contains first `num_rows` rows, second selector contains the rest
     std::pair split(size_t num_rows)
     {
         chassert(num_rows <= size());
@@ -179,6 +179,7 @@ private:
 
 }
 
+/// Source block + list of selected rows. See detail::Selector for more details.
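+/// For example (illustrative numbers, not part of this patch): scattering a six-row block across
+/// two shards by hashes {0,1,0,0,1,1} yields one ScatteredBlock with indexes (0,2,3) and another
+/// with (1,4,5), both sharing the same source block; a selector that covers every row of the
+/// block collapses to the Range form [0, 6).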
struct ScatteredBlock : private boost::noncopyable { using Selector = detail::Selector; @@ -224,7 +225,7 @@ struct ScatteredBlock : private boost::noncopyable /// Accounts only selected rows size_t rows() const { return selector.size(); } - /// Whether block was scattered, i.e. has non-trivial selector + /// Whether `block` was scattered, i.e. `selector` != [0, block.rows()) bool wasScattered() const { chassert(block); @@ -248,7 +249,7 @@ struct ScatteredBlock : private boost::noncopyable selector = Selector(std::move(new_selector)); } - /// Applies selector to block in place + /// Applies `selector` to the `block` in-place void filterBySelector() { if (!block || !wasScattered()) @@ -266,7 +267,7 @@ struct ScatteredBlock : private boost::noncopyable return; } - /// The general case when selector is non-trivial (likely the result of applying a filter) + /// The general case when `selector` is non-trivial (likely the result of applying a filter) auto columns = block.getColumns(); for (auto & col : columns) col = col->index(selector.getIndexes(), /*limit*/ 0); @@ -274,7 +275,7 @@ struct ScatteredBlock : private boost::noncopyable selector = Selector(block.rows()); } - /// Cut first num_rows rows from block in place and returns block with remaining rows + /// Cut first `num_rows` rows from `block` in place and returns block with remaining rows ScatteredBlock cut(size_t num_rows) { SCOPE_EXIT(filterBySelector()); @@ -293,21 +294,6 @@ struct ScatteredBlock : private boost::noncopyable return remaining; } - void replicate(const IColumn::Offsets & offsets, size_t existing_columns, const std::vector & right_keys_to_replicate) - { - chassert(block); - chassert(offsets.size() == rows()); - - auto && columns = block.getColumns(); - for (size_t i = 0; i < existing_columns; ++i) - columns[i] = columns[i]->replicate(offsets); - for (size_t pos : right_keys_to_replicate) - columns[pos] = columns[pos]->replicate(offsets); - - block.setColumns(columns); - selector = Selector(block.rows()); - } - private: Block block; Selector selector; From d10b79020edda1f35f9f3637447cd57d90352bae Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 14 Oct 2024 13:39:03 +0000 Subject: [PATCH 042/267] Refactoring TempDataOnDisk --- src/Common/CurrentMetrics.cpp | 1 + .../gtest_cascade_and_memory_write_buffer.cpp | 2 +- src/Interpreters/Aggregator.cpp | 12 +- src/Interpreters/Aggregator.h | 9 +- src/Interpreters/Context.cpp | 20 +- src/Interpreters/GraceHashJoin.cpp | 60 +- src/Interpreters/GraceHashJoin.h | 2 +- src/Interpreters/HashJoin/HashJoin.cpp | 29 +- src/Interpreters/HashJoin/HashJoin.h | 5 +- src/Interpreters/TableJoin.h | 7 +- src/Interpreters/TemporaryDataOnDisk.cpp | 647 ++++++++---------- src/Interpreters/TemporaryDataOnDisk.h | 233 ++++--- src/Interpreters/tests/gtest_filecache.cpp | 85 ++- .../Algorithms/CollapsingSortedAlgorithm.cpp | 6 +- .../Algorithms/CollapsingSortedAlgorithm.h | 6 +- .../Algorithms/MergingSortedAlgorithm.cpp | 3 +- .../Algorithms/MergingSortedAlgorithm.h | 6 +- .../Algorithms/ReplacingSortedAlgorithm.cpp | 6 +- .../Algorithms/ReplacingSortedAlgorithm.h | 6 +- .../VersionedCollapsingAlgorithm.cpp | 6 +- .../Algorithms/VersionedCollapsingAlgorithm.h | 6 +- .../Merges/CollapsingSortedTransform.h | 2 +- .../Merges/MergingSortedTransform.cpp | 2 +- .../Merges/MergingSortedTransform.h | 2 +- .../Merges/ReplacingSortedTransform.h | 4 +- .../Merges/VersionedCollapsingTransform.h | 4 +- .../QueryPlan/BuildQueryPipelineSettings.h | 1 - src/Processors/QueryPlan/SortingStep.cpp | 6 +- 
.../Transforms/AggregatingTransform.cpp | 27 +- .../Transforms/MergeSortingTransform.cpp | 44 +- .../Transforms/MergeSortingTransform.h | 5 +- src/QueryPipeline/QueryPipelineBuilder.h | 6 + src/QueryPipeline/QueryPlanResourceHolder.h | 2 + src/Server/HTTPHandler.cpp | 39 +- src/Storages/MergeTree/MergeTask.cpp | 124 +--- src/Storages/MergeTree/MergeTask.h | 5 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 5 +- 37 files changed, 646 insertions(+), 789 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index bd62e7e8aae..8d232e11df3 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -45,6 +45,7 @@ M(TemporaryFilesForSort, "Number of temporary files created for external sorting") \ M(TemporaryFilesForAggregation, "Number of temporary files created for external aggregation") \ M(TemporaryFilesForJoin, "Number of temporary files created for JOIN") \ + M(TemporaryFilesForMerge, "Number of temporary files for vertical merge") \ M(TemporaryFilesUnknown, "Number of temporary files created without known purpose") \ M(Read, "Number of read (read, pread, io_getevents, etc.) syscalls in fly") \ M(RemoteRead, "Number of read with remote reader in fly") \ diff --git a/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp b/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp index 23b783173c8..6fd7570c4eb 100644 --- a/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp +++ b/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp @@ -69,7 +69,7 @@ static void testCascadeBufferRedability( auto rbuf = wbuf_readable.tryGetReadBuffer(); ASSERT_FALSE(!rbuf); - concat.appendBuffer(wrapReadBufferPointer(std::move(rbuf))); + concat.appendBuffer(std::move(rbuf)); } std::string decoded_data; diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 0fe1c74ed17..e6fecc37cfa 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -335,7 +335,7 @@ Aggregator::Aggregator(const Block & header_, const Params & params_) : header(header_) , keys_positions(calculateKeysPositions(header, params_)) , params(params_) - , tmp_data(params.tmp_data_scope ? std::make_unique(params.tmp_data_scope, CurrentMetrics::TemporaryFilesForAggregation) : nullptr) + , tmp_data(params.tmp_data_scope ? params.tmp_data_scope->childScope(CurrentMetrics::TemporaryFilesForAggregation) : nullptr) , min_bytes_for_prefetch(getMinBytesForPrefetch()) { /// Use query-level memory tracker @@ -1519,10 +1519,10 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si Stopwatch watch; size_t rows = data_variants.size(); - auto & out_stream = tmp_data->createStream(getHeader(false), max_temp_file_size); + auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart); - LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getPath()); + LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getHolder()->describeFilePath()); /// Flush only two-level data and possibly overflow data. 
@@ -1643,7 +1643,7 @@ template void Aggregator::writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, Method & method, - TemporaryFileStream & out) const + TemporaryBlockStreamHolder & out) const { size_t max_temporary_block_size_rows = 0; size_t max_temporary_block_size_bytes = 0; @@ -1660,14 +1660,14 @@ void Aggregator::writeToTemporaryFileImpl( for (UInt32 bucket = 0; bucket < Method::Data::NUM_BUCKETS; ++bucket) { Block block = convertOneBucketToBlock(data_variants, method, data_variants.aggregates_pool, false, bucket); - out.write(block); + out->write(block); update_max_sizes(block); } if (params.overflow_row) { Block block = prepareBlockAndFillWithoutKey(data_variants, false, true); - out.write(block); + out->write(block); update_max_sizes(block); } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 4de0a640219..bc28d3dccb8 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -309,9 +309,9 @@ public: /// For external aggregation. void writeToTemporaryFile(AggregatedDataVariants & data_variants, size_t max_temp_file_size = 0) const; - bool hasTemporaryData() const { return tmp_data && !tmp_data->empty(); } + bool hasTemporaryData() const { return !tmp_files.empty(); } - const TemporaryDataOnDisk & getTemporaryData() const { return *tmp_data; } + std::vector & getTemporaryData() { return tmp_files; } /// Get data structure of the result. Block getHeader(bool final) const; @@ -355,7 +355,8 @@ private: LoggerPtr log = getLogger("Aggregator"); /// For external aggregation. - TemporaryDataOnDiskPtr tmp_data; + TemporaryDataOnDiskScopePtr tmp_data; + mutable std::vector tmp_files; size_t min_bytes_for_prefetch = 0; @@ -456,7 +457,7 @@ private: void writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, Method & method, - TemporaryFileStream & out) const; + TemporaryBlockStreamHolder & out) const; /// Merge NULL key data from hash table `src` into `dst`. template diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 85cde959b66..6ada12e63f9 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -353,6 +353,8 @@ struct ContextSharedPart : boost::noncopyable /// Child scopes for more fine-grained accounting are created per user/query/etc. /// Initialized once during server startup. TemporaryDataOnDiskScopePtr root_temp_data_on_disk TSA_GUARDED_BY(mutex); + /// TODO: remove, use only root_temp_data_on_disk + VolumePtr temporary_volume_legacy; mutable OnceFlag async_loader_initialized; mutable std::unique_ptr async_loader; /// Thread pool for asynchronous initialization of arbitrary DAG of `LoadJob`s (used for tables loading) @@ -783,10 +785,9 @@ struct ContextSharedPart : boost::noncopyable } /// Special volumes might also use disks that require shutdown. - auto & tmp_data = root_temp_data_on_disk; - if (tmp_data && tmp_data->getVolume()) + if (temporary_volume_legacy) { - auto & disks = tmp_data->getVolume()->getDisks(); + auto & disks = temporary_volume_legacy->getDisks(); for (auto & disk : disks) disk->shutdown(); } @@ -1166,8 +1167,8 @@ VolumePtr Context::getGlobalTemporaryVolume() const SharedLockGuard lock(shared->mutex); /// Calling this method we just bypass the `temp_data_on_disk` and write to the file on the volume directly. /// Volume is the same for `root_temp_data_on_disk` (always set) and `temp_data_on_disk` (if it's set). 
- if (shared->root_temp_data_on_disk) - return shared->root_temp_data_on_disk->getVolume(); + if (shared->temporary_volume_legacy) + return shared->temporary_volume_legacy; return nullptr; } @@ -1288,7 +1289,8 @@ void Context::setTemporaryStoragePath(const String & path, size_t max_size) TemporaryDataOnDiskSettings temporary_data_on_disk_settings; temporary_data_on_disk_settings.max_size_on_disk = max_size; - shared->root_temp_data_on_disk = std::make_shared(std::move(volume), std::move(temporary_data_on_disk_settings)); + shared->root_temp_data_on_disk = std::make_shared(volume, std::move(temporary_data_on_disk_settings)); + shared->temporary_volume_legacy = volume; } void Context::setTemporaryStoragePolicy(const String & policy_name, size_t max_size) @@ -1336,7 +1338,8 @@ void Context::setTemporaryStoragePolicy(const String & policy_name, size_t max_s TemporaryDataOnDiskSettings temporary_data_on_disk_settings; temporary_data_on_disk_settings.max_size_on_disk = max_size; - shared->root_temp_data_on_disk = std::make_shared(std::move(volume), std::move(temporary_data_on_disk_settings)); + shared->root_temp_data_on_disk = std::make_shared(volume, std::move(temporary_data_on_disk_settings)); + shared->temporary_volume_legacy = volume; } void Context::setTemporaryStorageInCache(const String & cache_disk_name, size_t max_size) @@ -1360,7 +1363,8 @@ void Context::setTemporaryStorageInCache(const String & cache_disk_name, size_t TemporaryDataOnDiskSettings temporary_data_on_disk_settings; temporary_data_on_disk_settings.max_size_on_disk = max_size; - shared->root_temp_data_on_disk = std::make_shared(std::move(volume), file_cache.get(), std::move(temporary_data_on_disk_settings)); + shared->root_temp_data_on_disk = std::make_shared(file_cache.get(), std::move(temporary_data_on_disk_settings)); + shared->temporary_volume_legacy = volume; } void Context::setFlagsPath(const String & path) diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index 978782c851f..a2010b7d94b 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -41,15 +41,15 @@ namespace class AccumulatedBlockReader { public: - AccumulatedBlockReader(TemporaryFileStream & reader_, + AccumulatedBlockReader(TemporaryBlockStreamReaderHolder reader_, std::mutex & mutex_, size_t result_block_size_ = 0) - : reader(reader_) + : reader(std::move(reader_)) , mutex(mutex_) , result_block_size(result_block_size_) { - if (!reader.isWriteFinished()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Reading not finished file"); + if (!reader) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Reader is nullptr"); } Block read() @@ -63,7 +63,7 @@ namespace size_t rows_read = 0; do { - Block block = reader.read(); + Block block = reader->read(); rows_read += block.rows(); if (!block) { @@ -81,7 +81,7 @@ namespace } private: - TemporaryFileStream & reader; + TemporaryBlockStreamReaderHolder reader; std::mutex & mutex; const size_t result_block_size; @@ -124,12 +124,12 @@ class GraceHashJoin::FileBucket : boost::noncopyable public: using BucketLock = std::unique_lock; - explicit FileBucket(size_t bucket_index_, TemporaryFileStream & left_file_, TemporaryFileStream & right_file_, LoggerPtr log_) - : idx{bucket_index_} - , left_file{left_file_} - , right_file{right_file_} - , state{State::WRITING_BLOCKS} - , log{log_} + explicit FileBucket(size_t bucket_index_, TemporaryBlockStreamHolder left_file_, TemporaryBlockStreamHolder right_file_, LoggerPtr log_) + : idx(bucket_index_) + , 
left_file(std::move(left_file_)) + , right_file(std::move(right_file_)) + , state(State::WRITING_BLOCKS) + , log(log_) { } @@ -157,12 +157,6 @@ public: return addBlockImpl(block, right_file, lock); } - bool finished() const - { - std::unique_lock left_lock(left_file_mutex); - return left_file.isEof(); - } - bool empty() const { return is_empty.load(); } AccumulatedBlockReader startJoining() @@ -172,24 +166,21 @@ public: std::unique_lock left_lock(left_file_mutex); std::unique_lock right_lock(right_file_mutex); - left_file.finishWriting(); - right_file.finishWriting(); - state = State::JOINING_BLOCKS; } - return AccumulatedBlockReader(right_file, right_file_mutex); + return AccumulatedBlockReader(right_file.getReadStream(), right_file_mutex); } AccumulatedBlockReader getLeftTableReader() { ensureState(State::JOINING_BLOCKS); - return AccumulatedBlockReader(left_file, left_file_mutex); + return AccumulatedBlockReader(left_file.getReadStream(), left_file_mutex); } const size_t idx; private: - bool addBlockImpl(const Block & block, TemporaryFileStream & writer, std::unique_lock & lock) + bool addBlockImpl(const Block & block, TemporaryBlockStreamHolder & writer, std::unique_lock & lock) { ensureState(State::WRITING_BLOCKS); @@ -199,7 +190,7 @@ private: if (block.rows()) is_empty = false; - writer.write(block); + writer->write(block); return true; } @@ -217,8 +208,8 @@ private: throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid state transition, expected {}, got {}", expected, state.load()); } - TemporaryFileStream & left_file; - TemporaryFileStream & right_file; + TemporaryBlockStreamHolder left_file; + TemporaryBlockStreamHolder right_file; mutable std::mutex left_file_mutex; mutable std::mutex right_file_mutex; @@ -274,7 +265,7 @@ GraceHashJoin::GraceHashJoin( , max_num_buckets{context->getSettingsRef()[Setting::grace_hash_join_max_buckets]} , left_key_names(table_join->getOnlyClause().key_names_left) , right_key_names(table_join->getOnlyClause().key_names_right) - , tmp_data(std::make_unique(tmp_data_, CurrentMetrics::TemporaryFilesForJoin)) + , tmp_data(tmp_data_->childScope(CurrentMetrics::TemporaryFilesForJoin)) , hash_join(makeInMemoryJoin("grace0")) , hash_join_sample_block(hash_join->savedBlockSample()) { @@ -398,10 +389,10 @@ void GraceHashJoin::addBuckets(const size_t bucket_count) for (size_t i = 0; i < bucket_count; ++i) try { - auto & left_file = tmp_data->createStream(left_sample_block); - auto & right_file = tmp_data->createStream(prepareRightBlock(right_sample_block)); + TemporaryBlockStreamHolder left_file = TemporaryBlockStreamHolder(left_sample_block, tmp_data.get()); + TemporaryBlockStreamHolder right_file = TemporaryBlockStreamHolder(prepareRightBlock(right_sample_block), tmp_data.get()); - BucketPtr new_bucket = std::make_shared(current_size + i, left_file, right_file, log); + BucketPtr new_bucket = std::make_shared(current_size + i, std::move(left_file), std::move(right_file), log); tmp_buckets.emplace_back(std::move(new_bucket)); } catch (...) @@ -632,12 +623,9 @@ IBlocksStreamPtr GraceHashJoin::getDelayedBlocks() for (bucket_idx = bucket_idx + 1; bucket_idx < buckets.size(); ++bucket_idx) { current_bucket = buckets[bucket_idx].get(); - if (current_bucket->finished() || current_bucket->empty()) + if (current_bucket->empty()) { - LOG_TRACE(log, "Skipping {} {} bucket {}", - current_bucket->finished() ? "finished" : "", - current_bucket->empty() ? 
"empty" : "", - bucket_idx); + LOG_TRACE(log, "Skipping empty bucket {}", bucket_idx); continue; } diff --git a/src/Interpreters/GraceHashJoin.h b/src/Interpreters/GraceHashJoin.h index d31d6886af7..938c9b1facf 100644 --- a/src/Interpreters/GraceHashJoin.h +++ b/src/Interpreters/GraceHashJoin.h @@ -132,7 +132,7 @@ private: Names left_key_names; Names right_key_names; - TemporaryDataOnDiskPtr tmp_data; + TemporaryDataOnDiskScopePtr tmp_data; Buckets buckets; mutable SharedMutex rehash_mutex; diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 3e7f3deea8b..af23b520abb 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -35,11 +35,6 @@ #include #include -namespace CurrentMetrics -{ - extern const Metric TemporaryFilesForJoin; -} - namespace DB { @@ -64,7 +59,7 @@ struct NotProcessedCrossJoin : public ExtraBlock { size_t left_position; size_t right_block; - std::unique_ptr reader; + TemporaryBlockStreamReaderHolder reader; }; @@ -106,10 +101,7 @@ HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_s , instance_id(instance_id_) , asof_inequality(table_join->getAsofInequality()) , data(std::make_shared()) - , tmp_data( - table_join_->getTempDataOnDisk() - ? std::make_unique(table_join_->getTempDataOnDisk(), CurrentMetrics::TemporaryFilesForJoin) - : nullptr) + , tmp_data(table_join_->getTempDataOnDisk()) , right_sample_block(right_sample_block_) , max_joined_block_rows(table_join->maxJoinedBlockRows()) , instance_log_id(!instance_id_.empty() ? "(" + instance_id_ + ") " : "") @@ -520,10 +512,9 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) && (tmp_stream || (max_bytes_in_join && getTotalByteCount() + block_to_save.allocatedBytes() >= max_bytes_in_join) || (max_rows_in_join && getTotalRowCount() + block_to_save.rows() >= max_rows_in_join))) { - if (tmp_stream == nullptr) - { - tmp_stream = &tmp_data->createStream(right_sample_block); - } + if (!tmp_stream) + tmp_stream = TemporaryBlockStreamHolder(right_sample_block, tmp_data.get()); + tmp_stream->write(block_to_save); return true; } @@ -730,7 +721,7 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) { size_t start_left_row = 0; size_t start_right_block = 0; - std::unique_ptr reader = nullptr; + TemporaryBlockStreamReaderHolder reader; if (not_processed) { auto & continuation = static_cast(*not_processed); @@ -804,11 +795,9 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) if (tmp_stream && rows_added <= max_joined_block_rows) { - if (reader == nullptr) - { - tmp_stream->finishWritingAsyncSafe(); - reader = tmp_stream->getReadStream(); - } + if (!reader) + reader = tmp_stream.getReadStream(); + while (auto block_right = reader->read()) { ++block_number; diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 4c1ebbcdc66..0f50e110db9 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -423,8 +423,9 @@ private: std::vector key_sizes; /// Needed to do external cross join - TemporaryDataOnDiskPtr tmp_data; - TemporaryFileStream* tmp_stream{nullptr}; + TemporaryDataOnDiskScopePtr tmp_data; + TemporaryBlockStreamHolder tmp_stream; + mutable std::once_flag finish_writing; /// Block with columns from the right-side table. 
Block right_sample_block; diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index e1bae55a4ed..4ecbc9eb960 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -20,6 +20,11 @@ #include #include +namespace CurrentMetrics +{ + extern const Metric TemporaryFilesForJoin; +} + namespace DB { @@ -265,7 +270,7 @@ public: VolumePtr getGlobalTemporaryVolume() { return tmp_volume; } - TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data; } + TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data ? tmp_data->childScope(CurrentMetrics::TemporaryFilesForJoin) : nullptr; } ActionsDAG createJoinedBlockActions(ContextPtr context) const; diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 81796678f24..c3b24fb783b 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -27,11 +27,266 @@ namespace DB namespace ErrorCodes { - extern const int TOO_MANY_ROWS_OR_BYTES; + extern const int INVALID_STATE; extern const int LOGICAL_ERROR; extern const int NOT_ENOUGH_SPACE; + extern const int TOO_MANY_ROWS_OR_BYTES; } +namespace +{ + +inline CompressionCodecPtr getCodec(const TemporaryDataOnDiskSettings & settings) +{ + if (settings.compression_codec.empty()) + return CompressionCodecFactory::instance().get("NONE"); + + return CompressionCodecFactory::instance().get(settings.compression_codec); +} + +} + +TemporaryFileHolder::TemporaryFileHolder() +{ + ProfileEvents::increment(ProfileEvents::ExternalProcessingFilesTotal); +} + + +class TemporaryFileInLocalCache : public TemporaryFileHolder +{ +public: + explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t max_file_size = 0) + { + const auto key = FileSegment::Key::random(); + segment_holder = file_cache.set( + key, 0, std::max(10_MiB, max_file_size), + CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); + + chassert(segment_holder->size() == 1); + segment_holder->front().getKeyMetadata()->createBaseDirectory(/* throw_if_failed */true); + } + + std::unique_ptr write() override + { + return std::make_unique(&segment_holder->front()); + } + + std::unique_ptr read(size_t buffer_size) const override + { + return std::make_unique(segment_holder->front().getPath(), /* buf_size = */ buffer_size); + } + + String describeFilePath() const override + { + return fmt::format("fscache://{}", segment_holder->front().getPath()); + } + +private: + FileSegmentsHolderPtr segment_holder; +}; + +class TemporaryFileOnLocalDisk : public TemporaryFileHolder +{ +public: + explicit TemporaryFileOnLocalDisk(VolumePtr volume, size_t max_file_size = 0) + : path_to_file("tmp" + toString(UUIDHelpers::generateV4())) + { + if (max_file_size > 0) + { + auto reservation = volume->reserve(max_file_size); + if (!reservation) + throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk"); + disk = reservation->getDisk(); + } + else + { + disk = volume->getDisk(); + } + chassert(disk); + } + + std::unique_ptr write() override + { + return disk->writeFile(path_to_file); + } + + std::unique_ptr read(size_t buffer_size) const override + { + ReadSettings settings; + settings.local_fs_buffer_size = buffer_size; + settings.remote_fs_buffer_size = buffer_size; + settings.prefetch_buffer_size = buffer_size; + + return disk->readFile(path_to_file, settings); + } + + String describeFilePath() const override + { + return fmt::format("disk({})://{}/{}", disk->getName(), 
disk->getPath(), path_to_file); + } + + ~TemporaryFileOnLocalDisk() override + try + { + if (disk->exists(path_to_file)) + disk->removeRecursive(path_to_file); + else + LOG_WARNING(getLogger("TemporaryFileOnLocalDisk"), "Temporary path '{}' does not exist in '{}' on disk {}", path_to_file, disk->getPath(), disk->getName()); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + +private: + DiskPtr disk; + String path_to_file; +}; + +TemporaryFileProvider createTemporaryFileProvider(VolumePtr volume) +{ + if (!volume) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Volume is not initialized"); + return [volume](size_t max_size) -> std::unique_ptr + { + return std::make_unique(volume, max_size); + }; +} + +TemporaryFileProvider createTemporaryFileProvider(FileCache * file_cache) +{ + if (!file_cache || !file_cache->isInitialized()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "File cache is not initialized"); + return [file_cache](size_t max_size) -> std::unique_ptr + { + return std::make_unique(*file_cache, max_size); + }; +} + +TemporaryDataOnDiskScopePtr TemporaryDataOnDiskScope::childScope(CurrentMetrics::Metric current_metric) +{ + TemporaryDataOnDiskSettings child_settings = settings; + child_settings.current_metric = current_metric; + return std::make_shared(shared_from_this(), child_settings); +} + +TemporaryDataReadBuffer::TemporaryDataReadBuffer(std::unique_ptr in_) + : ReadBuffer(nullptr, 0) + , compressed_buf(std::move(in_)) +{ + BufferBase::set(compressed_buf->buffer().begin(), compressed_buf->buffer().size(), compressed_buf->offset()); +} + +bool TemporaryDataReadBuffer::nextImpl() +{ + compressed_buf->position() = position(); + if (!compressed_buf->next()) + { + set(compressed_buf->position(), 0); + return false; + } + BufferBase::set(compressed_buf->buffer().begin(), compressed_buf->buffer().size(), compressed_buf->offset()); + return true; +} + +TemporaryDataBuffer::TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t max_file_size) + : WriteBuffer(nullptr, 0) + , parent(parent_) + , file_holder(parent->file_provider(max_file_size == 0 ? 
parent->getSettings().max_size_on_disk : max_file_size))
+    , out_compressed_buf(file_holder->write(), getCodec(parent->getSettings()))
+{
+    WriteBuffer::set(out_compressed_buf->buffer().begin(), out_compressed_buf->buffer().size());
+}
+
+void TemporaryDataBuffer::nextImpl()
+{
+    if (!out_compressed_buf)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file buffer writing has been finished");
+
+    out_compressed_buf->position() = position();
+    out_compressed_buf->next();
+    BufferBase::set(out_compressed_buf->buffer().begin(), out_compressed_buf->buffer().size(), out_compressed_buf->offset());
+    updateAllocAndCheck();
+}
+
+String TemporaryDataBuffer::describeFilePath() const
+{
+    return file_holder->describeFilePath();
+}
+
+TemporaryDataBuffer::~TemporaryDataBuffer()
+{
+    if (out_compressed_buf)
+        // neither read() nor finishWriting() was called
+        cancel();
+}
+
+void TemporaryDataBuffer::cancelImpl() noexcept
+{
+    if (out_compressed_buf)
+    {
+        /// CompressedWriteBuffer doesn't call cancel/finalize for wrapped buffer
+        out_compressed_buf->cancel();
+        out_compressed_buf.getHolder()->cancel();
+        out_compressed_buf.reset();
+    }
+}
+
+void TemporaryDataBuffer::finalizeImpl()
+{
+    if (!out_compressed_buf)
+        return;
+
+    /// CompressedWriteBuffer doesn't call cancel/finalize for wrapped buffer
+    out_compressed_buf->finalize();
+    out_compressed_buf.getHolder()->finalize();
+
+    updateAllocAndCheck();
+    out_compressed_buf.reset();
+}
+
+TemporaryDataBuffer::Stat TemporaryDataBuffer::finishWriting()
+{
+    /// TemporaryDataBuffer::read can be called from multiple threads
+    std::call_once(write_finished, [this]
+    {
+        if (canceled)
+            throw Exception(ErrorCodes::INVALID_STATE, "Writing to temporary file buffer was not successful");
+        next();
+        finalize();
+    });
+    return stat;
+}
+
+std::unique_ptr<ReadBuffer> TemporaryDataBuffer::read()
+{
+    finishWriting();
+
+    /// Keep the buffer size smaller than the file size, to avoid memory overhead for large numbers of small files
+    size_t buffer_size = std::min(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE);
+    return std::make_unique<TemporaryDataReadBuffer>(file_holder->read(buffer_size));
+}
+
+void TemporaryDataBuffer::updateAllocAndCheck()
+{
+    if (!out_compressed_buf)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file buffer writing has been finished");
+
+    size_t new_compressed_size = out_compressed_buf->getCompressedBytes();
+    size_t new_uncompressed_size = out_compressed_buf->getUncompressedBytes();
+
+    if (unlikely(new_compressed_size < stat.compressed_size || new_uncompressed_size < stat.uncompressed_size))
+    {
+        throw Exception(ErrorCodes::LOGICAL_ERROR,
+            "Temporary file {} size decreased after write: compressed: {} -> {}, uncompressed: {} -> {}",
+            file_holder ?
file_holder->describeFilePath() : "NULL", + new_compressed_size, stat.compressed_size, new_uncompressed_size, stat.uncompressed_size); + } + + parent->deltaAllocAndCheck(new_compressed_size - stat.compressed_size, new_uncompressed_size - stat.uncompressed_size); + stat.compressed_size = new_compressed_size; + stat.uncompressed_size = new_uncompressed_size; +} void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta) { @@ -54,391 +309,25 @@ void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssiz stat.uncompressed_size += uncompressed_delta; } -TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_) - : TemporaryDataOnDiskScope(parent_, parent_->getSettings()) +TemporaryBlockStreamHolder::TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size) + : WrapperGuard(std::make_unique(parent_, max_file_size), DBMS_TCP_PROTOCOL_VERSION, header_) + , header(header_) {} -TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Metric metric_scope) - : TemporaryDataOnDiskScope(parent_, parent_->getSettings()) - , current_metric_scope(metric_scope) -{} - -std::unique_ptr TemporaryDataOnDisk::createRawStream(size_t max_file_size) +TemporaryDataBuffer::Stat TemporaryBlockStreamHolder::finishWriting() const { - if (file_cache && file_cache->isInitialized()) - { - auto holder = createCacheFile(max_file_size); - return std::make_unique(std::move(holder)); - } - if (volume) - { - auto tmp_file = createRegularFile(max_file_size); - return std::make_unique(std::move(tmp_file)); - } + if (!holder) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary block stream is not initialized"); - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache and no volume"); + impl->flush(); + return holder->finishWriting(); } -TemporaryFileStream & TemporaryDataOnDisk::createStream(const Block & header, size_t max_file_size) +TemporaryBlockStreamReaderHolder TemporaryBlockStreamHolder::getReadStream() const { - if (file_cache && file_cache->isInitialized()) - { - auto holder = createCacheFile(max_file_size); - - std::lock_guard lock(mutex); - TemporaryFileStreamPtr & tmp_stream = streams.emplace_back(std::make_unique(std::move(holder), header, this)); - return *tmp_stream; - } - if (volume) - { - auto tmp_file = createRegularFile(max_file_size); - std::lock_guard lock(mutex); - TemporaryFileStreamPtr & tmp_stream - = streams.emplace_back(std::make_unique(std::move(tmp_file), header, this)); - return *tmp_stream; - } - - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache and no volume"); -} - -FileSegmentsHolderPtr TemporaryDataOnDisk::createCacheFile(size_t max_file_size) -{ - if (!file_cache) - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache"); - - ProfileEvents::increment(ProfileEvents::ExternalProcessingFilesTotal); - - const auto key = FileSegment::Key::random(); - auto holder = file_cache->set( - key, 0, std::max(10_MiB, max_file_size), - CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); - - chassert(holder->size() == 1); - holder->back().getKeyMetadata()->createBaseDirectory(/* throw_if_failed */true); - - return holder; -} - -TemporaryFileOnDiskHolder TemporaryDataOnDisk::createRegularFile(size_t max_file_size) -{ - if (!volume) - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no volume"); - - 
DiskPtr disk; - if (max_file_size > 0) - { - auto reservation = volume->reserve(max_file_size); - if (!reservation) - throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk"); - disk = reservation->getDisk(); - } - else - { - disk = volume->getDisk(); - } - /// We do not increment ProfileEvents::ExternalProcessingFilesTotal here because it is incremented in TemporaryFileOnDisk constructor. - return std::make_unique(disk, current_metric_scope); -} - -std::vector TemporaryDataOnDisk::getStreams() const -{ - std::vector res; - std::lock_guard lock(mutex); - res.reserve(streams.size()); - for (const auto & stream : streams) - res.push_back(stream.get()); - return res; -} - -bool TemporaryDataOnDisk::empty() const -{ - std::lock_guard lock(mutex); - return streams.empty(); -} - -static inline CompressionCodecPtr getCodec(const TemporaryDataOnDiskSettings & settings) -{ - if (settings.compression_codec.empty()) - return CompressionCodecFactory::instance().get("NONE"); - - return CompressionCodecFactory::instance().get(settings.compression_codec); -} - -struct TemporaryFileStream::OutputWriter -{ - OutputWriter(std::unique_ptr out_buf_, const Block & header_, const TemporaryDataOnDiskSettings & settings) - : out_buf(std::move(out_buf_)) - , out_compressed_buf(*out_buf, getCodec(settings)) - , out_writer(out_compressed_buf, DBMS_TCP_PROTOCOL_VERSION, header_) - { - } - - size_t write(const Block & block) - { - if (finalized) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot write to finalized stream"); - size_t written_bytes = out_writer.write(block); - num_rows += block.rows(); - return written_bytes; - } - - void flush() - { - if (finalized) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot flush finalized stream"); - - out_compressed_buf.next(); - out_buf->next(); - out_writer.flush(); - } - - void finalize() - { - if (finalized) - return; - - /// if we called finalize() explicitly, and got an exception, - /// we don't want to get it again in the destructor, so set finalized flag first - finalized = true; - - out_writer.flush(); - out_compressed_buf.finalize(); - out_buf->finalize(); - } - - ~OutputWriter() - { - try - { - finalize(); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } - } - - std::unique_ptr out_buf; - CompressedWriteBuffer out_compressed_buf; - NativeWriter out_writer; - - std::atomic_size_t num_rows = 0; - - bool finalized = false; -}; - -TemporaryFileStream::Reader::Reader(const String & path_, const Block & header_, size_t size_) - : path(path_) - , size(size_ ? std::min(size_, DBMS_DEFAULT_BUFFER_SIZE) : DBMS_DEFAULT_BUFFER_SIZE) - , header(header_) -{ - LOG_TEST(getLogger("TemporaryFileStream"), "Reading {} from {}", header_.dumpStructure(), path); -} - -TemporaryFileStream::Reader::Reader(const String & path_, size_t size_) - : path(path_) - , size(size_ ? 
std::min(size_, DBMS_DEFAULT_BUFFER_SIZE) : DBMS_DEFAULT_BUFFER_SIZE) -{ - LOG_TEST(getLogger("TemporaryFileStream"), "Reading from {}", path); -} - -Block TemporaryFileStream::Reader::read() -{ - if (!in_reader) - { - if (fs::exists(path)) - in_file_buf = std::make_unique(path, size); - else - in_file_buf = std::make_unique(); - - in_compressed_buf = std::make_unique(*in_file_buf); - if (header.has_value()) - in_reader = std::make_unique(*in_compressed_buf, header.value(), DBMS_TCP_PROTOCOL_VERSION); - else - in_reader = std::make_unique(*in_compressed_buf, DBMS_TCP_PROTOCOL_VERSION); - } - return in_reader->read(); -} - -TemporaryFileStream::TemporaryFileStream(TemporaryFileOnDiskHolder file_, const Block & header_, TemporaryDataOnDisk * parent_) - : parent(parent_) - , header(header_) - , file(std::move(file_)) - , out_writer(std::make_unique(std::make_unique(file->getAbsolutePath()), header, parent->settings)) -{ - LOG_TEST(getLogger("TemporaryFileStream"), "Writing to temporary file {}", file->getAbsolutePath()); -} - -TemporaryFileStream::TemporaryFileStream(FileSegmentsHolderPtr segments_, const Block & header_, TemporaryDataOnDisk * parent_) - : parent(parent_) - , header(header_) - , segment_holder(std::move(segments_)) -{ - if (segment_holder->size() != 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream can be created only from single segment"); - auto out_buf = std::make_unique(&segment_holder->front()); - - LOG_TEST(getLogger("TemporaryFileStream"), "Writing to temporary file {}", out_buf->getFileName()); - out_writer = std::make_unique(std::move(out_buf), header, parent_->settings); -} - -size_t TemporaryFileStream::write(const Block & block) -{ - if (!out_writer) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been finished"); - - updateAllocAndCheck(); - size_t bytes_written = out_writer->write(block); - return bytes_written; -} - -void TemporaryFileStream::flush() -{ - if (!out_writer) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been finished"); - - out_writer->flush(); -} - -TemporaryFileStream::Stat TemporaryFileStream::finishWriting() -{ - if (isWriteFinished()) - return stat; - - if (out_writer) - { - out_writer->finalize(); - /// The amount of written data can be changed after finalization, some buffers can be flushed - /// Need to update the stat - updateAllocAndCheck(); - out_writer.reset(); - - /// reader will be created at the first read call, not to consume memory before it is needed - } - return stat; -} - -TemporaryFileStream::Stat TemporaryFileStream::finishWritingAsyncSafe() -{ - std::call_once(finish_writing, [this]{ finishWriting(); }); - return stat; -} - -bool TemporaryFileStream::isWriteFinished() const -{ - assert(in_reader == nullptr || out_writer == nullptr); - return out_writer == nullptr; -} - -Block TemporaryFileStream::read() -{ - if (!isWriteFinished()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been not finished"); - - if (isEof()) - return {}; - - if (!in_reader) - { - in_reader = std::make_unique(getPath(), header, getSize()); - } - - Block block = in_reader->read(); - if (!block) - { - /// finalize earlier to release resources, do not wait for the destructor - this->release(); - } - return block; -} - -std::unique_ptr TemporaryFileStream::getReadStream() -{ - if (!isWriteFinished()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been not finished"); - - if (isEof()) - return nullptr; - - return std::make_unique(getPath(), header, getSize()); -} - -void 
TemporaryFileStream::updateAllocAndCheck() -{ - assert(out_writer); - size_t new_compressed_size = out_writer->out_compressed_buf.getCompressedBytes(); - size_t new_uncompressed_size = out_writer->out_compressed_buf.getUncompressedBytes(); - - if (unlikely(new_compressed_size < stat.compressed_size || new_uncompressed_size < stat.uncompressed_size)) - { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Temporary file {} size decreased after write: compressed: {} -> {}, uncompressed: {} -> {}", - getPath(), new_compressed_size, stat.compressed_size, new_uncompressed_size, stat.uncompressed_size); - } - - parent->deltaAllocAndCheck(new_compressed_size - stat.compressed_size, new_uncompressed_size - stat.uncompressed_size); - stat.compressed_size = new_compressed_size; - stat.uncompressed_size = new_uncompressed_size; - stat.num_rows = out_writer->num_rows; -} - -bool TemporaryFileStream::isEof() const -{ - return file == nullptr && !segment_holder; -} - -void TemporaryFileStream::release() -{ - if (in_reader) - in_reader.reset(); - - if (out_writer) - { - out_writer->finalize(); - out_writer.reset(); - } - - if (file) - { - file.reset(); - parent->deltaAllocAndCheck(-stat.compressed_size, -stat.uncompressed_size); - } - - if (segment_holder) - segment_holder.reset(); -} - -String TemporaryFileStream::getPath() const -{ - if (file) - return file->getAbsolutePath(); - if (segment_holder && !segment_holder->empty()) - return segment_holder->front().getPath(); - - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream has no file"); -} - -size_t TemporaryFileStream::getSize() const -{ - if (file) - return file->getDisk()->getFileSize(file->getRelativePath()); - if (segment_holder && !segment_holder->empty()) - return segment_holder->front().getReservedSize(); - - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream has no file"); -} - -TemporaryFileStream::~TemporaryFileStream() -{ - try - { - release(); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - assert(false); /// deltaAllocAndCheck with negative can't throw exception - } + if (!holder) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary block stream is not initialized"); + return TemporaryBlockStreamReaderHolder(holder->read(), header, DBMS_TCP_PROTOCOL_VERSION); } } diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index d541c93e031..f8d14b00ac5 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -4,15 +4,21 @@ #include #include -#include +#include #include -#include -#include +#include + #include #include -#include -#include +#include +#include + +#include + +#include + +class FileCacheTest_TemporaryDataReadBufferSize_Test; namespace CurrentMetrics { @@ -25,11 +31,10 @@ namespace DB class TemporaryDataOnDiskScope; using TemporaryDataOnDiskScopePtr = std::shared_ptr; -class TemporaryDataOnDisk; -using TemporaryDataOnDiskPtr = std::unique_ptr; +class TemporaryDataBuffer; +using TemporaryDataBufferPtr = std::unique_ptr; -class TemporaryFileStream; -using TemporaryFileStreamPtr = std::unique_ptr; +class TemporaryFileHolder; class FileCache; @@ -40,15 +45,26 @@ struct TemporaryDataOnDiskSettings /// Compression codec for temporary data, if empty no compression will be used. 
LZ4 by default
    String compression_codec = "LZ4";
+
+    /// Read/Write internal buffer size
+    size_t buffer_size = DBMS_DEFAULT_BUFFER_SIZE;
+
+    /// Metric to increment when a temporary file is created in the current scope
+    CurrentMetrics::Metric current_metric = CurrentMetrics::TemporaryFilesUnknown;
 };
 
+/// Creates temporary files located on specified resource (disk, fs_cache, etc.)
+using TemporaryFileProvider = std::function<std::unique_ptr<TemporaryFileHolder>(size_t)>;
+TemporaryFileProvider createTemporaryFileProvider(VolumePtr volume);
+TemporaryFileProvider createTemporaryFileProvider(FileCache * file_cache);
+
 /*
  * Used to account amount of temporary data written to disk.
  * If limit is set, throws exception if limit is exceeded.
  * Data can be nested, so parent scope accounts all data written by children.
  * Scopes are: global -> per-user -> per-query -> per-purpose (sorting, aggregation, etc).
  */
-class TemporaryDataOnDiskScope : boost::noncopyable
+class TemporaryDataOnDiskScope : boost::noncopyable, public std::enable_shared_from_this<TemporaryDataOnDiskScope>
 {
 public:
     struct StatAtomic
     {
         std::atomic<size_t> uncompressed_size;
     };
 
-    explicit TemporaryDataOnDiskScope(VolumePtr volume_, TemporaryDataOnDiskSettings settings_)
-        : volume(std::move(volume_))
+    /// Root scope
+    template <typename T>
+    TemporaryDataOnDiskScope(T && storage, TemporaryDataOnDiskSettings settings_)
+        : file_provider(createTemporaryFileProvider(std::forward<T>(storage)))
         , settings(std::move(settings_))
     {}
 
-    explicit TemporaryDataOnDiskScope(VolumePtr volume_, FileCache * file_cache_, TemporaryDataOnDiskSettings settings_)
-        : volume(std::move(volume_))
-        , file_cache(file_cache_)
-        , settings(std::move(settings_))
-    {}
-
-    explicit TemporaryDataOnDiskScope(TemporaryDataOnDiskScopePtr parent_, TemporaryDataOnDiskSettings settings_)
+    TemporaryDataOnDiskScope(TemporaryDataOnDiskScopePtr parent_, TemporaryDataOnDiskSettings settings_)
         : parent(std::move(parent_))
-        , volume(parent->volume)
-        , file_cache(parent->file_cache)
+        , file_provider(parent->file_provider)
         , settings(std::move(settings_))
     {}
 
-    /// TODO: remove
-    /// Refactor all code that uses volume directly to use TemporaryDataOnDisk.
-    VolumePtr getVolume() const { return volume; }
+    TemporaryDataOnDiskScopePtr childScope(CurrentMetrics::Metric current_metric);
 
     const TemporaryDataOnDiskSettings & getSettings() const { return settings; }
-
 protected:
+    friend class TemporaryDataBuffer;
+
     void deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta);
 
     TemporaryDataOnDiskScopePtr parent = nullptr;
 
-    VolumePtr volume = nullptr;
-    FileCache * file_cache = nullptr;
+    TemporaryFileProvider file_provider;
 
     StatAtomic stat;
     const TemporaryDataOnDiskSettings settings;
 };
 
-/*
- * Holds the set of temporary files.
- * New file stream is created with `createStream`.
- * Streams are owned by this object and will be deleted when it is deleted.
- * It's a leaf node in temporary data scope tree.
- */
-class TemporaryDataOnDisk : private TemporaryDataOnDiskScope
+/** Used to hold the wrapper and wrapped object together.
+ * This class provides a convenient way to manage the lifetime of both the wrapper and the wrapped object.
+ * The wrapper class (Impl) stores a reference to the wrapped object (Holder), and both objects are owned by this class.
+ * The lifetime of the wrapper and the wrapped object should be the same.
+ * This pattern is commonly used when the caller only needs to interact with the wrapper and doesn't need to be aware of the wrapped object.
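+ * For example, a rough usage sketch (the template argument order and the surrounding
+ * variables `scope`, `header`, and `block` are illustrative, not a fixed API):
+ *
+ *     WrapperGuard<NativeWriter, TemporaryDataBuffer> out(
+ *         std::make_unique<TemporaryDataBuffer>(scope), DBMS_TCP_PROTOCOL_VERSION, header);
+ *     out->write(block);                  /// operator-> forwards to the NativeWriter (Impl)
+ *     out.getHolder()->finishWriting();   /// the TemporaryDataBuffer (Holder) stays reachable
+ *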
+ * Examples: CompressedWriteBuffer and WriteBuffer, and NativeReader and ReadBuffer. + */ +template +class WrapperGuard { - friend class TemporaryFileStream; /// to allow it to call `deltaAllocAndCheck` to account data - public: - using TemporaryDataOnDiskScope::StatAtomic; + WrapperGuard() = default; - explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_); + template + WrapperGuard(std::unique_ptr holder_, Args && ... args) + : holder(std::move(holder_)) + , impl(std::make_unique(*holder, std::forward(args)...)) + {} - explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Metric metric_scope); + Impl * operator->() { return impl.get(); } + const Impl * operator->() const { return impl.get(); } + Impl & operator*() { return *impl; } + const Impl & operator*() const { return *impl; } + operator bool() const { return impl != nullptr; } - /// If max_file_size > 0, then check that there's enough space on the disk and throw an exception in case of lack of free space - TemporaryFileStream & createStream(const Block & header, size_t max_file_size = 0); + const Holder * getHolder() const { return holder.get(); } + Holder * getHolder() { return holder.get(); } - /// Write raw data directly into buffer. - /// Differences from `createStream`: - /// 1) it doesn't account data in parent scope - /// 2) returned buffer owns resources (instead of TemporaryDataOnDisk itself) - /// If max_file_size > 0, then check that there's enough space on the disk and throw an exception in case of lack of free space - std::unique_ptr createRawStream(size_t max_file_size = 0); + void reset() + { + impl.reset(); + holder.reset(); + } - std::vector getStreams() const; - bool empty() const; - - const StatAtomic & getStat() const { return stat; } - -private: - FileSegmentsHolderPtr createCacheFile(size_t max_file_size); - TemporaryFileOnDiskHolder createRegularFile(size_t max_file_size); - - mutable std::mutex mutex; - std::vector streams TSA_GUARDED_BY(mutex); - - typename CurrentMetrics::Metric current_metric_scope = CurrentMetrics::TemporaryFilesUnknown; +protected: + std::unique_ptr holder; + std::unique_ptr impl; }; -/* - * Data can be written into this stream and then read. - * After finish writing, call `finishWriting` and then either call `read` or 'getReadStream'(only one of the two) to read the data. - * Account amount of data written to disk in parent scope. - */ -class TemporaryFileStream : boost::noncopyable +/// Owns temporary file and provides access to it. +/// On destruction, file is removed and all resources are freed. +/// Lifetime of read/write buffers should be less than lifetime of TemporaryFileHolder. 
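+/// A rough usage sketch; `provider` stands for a TemporaryFileProvider obtained
+/// from createTemporaryFileProvider() and is illustrative only:
+///     std::unique_ptr<TemporaryFileHolder> file = provider(/*max_file_size=*/ 0);
+///     auto out = file->write();                         /// WriteBuffer into the file; finalize() it when done
+///     auto in = file->read(DBMS_DEFAULT_BUFFER_SIZE);   /// ReadBuffer over the written data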
+class TemporaryFileHolder { public: - struct Reader - { - Reader(const String & path, const Block & header_, size_t size = 0); + TemporaryFileHolder(); - explicit Reader(const String & path, size_t size = 0); + virtual std::unique_ptr write() = 0; + virtual std::unique_ptr read(size_t buffer_size) const = 0; - Block read(); + /// Get location for logging purposes + virtual String describeFilePath() const = 0; - const std::string path; - const size_t size; - const std::optional header; + virtual ~TemporaryFileHolder() = default; +}; - std::unique_ptr in_file_buf; - std::unique_ptr in_compressed_buf; - std::unique_ptr in_reader; - }; +class TemporaryDataReadBuffer : public ReadBuffer +{ +public: + explicit TemporaryDataReadBuffer(std::unique_ptr in_); + +private: + friend class ::FileCacheTest_TemporaryDataReadBufferSize_Test; + + bool nextImpl() override; + + WrapperGuard compressed_buf; +}; + +/// Writes data to buffer provided by file_holder, and accounts amount of written data in parent scope. +class TemporaryDataBuffer : public WriteBuffer +{ +public: struct Stat { - /// Statistics for file - /// Non-atomic because we don't allow to `read` or `write` into single file from multiple threads size_t compressed_size = 0; size_t uncompressed_size = 0; - size_t num_rows = 0; }; - TemporaryFileStream(TemporaryFileOnDiskHolder file_, const Block & header_, TemporaryDataOnDisk * parent_); - TemporaryFileStream(FileSegmentsHolderPtr segments_, const Block & header_, TemporaryDataOnDisk * parent_); - - size_t write(const Block & block); - void flush(); + explicit TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); + void nextImpl() override; + void finalizeImpl() override; + void cancelImpl() noexcept override; + std::unique_ptr read(); Stat finishWriting(); - Stat finishWritingAsyncSafe(); - bool isWriteFinished() const; - std::unique_ptr getReadStream(); + String describeFilePath() const; - Block read(); - - String getPath() const; - size_t getSize() const; - - Block getHeader() const { return header; } - - /// Read finished and file released - bool isEof() const; - - ~TemporaryFileStream(); + ~TemporaryDataBuffer() override; private: void updateAllocAndCheck(); - /// Release everything, close reader and writer, delete file - void release(); - - TemporaryDataOnDisk * parent; - - Block header; - - /// Data can be stored in file directly or in the cache - TemporaryFileOnDiskHolder file; - FileSegmentsHolderPtr segment_holder; + TemporaryDataOnDiskScope * parent; + std::unique_ptr file_holder; + WrapperGuard out_compressed_buf; + std::once_flag write_finished; Stat stat; +}; - std::once_flag finish_writing; +using TemporaryBlockStreamReaderHolder = WrapperGuard; - struct OutputWriter; - std::unique_ptr out_writer; +class TemporaryBlockStreamHolder : public WrapperGuard +{ +public: + TemporaryBlockStreamHolder() = default; - std::unique_ptr in_reader; + TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); + + TemporaryBlockStreamReaderHolder getReadStream() const; + + TemporaryDataBuffer::Stat finishWriting() const; + const Block & getHeader() const { return header; } + +private: + Block header; }; } diff --git a/src/Interpreters/tests/gtest_filecache.cpp b/src/Interpreters/tests/gtest_filecache.cpp index 007b31d9fdc..ae45443d4bd 100644 --- a/src/Interpreters/tests/gtest_filecache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -934,7 +934,7 @@ static Block generateBlock(size_t size = 0) return block; 
}
 
-static size_t readAllTemporaryData(TemporaryFileStream & stream)
+static size_t readAllTemporaryData(NativeReader & stream)
 {
     Block block;
     size_t read_rows = 0;
@@ -947,6 +947,7 @@ static size_t readAllTemporaryData(TemporaryFileStream & stream)
 }
 
 TEST_F(FileCacheTest, temporaryData)
+try
 {
     ServerUUID::setRandomForUnitTests();
     DB::FileCacheSettings settings;
@@ -959,7 +960,7 @@ TEST_F(FileCacheTest, temporaryData)
     file_cache.initialize();
     const auto user = FileCache::getCommonUser();
 
-    auto tmp_data_scope = std::make_shared<TemporaryDataOnDiskScope>(nullptr, &file_cache, TemporaryDataOnDiskSettings{});
+    auto tmp_data_scope = std::make_shared<TemporaryDataOnDiskScope>(&file_cache, TemporaryDataOnDiskSettings{});
 
     auto some_data_holder = file_cache.getOrSet(FileCacheKey::fromPath("some_data"), 0, 5_KiB, 5_KiB, CreateFileSegmentSettings{}, 0, user);
 
@@ -982,12 +983,17 @@ TEST_F(FileCacheTest, temporaryData)
     size_t size_used_with_temporary_data;
     size_t segments_used_with_temporary_data;
+
+
     {
-        auto tmp_data = std::make_unique<TemporaryDataOnDisk>(tmp_data_scope);
+        TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get());
+        ASSERT_TRUE(stream);
+        /// Do nothing with the stream, just create it and destroy it.
+    }
 
-        auto & stream = tmp_data->createStream(generateBlock());
-
-        ASSERT_GT(stream.write(generateBlock(100)), 0);
+    {
+        TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get());
+        ASSERT_GT(stream->write(generateBlock(100)), 0);
 
         ASSERT_GT(file_cache.getUsedCacheSize(), 0);
         ASSERT_GT(file_cache.getFileSegmentsNum(), 0);
 
         size_t used_size_before_attempt = file_cache.getUsedCacheSize();
         /// data can't be evicted because it is still held by `some_data_holder`
         ASSERT_THROW({
-            stream.write(generateBlock(2000));
-            stream.flush();
+            stream->write(generateBlock(2000));
+            stream.finishWriting();
         }, DB::Exception);
 
+        ASSERT_THROW(stream.finishWriting(), DB::Exception);
+
         ASSERT_EQ(file_cache.getUsedCacheSize(), used_size_before_attempt);
     }
 
     {
         size_t before_used_size = file_cache.getUsedCacheSize();
-        auto tmp_data = std::make_unique<TemporaryDataOnDisk>(tmp_data_scope);
-
-        auto write_buf_stream = tmp_data->createRawStream();
+        auto write_buf_stream = std::make_unique<TemporaryDataBuffer>(tmp_data_scope.get());
 
         write_buf_stream->write("1234567890", 10);
         write_buf_stream->write("abcde", 5);
-        auto read_buf = dynamic_cast<IReadableWriteBuffer *>(write_buf_stream.get())->tryGetReadBuffer();
+        auto read_buf = write_buf_stream->read();
 
         ASSERT_GT(file_cache.getUsedCacheSize(), before_used_size + 10);
 
@@ -1023,22 +1029,22 @@ TEST_F(FileCacheTest, temporaryData)
     }
 
     {
-        auto tmp_data = std::make_unique<TemporaryDataOnDisk>(tmp_data_scope);
-        auto & stream = tmp_data->createStream(generateBlock());
+        TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get());
 
-        ASSERT_GT(stream.write(generateBlock(100)), 0);
+        ASSERT_GT(stream->write(generateBlock(100)), 0);
 
         some_data_holder.reset();
 
-        stream.write(generateBlock(2000));
+        stream->write(generateBlock(2000));
 
-        auto stat = stream.finishWriting();
+        stream.finishWriting();
 
-        ASSERT_TRUE(fs::exists(stream.getPath()));
-        ASSERT_GT(fs::file_size(stream.getPath()), 100);
+        String file_path = stream.getHolder()->describeFilePath().substr(strlen("fscache://"));
 
-        ASSERT_EQ(stat.num_rows, 2100);
-        ASSERT_EQ(readAllTemporaryData(stream), 2100);
+        ASSERT_TRUE(fs::exists(file_path)) << "File " << file_path << " should exist";
+        ASSERT_GT(fs::file_size(file_path), 100) << "File " << file_path << " should be larger than 100 bytes";
+
+        ASSERT_EQ(readAllTemporaryData(*stream.getReadStream()), 2100);
 
         size_used_with_temporary_data =
file_cache.getUsedCacheSize(); segments_used_with_temporary_data = file_cache.getFileSegmentsNum(); @@ -1054,6 +1060,11 @@ TEST_F(FileCacheTest, temporaryData) ASSERT_LE(file_cache.getUsedCacheSize(), size_used_before_temporary_data); ASSERT_LE(file_cache.getFileSegmentsNum(), segments_used_before_temporary_data); } +catch (...) +{ + std::cerr << getCurrentExceptionMessage(true) << std::endl; + throw; +} TEST_F(FileCacheTest, CachedReadBuffer) { @@ -1148,18 +1159,22 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize) DB::FileCache file_cache("cache", settings); file_cache.initialize(); - auto tmp_data_scope = std::make_shared(/*volume=*/nullptr, &file_cache, /*settings=*/TemporaryDataOnDiskSettings{}); - - auto tmp_data = std::make_unique(tmp_data_scope); + auto tmp_data_scope = std::make_shared(&file_cache, TemporaryDataOnDiskSettings{}); auto block = generateBlock(/*size=*/3); - auto & stream = tmp_data->createStream(block); - stream.write(block); - stream.finishWriting(); + TemporaryBlockStreamHolder stream(block, tmp_data_scope.get()); - /// We allocate buffer of size min(getSize(), DBMS_DEFAULT_BUFFER_SIZE) + stream->write(block); + auto stat = stream.finishWriting(); + + /// We allocate buffer of size min(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE) /// We do care about buffer size because realistic external group by could generate 10^5 temporary files - ASSERT_EQ(stream.getSize(), 62); + ASSERT_EQ(stat.compressed_size, 62); + + auto reader = stream.getReadStream(); + auto * read_buf = reader.getHolder(); + const auto & internal_buffer = static_cast(read_buf)->compressed_buf.getHolder()->internalBuffer(); + ASSERT_EQ(internal_buffer.size(), 62); } /// Temporary data stored on disk @@ -1170,16 +1185,14 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize) disk = createDisk("temporary_data_read_buffer_size_test_dir"); VolumePtr volume = std::make_shared("volume", disk); - auto tmp_data_scope = std::make_shared(/*volume=*/volume, /*cache=*/nullptr, /*settings=*/TemporaryDataOnDiskSettings{}); - - auto tmp_data = std::make_unique(tmp_data_scope); + auto tmp_data_scope = std::make_shared(volume, TemporaryDataOnDiskSettings{}); auto block = generateBlock(/*size=*/3); - auto & stream = tmp_data->createStream(block); - stream.write(block); - stream.finishWriting(); + TemporaryBlockStreamHolder stream(block, tmp_data_scope.get()); + stream->write(block); + auto stat = stream.finishWriting(); - ASSERT_EQ(stream.getSize(), 62); + ASSERT_EQ(stat.compressed_size, 62); } } diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp index 07ee8f4ddef..1560e88ffef 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -29,17 +30,18 @@ CollapsingSortedAlgorithm::CollapsingSortedAlgorithm( size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr temp_data_buffer_, bool use_average_block_sizes) : IMergingAlgorithmWithSharedChunks( header_, num_inputs, std::move(description_), - out_row_sources_buf_, + temp_data_buffer_.get(), max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) , sign_column_number(header_.getPositionByName(sign_column)) , only_positive_sign(only_positive_sign_) + , temp_data_buffer(temp_data_buffer_) , log(log_) { } diff --git 
a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h index 99fd95d82d9..b7bb9914cf8 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h @@ -11,6 +11,8 @@ namespace Poco namespace DB { +class TemporaryDataBuffer; + /** Merges several sorted inputs to one. * For each group of consecutive identical values of the primary key (the columns by which the data is sorted), * keeps no more than one row with the value of the column `sign_column = -1` ("negative row") @@ -35,7 +37,7 @@ public: size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "CollapsingSortedAlgorithm"; } @@ -62,6 +64,8 @@ private: PODArray current_row_sources; /// Sources of rows with the current primary key size_t count_incorrect_data = 0; /// To prevent too many error messages from writing to the log. + std::shared_ptr temp_data_buffer = nullptr; + LoggerPtr log; void reportIncorrectData(); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 3a9cf7ee141..d4e4ba6aa5f 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB { @@ -15,7 +16,7 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr out_row_sources_buf_, bool use_average_block_sizes) : header(std::move(header_)) , merged_data(use_average_block_sizes, max_block_size_, max_block_size_bytes_) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index c889668a38e..fc300e41026 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -9,6 +9,8 @@ namespace DB { +class TemporaryDataBuffer; + /// Merges several sorted inputs into one sorted output. class MergingSortedAlgorithm final : public IMergingAlgorithm { @@ -21,7 +23,7 @@ public: size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_ = 0, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); void addInput(); @@ -45,7 +47,7 @@ private: /// Used in Vertical merge algorithm to gather non-PK/non-index columns (on next step) /// If it is not nullptr then it should be populated during execution - WriteBuffer * out_row_sources_buf = nullptr; + std::shared_ptr out_row_sources_buf = nullptr; /// Chunks currently being merged. 
Inputs current_inputs; diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp index cd347d371d9..a3a33080f52 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { @@ -37,12 +38,13 @@ ReplacingSortedAlgorithm::ReplacingSortedAlgorithm( const String & version_column, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr temp_data_buffer_, bool use_average_block_sizes, bool cleanup_, bool enable_vertical_final_) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) , cleanup(cleanup_), enable_vertical_final(enable_vertical_final_) + , temp_data_buffer(temp_data_buffer_) { if (!is_deleted_column.empty()) is_deleted_column_number = header_.getPositionByName(is_deleted_column); diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h index 2f23f2a5c4d..d3b9837a253 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h @@ -24,6 +24,8 @@ struct ChunkSelectFinalIndices : public ChunkInfoCloneable temp_data_buffer_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final_ = false); @@ -59,6 +61,8 @@ private: RowRef selected_row; /// Last row with maximum version for current primary key, may extend lifetime of chunk in input source size_t max_pos = 0; /// The position (into current_row_sources) of the row with the highest version. + std::shared_ptr temp_data_buffer = nullptr; + /// Sources of rows with the current primary key. PODArray current_row_sources; diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp index 9f124c6ba18..1ceb1f46234 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB { @@ -14,12 +15,13 @@ VersionedCollapsingAlgorithm::VersionedCollapsingAlgorithm( const String & sign_column_, size_t max_block_size_rows_, size_t max_block_size_bytes_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr temp_data_buffer_, bool use_average_block_sizes) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) /// -1 for +1 in FixedSizeDequeWithGaps's internal buffer. 3 is a reasonable minimum size to collapse anything. 
, max_rows_in_queue(std::min(std::max(3, max_block_size_rows_), MAX_ROWS_IN_MULTIVERSION_QUEUE) - 1) , current_keys(max_rows_in_queue) + , temp_data_buffer(temp_data_buffer_) { sign_column_number = header_.getPositionByName(sign_column_); } diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h index e6d20ddac75..6f877459147 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h @@ -8,6 +8,8 @@ namespace DB { +class TemporaryDataBuffer; + /** Merges several sorted inputs to one. * For each group of consecutive identical values of the sorting key * (the columns by which the data is sorted, including specially specified version column), @@ -22,7 +24,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "VersionedCollapsingAlgorithm"; } @@ -37,6 +39,8 @@ private: FixedSizeDequeWithGaps current_keys; Int8 sign_in_queue = 0; + std::shared_ptr temp_data_buffer = nullptr; + std::queue current_row_sources; /// Sources of rows with the current primary key void insertGap(size_t gap_size); diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index 99fb700abf1..9b09c802783 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -23,7 +23,7 @@ public: bool only_positive_sign, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index d2895a2a2e9..13330dcff6d 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -20,7 +20,7 @@ MergingSortedTransform::MergingSortedTransform( SortingQueueStrategy sorting_queue_strategy, UInt64 limit_, bool always_read_till_end_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr out_row_sources_buf_, bool use_average_block_sizes, bool have_all_inputs_) : IMergingTransform( diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index 6e52450efa7..fb8e5ce74e3 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -20,7 +20,7 @@ public: SortingQueueStrategy sorting_queue_strategy, UInt64 limit_ = 0, bool always_read_till_end_ = false, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool have_all_inputs_ = true); diff --git a/src/Processors/Merges/ReplacingSortedTransform.h b/src/Processors/Merges/ReplacingSortedTransform.h index dc262aab9ee..a9d9f4fb619 100644 --- a/src/Processors/Merges/ReplacingSortedTransform.h +++ b/src/Processors/Merges/ReplacingSortedTransform.h @@ -21,7 +21,7 @@ public: const String & is_deleted_column, const String & version_column, size_t 
max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final = false) @@ -34,7 +34,7 @@ public: version_column, max_block_size_rows, max_block_size_bytes, - out_row_sources_buf_, + temp_data_buffer_, use_average_block_sizes, cleanup, enable_vertical_final) diff --git a/src/Processors/Merges/VersionedCollapsingTransform.h b/src/Processors/Merges/VersionedCollapsingTransform.h index 32b5d7bf343..0bdccd4795d 100644 --- a/src/Processors/Merges/VersionedCollapsingTransform.h +++ b/src/Processors/Merges/VersionedCollapsingTransform.h @@ -21,7 +21,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, @@ -31,7 +31,7 @@ public: sign_column_, max_block_size_rows, max_block_size_bytes, - out_row_sources_buf_, + temp_data_buffer_, use_average_block_sizes) { } diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h index d99f9a7d1f1..1c03a4d74cd 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h @@ -20,7 +20,6 @@ struct BuildQueryPipelineSettings ExpressionActionsSettings actions_settings; QueryStatusPtr process_list_element; ProgressCallback progress_callback = nullptr; - TemporaryFileLookupPtr temporary_file_lookup; const ExpressionActionsSettings & getActionsSettings() const { return actions_settings; } static BuildQueryPipelineSettings fromContext(ContextPtr from); diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 5ad2f1f62d5..4fde246f764 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -280,9 +280,9 @@ void SortingStep::mergeSorting( if (increase_sort_description_compile_attempts) increase_sort_description_compile_attempts = false; - auto tmp_data_on_disk = sort_settings.tmp_data - ? 
std::make_unique(sort_settings.tmp_data, CurrentMetrics::TemporaryFilesForSort) - : std::unique_ptr(); + TemporaryDataOnDiskScopePtr tmp_data_on_disk = nullptr; + if (sort_settings.tmp_data) + tmp_data_on_disk = sort_settings.tmp_data->childScope(CurrentMetrics::TemporaryFilesForSort); return std::make_shared( header, diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index 68f23898018..2c54788b995 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -54,9 +54,9 @@ namespace class SourceFromNativeStream : public ISource { public: - explicit SourceFromNativeStream(TemporaryFileStream * tmp_stream_) - : ISource(tmp_stream_->getHeader()) - , tmp_stream(tmp_stream_) + explicit SourceFromNativeStream(const Block & header, TemporaryBlockStreamReaderHolder tmp_stream_) + : ISource(header) + , tmp_stream(std::move(tmp_stream_)) {} String getName() const override { return "SourceFromNativeStream"; } @@ -69,7 +69,7 @@ namespace auto block = tmp_stream->read(); if (!block) { - tmp_stream = nullptr; + tmp_stream.reset(); return {}; } return convertToChunk(block); @@ -78,7 +78,7 @@ namespace std::optional getReadProgress() override { return std::nullopt; } private: - TemporaryFileStream * tmp_stream; + TemporaryBlockStreamReaderHolder tmp_stream; }; } @@ -811,15 +811,18 @@ void AggregatingTransform::initGenerate() Pipes pipes; /// Merge external data from all aggregators used in query. - for (const auto & aggregator : *params->aggregator_list_ptr) + for (auto & aggregator : *params->aggregator_list_ptr) { - const auto & tmp_data = aggregator.getTemporaryData(); - for (auto * tmp_stream : tmp_data.getStreams()) - pipes.emplace_back(Pipe(std::make_unique(tmp_stream))); + auto & tmp_data = aggregator.getTemporaryData(); + num_streams += tmp_data.size(); - num_streams += tmp_data.getStreams().size(); - compressed_size += tmp_data.getStat().compressed_size; - uncompressed_size += tmp_data.getStat().uncompressed_size; + for (auto & tmp_stream : tmp_data) + { + auto stat = tmp_stream.finishWriting(); + compressed_size += stat.compressed_size; + uncompressed_size += stat.uncompressed_size; + pipes.emplace_back(Pipe(std::make_unique(tmp_stream.getHeader(), tmp_stream.getReadStream()))); + } } LOG_DEBUG( diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index c45192e7118..ba157dabffb 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -27,15 +27,20 @@ namespace ProfileEvents namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + class BufferingToFileTransform : public IAccumulatingTransform { public: - BufferingToFileTransform(const Block & header, TemporaryFileStream & tmp_stream_, LoggerPtr log_) + BufferingToFileTransform(const Block & header, TemporaryBlockStreamHolder tmp_stream_, LoggerPtr log_) : IAccumulatingTransform(header, header) - , tmp_stream(tmp_stream_) + , tmp_stream(std::move(tmp_stream_)) , log(log_) { - LOG_INFO(log, "Sorting and writing part of data into temporary file {}", tmp_stream.getPath()); + LOG_INFO(log, "Sorting and writing part of data into temporary file {}", tmp_stream.getHolder()->describeFilePath()); ProfileEvents::increment(ProfileEvents::ExternalSortWritePart); } @@ -44,14 +49,15 @@ public: void consume(Chunk chunk) override { Block block = 
getInputPort().getHeader().cloneWithColumns(chunk.detachColumns()); - tmp_stream.write(block); + tmp_stream->write(block); } Chunk generate() override { - if (!tmp_stream.isWriteFinished()) + if (!tmp_read_stream) { auto stat = tmp_stream.finishWriting(); + tmp_read_stream = tmp_stream.getReadStream(); ProfileEvents::increment(ProfileEvents::ExternalProcessingCompressedBytesTotal, stat.compressed_size); ProfileEvents::increment(ProfileEvents::ExternalProcessingUncompressedBytesTotal, stat.uncompressed_size); @@ -59,10 +65,11 @@ public: ProfileEvents::increment(ProfileEvents::ExternalSortUncompressedBytes, stat.uncompressed_size); LOG_INFO(log, "Done writing part of data into temporary file {}, compressed {}, uncompressed {} ", - tmp_stream.getPath(), ReadableSize(static_cast(stat.compressed_size)), ReadableSize(static_cast(stat.uncompressed_size))); + tmp_stream.getHolder()->describeFilePath(), + ReadableSize(static_cast(stat.compressed_size)), ReadableSize(static_cast(stat.uncompressed_size))); } - Block block = tmp_stream.read(); + Block block = tmp_read_stream->read(); if (!block) return {}; @@ -71,7 +78,8 @@ public: } private: - TemporaryFileStream & tmp_stream; + TemporaryBlockStreamHolder tmp_stream; + TemporaryBlockStreamReaderHolder tmp_read_stream; LoggerPtr log; }; @@ -86,7 +94,7 @@ MergeSortingTransform::MergeSortingTransform( size_t max_bytes_before_remerge_, double remerge_lowered_memory_bytes_ratio_, size_t max_bytes_before_external_sort_, - TemporaryDataOnDiskPtr tmp_data_, + TemporaryDataOnDiskScopePtr tmp_data_, size_t min_free_disk_space_) : SortingTransform(header, description_, max_merged_block_size_, limit_, increase_sort_description_compile_attempts) , max_bytes_before_remerge(max_bytes_before_remerge_) @@ -168,9 +176,13 @@ void MergeSortingTransform::consume(Chunk chunk) */ if (max_bytes_before_external_sort && sum_bytes_in_blocks > max_bytes_before_external_sort) { + if (!tmp_data) + throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDisk is not set for MergeSortingTransform"); + temporary_files_num++; + /// If there's less free disk space than reserve_size, an exception will be thrown size_t reserve_size = sum_bytes_in_blocks + min_free_disk_space; - auto & tmp_stream = tmp_data->createStream(header_without_constants, reserve_size); + TemporaryBlockStreamHolder tmp_stream(header_without_constants, tmp_data.get(), reserve_size); size_t max_merged_block_size = this->max_merged_block_size; if (max_block_bytes > 0 && sum_rows_in_blocks > 0 && sum_bytes_in_blocks > 0) { @@ -179,7 +191,7 @@ void MergeSortingTransform::consume(Chunk chunk) max_merged_block_size = std::max(std::min(max_merged_block_size, max_block_bytes / avg_row_bytes), 128UL); } merge_sorter = std::make_unique(header_without_constants, std::move(chunks), description, max_merged_block_size, limit); - auto current_processor = std::make_shared(header_without_constants, tmp_stream, log); + auto current_processor = std::make_shared(header_without_constants, std::move(tmp_stream), log); processors.emplace_back(current_processor); @@ -221,14 +233,14 @@ void MergeSortingTransform::generate() { if (!generated_prefix) { - size_t num_tmp_files = tmp_data ? 
tmp_data->getStreams().size() : 0; - if (num_tmp_files == 0) - merge_sorter - = std::make_unique(header_without_constants, std::move(chunks), description, max_merged_block_size, limit); + if (temporary_files_num == 0) + { + merge_sorter = std::make_unique(header_without_constants, std::move(chunks), description, max_merged_block_size, limit); + } else { ProfileEvents::increment(ProfileEvents::ExternalSortMerge); - LOG_INFO(log, "There are {} temporary sorted parts to merge", num_tmp_files); + LOG_INFO(log, "There are {} temporary sorted parts to merge", temporary_files_num); processors.emplace_back(std::make_shared( header_without_constants, std::move(chunks), description, max_merged_block_size, limit)); diff --git a/src/Processors/Transforms/MergeSortingTransform.h b/src/Processors/Transforms/MergeSortingTransform.h index a39dd66caa0..f7cb63d518b 100644 --- a/src/Processors/Transforms/MergeSortingTransform.h +++ b/src/Processors/Transforms/MergeSortingTransform.h @@ -29,7 +29,7 @@ public: size_t max_bytes_before_remerge_, double remerge_lowered_memory_bytes_ratio_, size_t max_bytes_before_external_sort_, - TemporaryDataOnDiskPtr tmp_data_, + TemporaryDataOnDiskScopePtr tmp_data_, size_t min_free_disk_space_); String getName() const override { return "MergeSortingTransform"; } @@ -45,7 +45,8 @@ private: size_t max_bytes_before_remerge; double remerge_lowered_memory_bytes_ratio; size_t max_bytes_before_external_sort; - TemporaryDataOnDiskPtr tmp_data; + TemporaryDataOnDiskScopePtr tmp_data; + size_t temporary_files_num = 0; size_t min_free_disk_space; size_t max_block_bytes; diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index a9e5b1535c0..1e274a97a08 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -197,6 +197,12 @@ public: void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } + template + void addResource(Resource resource, std::vector QueryPlanResourceHolder::*field) + { + (resources.*field).push_back(std::move(resource)); + } + /// Convert query pipeline to pipe. 
static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); static QueryPipeline getPipeline(QueryPipelineBuilder builder); diff --git a/src/QueryPipeline/QueryPlanResourceHolder.h b/src/QueryPipeline/QueryPlanResourceHolder.h index 10f7f39ab09..ee2ecc25cd5 100644 --- a/src/QueryPipeline/QueryPlanResourceHolder.h +++ b/src/QueryPipeline/QueryPlanResourceHolder.h @@ -13,6 +13,7 @@ class QueryPlan; class Context; struct QueryIdHolder; +class TemporaryDataBuffer; struct QueryPlanResourceHolder { @@ -33,6 +34,7 @@ struct QueryPlanResourceHolder std::vector storage_holders; std::vector table_locks; std::vector> query_id_holders; + std::vector> rows_sources_temporary_file; }; } diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index 8a9ae05b355..52b56860543 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -162,15 +162,16 @@ void HTTPHandler::pushDelayedResults(Output & used_output) for (auto & write_buf : write_buffers) { - if (!write_buf) - continue; - - IReadableWriteBuffer * write_buf_concrete = dynamic_cast(write_buf.get()); - if (write_buf_concrete) + if (auto * write_buf_concrete = dynamic_cast(write_buf.get())) { - ReadBufferPtr reread_buf = write_buf_concrete->tryGetReadBuffer(); - if (reread_buf) - read_buffers.emplace_back(wrapReadBufferPointer(reread_buf)); + if (auto reread_buf = write_buf_concrete->read()) + read_buffers.emplace_back(std::move(reread_buf)); + } + + if (auto * write_buf_concrete = dynamic_cast(write_buf.get())) + { + if (auto reread_buf = write_buf_concrete->tryGetReadBuffer()) + read_buffers.emplace_back(std::move(reread_buf)); } } @@ -312,21 +313,19 @@ void HTTPHandler::processQuery( if (buffer_size_memory > 0 || buffer_until_eof) { - CascadeWriteBuffer::WriteBufferPtrs cascade_buffer1; - CascadeWriteBuffer::WriteBufferConstructors cascade_buffer2; + CascadeWriteBuffer::WriteBufferPtrs cascade_buffers; + CascadeWriteBuffer::WriteBufferConstructors cascade_buffers_lazy; if (buffer_size_memory > 0) - cascade_buffer1.emplace_back(std::make_shared(buffer_size_memory)); + cascade_buffers.emplace_back(std::make_shared(buffer_size_memory)); if (buffer_until_eof) { - auto tmp_data = std::make_shared(server.context()->getTempDataOnDisk()); - - auto create_tmp_disk_buffer = [tmp_data] (const WriteBufferPtr &) -> WriteBufferPtr { - return tmp_data->createRawStream(); - }; - - cascade_buffer2.emplace_back(std::move(create_tmp_disk_buffer)); + auto tmp_data = server.context()->getTempDataOnDisk(); + cascade_buffers_lazy.emplace_back([tmp_data](const WriteBufferPtr &) -> WriteBufferPtr + { + return std::make_unique(tmp_data.get()); + }); } else { @@ -342,10 +341,10 @@ void HTTPHandler::processQuery( return next_buffer; }; - cascade_buffer2.emplace_back(push_memory_buffer_and_continue); + cascade_buffers_lazy.emplace_back(push_memory_buffer_and_continue); } - used_output.out_delayed_and_compressed_holder = std::make_unique(std::move(cascade_buffer1), std::move(cascade_buffer2)); + used_output.out_delayed_and_compressed_holder = std::make_unique(std::move(cascade_buffers), std::move(cascade_buffers_lazy)); used_output.out_maybe_delayed_and_compressed = used_output.out_delayed_and_compressed_holder.get(); } else diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 74d6d60ba1b..5c9d4ea61a2 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -118,68 +118,6 @@ static ColumnsStatistics getStatisticsForColumns( return 
all_statistics; } -/// Manages the "rows_sources" temporary file that is used during vertical merge. -class RowsSourcesTemporaryFile : public ITemporaryFileLookup -{ -public: - /// A logical name of the temporary file under which it will be known to the plan steps that use it. - static constexpr auto FILE_ID = "rows_sources"; - - explicit RowsSourcesTemporaryFile(TemporaryDataOnDiskScopePtr temporary_data_on_disk_) - : tmp_disk(std::make_unique(temporary_data_on_disk_)) - , uncompressed_write_buffer(tmp_disk->createRawStream()) - , tmp_file_name_on_disk(uncompressed_write_buffer->getFileName()) - { - } - - WriteBuffer & getTemporaryFileForWriting(const String & name) override - { - if (name != FILE_ID) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name); - - if (write_buffer) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was already requested for writing, there musto be only one writer"); - - write_buffer = (std::make_unique(*uncompressed_write_buffer)); - return *write_buffer; - } - - std::unique_ptr getTemporaryFileForReading(const String & name) override - { - if (name != FILE_ID) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name); - - if (!finalized) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file is not finalized yet"); - - /// tmp_disk might not create real file if no data was written to it. - if (final_size == 0) - return std::make_unique(); - - /// Reopen the file for each read so that multiple reads can be performed in parallel and there is no need to seek to the beginning. - auto raw_file_read_buffer = std::make_unique(tmp_file_name_on_disk); - return std::make_unique(std::move(raw_file_read_buffer)); - } - - /// Returns written data size in bytes - size_t finalizeWriting() - { - write_buffer->finalize(); - uncompressed_write_buffer->finalize(); - finalized = true; - final_size = write_buffer->count(); - return final_size; - } - -private: - std::unique_ptr tmp_disk; - std::unique_ptr uncompressed_write_buffer; - std::unique_ptr write_buffer; - const String tmp_file_name_on_disk; - bool finalized = false; - size_t final_size = 0; -}; - static void addMissedColumnsToSerializationInfos( size_t num_rows_in_parts, const Names & part_columns, @@ -480,7 +418,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const } case MergeAlgorithm::Vertical: { - ctx->rows_sources_temporary_file = std::make_shared(global_ctx->context->getTempDataOnDisk()); + ctx->rows_sources_temporary_file = std::make_unique(global_ctx->context->getTempDataOnDisk().get()); std::map local_merged_column_to_size; for (const auto & part : global_ctx->future_part->parts) @@ -854,22 +792,11 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const if (global_ctx->chosen_merge_algorithm != MergeAlgorithm::Vertical) return false; - size_t sum_input_rows_exact = global_ctx->merge_list_element_ptr->rows_read; - size_t input_rows_filtered = *global_ctx->input_rows_filtered; global_ctx->merge_list_element_ptr->columns_written = global_ctx->merging_columns.size(); global_ctx->merge_list_element_ptr->progress.store(ctx->column_sizes->keyColumnsWeight(), std::memory_order_relaxed); /// Ensure data has written to disk. - size_t rows_sources_count = ctx->rows_sources_temporary_file->finalizeWriting(); - /// In special case, when there is only one source part, and no rows were skipped, we may have - /// skipped writing rows_sources file. 
Otherwise rows_sources_count must be equal to the total - /// number of input rows. - if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count + input_rows_filtered) - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Number of rows in source parts ({}) excluding filtered rows ({}) differs from number " - "of bytes written to rows_sources file ({}). It is a bug.", - sum_input_rows_exact, input_rows_filtered, rows_sources_count); + ctx->rows_sources_temporary_file->finishWriting(); ctx->it_name_and_type = global_ctx->gathering_columns.cbegin(); @@ -901,12 +828,12 @@ class ColumnGathererStep : public ITransformingStep public: ColumnGathererStep( const Header & input_header_, - const String & rows_sources_temporary_file_name_, + std::unique_ptr rows_sources_read_buf_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool is_result_sparse_) : ITransformingStep(input_header_, input_header_, getTraits()) - , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) + , rows_sources_read_buf(std::move(rows_sources_read_buf_)) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , is_result_sparse(is_result_sparse_) @@ -914,15 +841,13 @@ public: String getName() const override { return "ColumnGatherer"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override { - const auto &header = pipeline.getHeader(); + const auto & header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); - if (!pipeline_settings.temporary_file_lookup) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); - - auto rows_sources_read_buf = pipeline_settings.temporary_file_lookup->getTemporaryFileForReading(rows_sources_temporary_file_name); + if (!rows_sources_read_buf) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary data buffer for rows sources is not set"); auto transform = std::make_unique( header, @@ -957,7 +882,7 @@ private: } MergeTreeData::MergingParams merging_params{}; - const String rows_sources_temporary_file_name; + std::unique_ptr rows_sources_read_buf; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool is_result_sparse; @@ -1008,7 +933,7 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic const auto data_settings = global_ctx->data->getSettings(); auto merge_step = std::make_unique( merge_column_query_plan.getCurrentHeader(), - RowsSourcesTemporaryFile::FILE_ID, + ctx->rows_sources_temporary_file->read(), (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], is_result_sparse); @@ -1037,9 +962,9 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic } auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); - pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_column_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); + builder->addResource>(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); return 
{QueryPipelineBuilder::getPipeline(std::move(*builder)), std::move(indexes_to_recalc)}; } @@ -1401,7 +1326,7 @@ public: const SortDescription & sort_description_, const Names partition_key_columns_, const MergeTreeData::MergingParams & merging_params_, - const String & rows_sources_temporary_file_name_, + std::shared_ptr rows_sources_temporary_file_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool blocks_are_granules_size_, @@ -1411,7 +1336,7 @@ public: , sort_description(sort_description_) , partition_key_columns(partition_key_columns_) , merging_params(merging_params_) - , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) + , rows_sources_temporary_file(rows_sources_temporary_file_) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , blocks_are_granules_size(blocks_are_granules_size_) @@ -1421,7 +1346,7 @@ public: String getName() const override { return "MergeParts"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override { /// The order of the streams is important: when the key is matched, the elements go in the order of the source stream number. /// In the merged part, the lines with the same key must be in the ascending order of the identifier of original part, @@ -1431,14 +1356,6 @@ public: const auto &header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); - WriteBuffer * rows_sources_write_buf = nullptr; - if (!rows_sources_temporary_file_name.empty()) - { - if (!pipeline_settings.temporary_file_lookup) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); - rows_sources_write_buf = &pipeline_settings.temporary_file_lookup->getTemporaryFileForWriting(rows_sources_temporary_file_name); - } - switch (merging_params.mode) { case MergeTreeData::MergingParams::Ordinary: @@ -1451,14 +1368,14 @@ public: SortingQueueStrategy::Default, /* limit_= */0, /* always_read_till_end_= */false, - rows_sources_write_buf, + rows_sources_temporary_file, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Collapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, false, - merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Summing: @@ -1473,7 +1390,7 @@ public: case MergeTreeData::MergingParams::Replacing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.is_deleted_column, merging_params.version_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size, + merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size, cleanup); break; @@ -1486,7 +1403,7 @@ public: case MergeTreeData::MergingParams::VersionedCollapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, 
blocks_are_granules_size); break; } @@ -1528,7 +1445,7 @@ private: const SortDescription sort_description; const Names partition_key_columns; const MergeTreeData::MergingParams merging_params{}; - const String rows_sources_temporary_file_name; + std::shared_ptr rows_sources_temporary_file; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool blocks_are_granules_size; @@ -1697,7 +1614,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const sort_description, partition_key_columns, global_ctx->merging_params, - (is_vertical_merge ? RowsSourcesTemporaryFile::FILE_ID : ""), /// rows_sources temporaty file is used only for vertical merge + (is_vertical_merge ? ctx->rows_sources_temporary_file : nullptr), /// rows_sources temporary file is used only for vertical merge (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], ctx->blocks_are_granules_size, @@ -1762,7 +1679,6 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const { auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); - pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_parts_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 5a4fb1ec0b8..a3d72127627 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -41,7 +41,6 @@ namespace DB class MergeTask; using MergeTaskPtr = std::shared_ptr; -class RowsSourcesTemporaryFile; /** * Overview of the merge algorithm @@ -235,7 +234,7 @@ private: bool force_ttl{false}; CompressionCodecPtr compression_codec{nullptr}; size_t sum_input_rows_upper_bound{0}; - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes{}; /// For projections to rebuild @@ -314,7 +313,7 @@ private: struct VerticalMergeRuntimeContext : public IStageRuntimeContext { /// Begin dependencies from previous stage - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes; CompressionCodecPtr compression_codec; std::list::const_iterator it_name_and_type; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 76bcf41d6d8..a53d4213cbd 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -113,10 +113,11 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( } if (!prewhere_actions.steps.empty()) - LOG_TRACE(log, "PREWHERE condition was split into {} steps: {}", prewhere_actions.steps.size(), prewhere_actions.dumpConditions()); + LOG_TRACE(log, "PREWHERE condition was split into {} steps", prewhere_actions.steps.size()); if (prewhere_info) - LOG_TEST(log, "Original PREWHERE DAG:\n{}\nPREWHERE actions:\n{}", + LOG_TEST(log, "Original PREWHERE DAG:{}\n{}\nPREWHERE actions:\n{}", + prewhere_actions.dumpConditions(), prewhere_info->prewhere_actions.dumpDAG(), (!prewhere_actions.steps.empty() ?
prewhere_actions.dump() : std::string(""))); } From db2aab199db6e542c5a87c30466c358a2207c30a Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 16 Oct 2024 11:57:11 +0000 Subject: [PATCH 043/267] log --- src/Interpreters/TemporaryDataOnDisk.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index c3b24fb783b..6cc49fe83c8 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -58,6 +58,7 @@ public: explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t max_file_size = 0) { const auto key = FileSegment::Key::random(); + LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file in cache with key {}", key); segment_holder = file_cache.set( key, 0, std::max(10_MiB, max_file_size), CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); @@ -91,6 +92,7 @@ public: explicit TemporaryFileOnLocalDisk(VolumePtr volume, size_t max_file_size = 0) : path_to_file("tmp" + toString(UUIDHelpers::generateV4())) { + LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file '{}'", path_to_file); if (max_file_size > 0) { auto reservation = volume->reserve(max_file_size); @@ -129,9 +131,14 @@ public: try { if (disk->exists(path_to_file)) + { + LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Removing temporary file '{}'", path_to_file); disk->removeRecursive(path_to_file); + } else + { LOG_WARNING(getLogger("TemporaryFileOnLocalDisk"), "Temporary path '{}' does not exist in '{}' on disk {}", path_to_file, disk->getPath(), disk->getName()); + } } catch (...) { From b09d3c5479edcecaa041df6e6de7a45a2d407aa8 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 16 Oct 2024 12:01:21 +0000 Subject: [PATCH 044/267] fix --- src/Interpreters/Context.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 6ada12e63f9..f0e29dcdc41 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1256,6 +1256,10 @@ try /// We skip directories (for example, 'http_buffers' - it's used for buffering of the results) and all other file types. } } + else + { + fs::create_directories(path); + } } catch (...) 
{ From f238530cc5d222f62611214a9434138d79aabefd Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 17 Oct 2024 15:10:39 +0000 Subject: [PATCH 045/267] w --- src/Interpreters/Aggregator.cpp | 5 ++++ src/Interpreters/Aggregator.h | 2 +- src/Interpreters/GraceHashJoin.cpp | 4 +-- src/Interpreters/HashJoin/HashJoin.cpp | 15 +++++------ src/Interpreters/HashJoin/HashJoin.h | 2 +- src/Interpreters/TemporaryDataOnDisk.h | 25 ++++++++++--------- .../Transforms/MergeSortingTransform.cpp | 4 +-- src/Storages/MergeTree/MergeTask.cpp | 2 +- 8 files changed, 33 insertions(+), 26 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index e6fecc37cfa..cdc819d3a32 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1639,6 +1639,11 @@ Block Aggregator::convertOneBucketToBlock(AggregatedDataVariants & variants, Are return block; } +std::vector & Aggregator::getTemporaryData() +{ + return tmp_files; +} + template void Aggregator::writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index bc28d3dccb8..3ac5ca30ed4 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -311,7 +311,7 @@ public: bool hasTemporaryData() const { return !tmp_files.empty(); } - std::vector & getTemporaryData() { return tmp_files; } + std::vector & getTemporaryData(); /// Get data structure of the result. Block getHeader(bool final) const; diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index a2010b7d94b..3fb83c3ce47 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -389,8 +389,8 @@ void GraceHashJoin::addBuckets(const size_t bucket_count) for (size_t i = 0; i < bucket_count; ++i) try { - TemporaryBlockStreamHolder left_file = TemporaryBlockStreamHolder(left_sample_block, tmp_data.get()); - TemporaryBlockStreamHolder right_file = TemporaryBlockStreamHolder(prepareRightBlock(right_sample_block), tmp_data.get()); + TemporaryBlockStreamHolder left_file(left_sample_block, tmp_data.get()); + TemporaryBlockStreamHolder right_file(prepareRightBlock(right_sample_block), tmp_data.get()); BucketPtr new_bucket = std::make_shared(current_size + i, std::move(left_file), std::move(right_file), log); tmp_buckets.emplace_back(std::move(new_bucket)); diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index af23b520abb..a2c9f94a6ae 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -59,7 +59,7 @@ struct NotProcessedCrossJoin : public ExtraBlock { size_t left_position; size_t right_block; - TemporaryBlockStreamReaderHolder reader; + std::optional reader; }; @@ -513,9 +513,9 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) || (max_rows_in_join && getTotalRowCount() + block_to_save.rows() >= max_rows_in_join))) { if (!tmp_stream) - tmp_stream = TemporaryBlockStreamHolder(right_sample_block, tmp_data.get()); + tmp_stream.emplace(right_sample_block, tmp_data.get()); - tmp_stream->write(block_to_save); + tmp_stream.value()->write(block_to_save); return true; } @@ -721,13 +721,14 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) { size_t start_left_row = 0; size_t start_right_block = 0; - TemporaryBlockStreamReaderHolder reader; + std::optional reader; if (not_processed) { auto & continuation = static_cast(*not_processed); start_left_row = 
continuation.left_position; start_right_block = continuation.right_block; - reader = std::move(continuation.reader); + if (continuation.reader) + reader = std::move(*continuation.reader); not_processed.reset(); } @@ -796,9 +797,9 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) if (tmp_stream && rows_added <= max_joined_block_rows) { if (!reader) - reader = tmp_stream.getReadStream(); + reader = tmp_stream->getReadStream(); - while (auto block_right = reader->read()) + while (auto block_right = reader.value()->read()) { ++block_number; process_right_block(block_right); diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 0f50e110db9..8572c5df096 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -424,7 +424,7 @@ private: /// Needed to do external cross join TemporaryDataOnDiskScopePtr tmp_data; - TemporaryBlockStreamHolder tmp_stream; + std::optional tmp_stream; mutable std::once_flag finish_writing; /// Block with columns from the right-side table. diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index f8d14b00ac5..86fa9e57e81 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -114,18 +114,19 @@ template class WrapperGuard { public: - WrapperGuard() = default; - template WrapperGuard(std::unique_ptr holder_, Args && ... args) : holder(std::move(holder_)) , impl(std::make_unique(*holder, std::forward(args)...)) - {} + { + chassert(holder); + chassert(impl); + } - Impl * operator->() { return impl.get(); } - const Impl * operator->() const { return impl.get(); } - Impl & operator*() { return *impl; } - const Impl & operator*() const { return *impl; } + Impl * operator->() { chassert(impl); chassert(holder); return impl.get(); } + const Impl * operator->() const { chassert(impl); chassert(holder); return impl.get(); } + Impl & operator*() { chassert(impl); chassert(holder); return *impl; } + const Impl & operator*() const { chassert(impl); chassert(holder); return *impl; } operator bool() const { return impl != nullptr; } const Holder * getHolder() const { return holder.get(); } @@ -153,13 +154,13 @@ public: virtual std::unique_ptr write() = 0; virtual std::unique_ptr read(size_t buffer_size) const = 0; - /// Get location for logging purposes + /// Get location for logging virtual String describeFilePath() const = 0; virtual ~TemporaryFileHolder() = default; }; - +/// Reads raw data from temporary file class TemporaryDataReadBuffer : public ReadBuffer { public: @@ -173,7 +174,7 @@ private: WrapperGuard compressed_buf; }; -/// Writes data to buffer provided by file_holder, and accounts amount of written data in parent scope. +/// Writes raw data to buffer provided by file_holder, and accounts amount of written data in parent scope. class TemporaryDataBuffer : public WriteBuffer { public: @@ -206,13 +207,13 @@ private: Stat stat; }; + +/// High level interfaces for reading and writing temporary data by blocks. 
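/// A minimal usage sketch of these holders (`header`, `scope`, `block` and `process` are assumed names for illustration only):
///
///     TemporaryBlockStreamHolder out(header, scope.get());
///     out->write(block);
///     auto stat = out.finishWriting();   // Stat with compressed/uncompressed sizes
///     auto reader = out.getReadStream(); // TemporaryBlockStreamReaderHolder
///     while (auto b = reader->read())
///         process(b);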
using TemporaryBlockStreamReaderHolder = WrapperGuard; class TemporaryBlockStreamHolder : public WrapperGuard { public: - TemporaryBlockStreamHolder() = default; - TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); TemporaryBlockStreamReaderHolder getReadStream() const; diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index ba157dabffb..d3299ea651f 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -69,7 +69,7 @@ public: ReadableSize(static_cast(stat.compressed_size)), ReadableSize(static_cast(stat.uncompressed_size))); } - Block block = tmp_read_stream->read(); + Block block = tmp_read_stream.value()->read(); if (!block) return {}; @@ -79,7 +79,7 @@ public: private: TemporaryBlockStreamHolder tmp_stream; - TemporaryBlockStreamReaderHolder tmp_read_stream; + std::optional tmp_read_stream; LoggerPtr log; }; diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 5c9d4ea61a2..1009458574e 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -964,7 +964,7 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_column_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); - builder->addResource>(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); + builder->addResource(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); return {QueryPipelineBuilder::getPipeline(std::move(*builder)), std::move(indexes_to_recalc)}; } From 017d9557c5c6d41c671d55c21fb2e8810d231dd3 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 17 Oct 2024 15:33:33 +0000 Subject: [PATCH 046/267] f --- src/Interpreters/Aggregator.cpp | 12 +++++++++++- src/Interpreters/Aggregator.h | 7 ++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index cdc819d3a32..bb9e22e5a1b 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1519,7 +1519,10 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si Stopwatch watch; size_t rows = data_variants.size(); + std::unique_lock lk(tmp_files_mutex); auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); + lk.unlock(); + ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart); LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getHolder()->describeFilePath()); @@ -1639,11 +1642,18 @@ Block Aggregator::convertOneBucketToBlock(AggregatedDataVariants & variants, Are return block; } -std::vector & Aggregator::getTemporaryData() +std::list & Aggregator::getTemporaryData() { return tmp_files; } +bool Aggregator::hasTemporaryData() const +{ + std::lock_guard lk(tmp_files_mutex); + return !tmp_files.empty(); +} + + template void Aggregator::writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 3ac5ca30ed4..451583946eb 100644 --- a/src/Interpreters/Aggregator.h +++ 
b/src/Interpreters/Aggregator.h @@ -309,9 +309,9 @@ public: /// For external aggregation. void writeToTemporaryFile(AggregatedDataVariants & data_variants, size_t max_temp_file_size = 0) const; - bool hasTemporaryData() const { return !tmp_files.empty(); } + bool hasTemporaryData() const; - std::vector & getTemporaryData(); + std::list & getTemporaryData(); /// Get data structure of the result. Block getHeader(bool final) const; @@ -356,7 +356,8 @@ private: /// For external aggregation. TemporaryDataOnDiskScopePtr tmp_data; - mutable std::vector tmp_files; + mutable std::mutex tmp_files_mutex; + mutable std::list tmp_files; size_t min_bytes_for_prefetch = 0; From a5b9083f2c2f03345f1b14630d9bae8c25996697 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 18 Oct 2024 14:40:47 +0000 Subject: [PATCH 047/267] f --- src/Interpreters/TemporaryDataOnDisk.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 6cc49fe83c8..c0c9d0a80c5 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -9,13 +9,14 @@ #include #include #include +#include #include #include #include #include #include -#include "Common/Exception.h" +#include namespace ProfileEvents { @@ -130,7 +131,7 @@ public: ~TemporaryFileOnLocalDisk() override try { - if (disk->exists(path_to_file)) + if (disk->existsFile(path_to_file)) { LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Removing temporary file '{}'", path_to_file); disk->removeRecursive(path_to_file); From e8fdacdeced2a1bab0600524db90fc6cb29aaaf2 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 21 Oct 2024 16:50:16 +0000 Subject: [PATCH 048/267] fix --- src/Interpreters/Cache/Metadata.cpp | 1 + src/Interpreters/TemporaryDataOnDisk.cpp | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 2ee985b1c31..6a2cca33a13 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -470,6 +470,7 @@ private: void CacheMetadata::cleanupThreadFunc() { + LOG_DEBUG(log, "Cleanup thread started"); while (true) { Key key; diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index c0c9d0a80c5..ea29afbe1fa 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -59,7 +59,7 @@ public: explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t max_file_size = 0) { const auto key = FileSegment::Key::random(); - LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file in cache with key {}", key); + LOG_TRACE(getLogger("TemporaryFileInLocalCache"), "Creating temporary file in cache with key {}", key); segment_holder = file_cache.set( key, 0, std::max(10_MiB, max_file_size), CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); @@ -270,6 +270,9 @@ std::unique_ptr TemporaryDataBuffer::read() { finishWriting(); + if (stat.compressed_size == 0 && stat.uncompressed_size == 0) + return std::make_unique(std::make_unique()); + /// Keep buffer size less that file size, to avoid memory overhead for large amounts of small files size_t buffer_size = std::min(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE); return std::make_unique(file_holder->read(buffer_size)); From 881f1a94ae72433a1b1c49ee76877a3af66b1527 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 21 Oct 2024 16:52:18 +0000 
Subject: [PATCH 049/267] fix tidy --- src/Interpreters/TemporaryDataOnDisk.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index 86fa9e57e81..eab3571dd07 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -115,7 +115,7 @@ class WrapperGuard { public: template - WrapperGuard(std::unique_ptr holder_, Args && ... args) + explicit WrapperGuard(std::unique_ptr holder_, Args && ... args) : holder(std::move(holder_)) , impl(std::make_unique(*holder, std::forward(args)...)) { @@ -127,7 +127,7 @@ public: const Impl * operator->() const { chassert(impl); chassert(holder); return impl.get(); } Impl & operator*() { chassert(impl); chassert(holder); return *impl; } const Impl & operator*() const { chassert(impl); chassert(holder); return *impl; } - operator bool() const { return impl != nullptr; } + operator bool() const { return impl != nullptr; } /// NOLINT const Holder * getHolder() const { return holder.get(); } Holder * getHolder() { return holder.get(); } From 0e702fc56d55900aaf9ce18696f18cd4855d9d17 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 21 Oct 2024 16:57:32 +0000 Subject: [PATCH 050/267] upd tests/integration/test_tmp_policy/test.py --- tests/integration/test_tmp_policy/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_tmp_policy/test.py b/tests/integration/test_tmp_policy/test.py index 870a70b127a..097f93660b2 100644 --- a/tests/integration/test_tmp_policy/test.py +++ b/tests/integration/test_tmp_policy/test.py @@ -35,8 +35,8 @@ def test_disk_selection(start_cluster): node.query(query, settings=settings) assert node.contains_in_log( - "Writing part of aggregation data into temporary file /disk1/" + "Writing part of aggregation data into temporary file.*/disk1/" ) assert node.contains_in_log( - "Writing part of aggregation data into temporary file /disk2/" + "Writing part of aggregation data into temporary file.*/disk2/" ) From 88006f6e0952eb5fda97110124d25b6f3b16bc4c Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 21 Oct 2024 21:58:49 +0100 Subject: [PATCH 051/267] impl --- src/Interpreters/HashJoin/HashJoin.cpp | 105 ++++++++++++--------- src/Interpreters/HashJoin/HashJoin.h | 4 +- src/Interpreters/HashJoin/ScatteredBlock.h | 27 ++++++ 3 files changed, 88 insertions(+), 48 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 27179ca5a25..f4777b5f7da 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -30,6 +30,8 @@ #include #include #include +#include "Core/Block.h" +#include "Interpreters/HashJoin/ScatteredBlock.h" #include @@ -85,6 +87,11 @@ Block filterColumnsPresentInSampleBlock(const Block & block, const Block & sampl return filtered_block; } +ScatteredBlock filterColumnsPresentInSampleBlock(const ScatteredBlock & block, const Block & sample_block) +{ + return ScatteredBlock{filterColumnsPresentInSampleBlock(block.getSourceBlock(), sample_block)}; +} + Block materializeColumnsFromRightBlock(Block block, const Block & sample_block, const Names &) { for (const auto & sample_column : sample_block.getColumnsWithTypeAndName()) @@ -104,12 +111,6 @@ Block materializeColumnsFromRightBlock(Block block, const Block & sample_block, JoinCommon::convertColumnToNullable(column); } - // for (const auto & column_name : right_key_names) - // { - // auto & column = 
block.getByName(column_name).column; - // column = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); - // } - return block; } } @@ -557,7 +558,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) } } - size_t rows = source_block.rows(); + const size_t rows = source_block.rows(); data->rows_to_join += rows; const auto & right_key_names = table_join->getAllNames(JoinTableSide::Right); ColumnPtrMap all_key_columns(right_key_names.size()); @@ -567,7 +568,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) all_key_columns[column_name] = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); } - Block block_to_save = filterColumnsPresentInSampleBlock(source_block.getSourceBlock(), savedBlockSample()); + ScatteredBlock block_to_save = filterColumnsPresentInSampleBlock(source_block, savedBlockSample()); if (shrink_blocks) block_to_save = block_to_save.shrinkToFit(); @@ -583,7 +584,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) { tmp_stream = &tmp_data->createStream(right_sample_block); } - tmp_stream->write(block_to_save); + tmp_stream->write(block_to_save.getSourceBlock()); return true; } @@ -595,7 +596,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) if (storage_join_lock) throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "addBlockToJoin called when HashJoin locked to prevent updates"); - assertBlocksHaveEqualStructure(data->sample_block, block_to_save, "joined block"); + assertBlocksHaveEqualStructure(data->sample_block, block_to_save.getSourceBlock(), "joined block"); size_t min_bytes_to_compress = table_join->crossJoinMinBytesToCompress(); size_t min_rows_to_compress = table_join->crossJoinMinRowsToCompress(); @@ -609,12 +610,10 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) have_compressed = true; } - /// In case of scattered block we account proportional share of the source block bytes. - /// For not scattered columns it will be trivial (bytes * N / N) calculation. - data->blocks_allocated_size += block_to_save.rows() ? block_to_save.allocatedBytes() * rows / block_to_save.rows() : 0; + data->blocks_allocated_size += block_to_save.allocatedBytes(); doDebugAsserts(); data->blocks.emplace_back(std::move(block_to_save)); - Block * stored_block = &data->blocks.back(); + auto * stored_block = &data->blocks.back(); doDebugAsserts(); if (rows) @@ -679,7 +678,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) map, key_columns, key_sizes[onexpr_idx], - stored_block, + &stored_block->getSourceBlock(), source_block.getSelector(), null_map, join_mask_col.getData(), @@ -687,7 +686,8 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) is_inserted); if (flag_per_row) - used_flags->reinit, MapsAll>>(stored_block); + used_flags->reinit, MapsAll>>( + &stored_block->getSourceBlock()); else if (is_inserted) /// Number of buckets + 1 value from zero storage used_flags->reinit, MapsAll>>(size + 1); @@ -696,14 +696,16 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) if (!flag_per_row && save_nullmap && is_inserted) { - data->blocks_nullmaps_allocated_size += null_map_holder->allocatedBytes(); - data->blocks_nullmaps.emplace_back(stored_block, null_map_holder); + data->blocks_nullmaps_allocated_size + += null_map_holder->size() ? 
null_map_holder->allocatedBytes() * rows / null_map_holder->size() : 0; + data->blocks_nullmaps.emplace_back(&stored_block->getSourceBlock(), null_map_holder); } if (!flag_per_row && not_joined_map && is_inserted) { - data->blocks_nullmaps_allocated_size += not_joined_map->allocatedBytes(); - data->blocks_nullmaps.emplace_back(stored_block, std::move(not_joined_map)); + data->blocks_nullmaps_allocated_size + += not_joined_map->size() ? not_joined_map->allocatedBytes() * rows / not_joined_map->size() : 0; + data->blocks_nullmaps.emplace_back(&stored_block->getSourceBlock(), std::move(not_joined_map)); } if (!flag_per_row && !is_inserted) @@ -861,7 +863,7 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) } }; - for (const Block & block_right : data->blocks) + for (const auto & block_right : data->blocks) { ++block_number; if (block_number < start_right_block) @@ -869,9 +871,12 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) /// The following statement cannot be substituted with `process_right_block(!have_compressed ? block_right : block_right.decompress())` /// because it will lead to copying of `block_right` even if its branch is taken (because common type of `block_right` and `block_right.decompress()` is `Block`). if (!have_compressed) - process_right_block(block_right); + process_right_block(block_right.getSourceBlock()); else - process_right_block(block_right.decompress()); + { + chassert(!block_right.wasScattered()); /// Compression only happens for cross join + process_right_block(block_right.getSourceBlock().decompress()); + } if (rows_added > max_joined_block_rows) { @@ -1221,14 +1226,14 @@ private: std::any position; std::optional nulls_position; - std::optional used_position; + std::optional used_position; - size_t fillColumnsFromData(const BlocksList & blocks, MutableColumns & columns_right) + size_t fillColumnsFromData(const HashJoin::ScatteredBlocksList & blocks, MutableColumns & columns_right) { if (!position.has_value()) - position = std::make_any(blocks.begin()); + position = std::make_any(blocks.begin()); - auto & block_it = std::any_cast(position); + auto & block_it = std::any_cast(position); auto end = blocks.end(); size_t rows_added = 0; @@ -1288,11 +1293,11 @@ private: for (auto & it = *used_position; it != end && rows_added < max_block_size; ++it) { - const Block & mapped_block = *it; + const auto & mapped_block = *it; for (size_t row = 0; row < mapped_block.rows(); ++row) { - if (!parent.isUsed(&mapped_block, row)) + if (!parent.isUsed(&mapped_block.getSourceBlock(), row)) { for (size_t colnum = 0; colnum < columns_keys_and_right.size(); ++colnum) { @@ -1418,16 +1423,24 @@ void HashJoin::reuseJoinedData(const HashJoin & join) } } -BlocksList HashJoin::releaseJoinedBlocks(bool restructure) +BlocksList HashJoin::releaseJoinedBlocks(bool restructure [[maybe_unused]]) { LOG_TRACE( log, "{}Join data is being released, {} bytes and {} rows in hash table", instance_log_id, getTotalByteCount(), getTotalRowCount()); - BlocksList right_blocks = std::move(data->blocks); + auto extract_source_blocks = [](ScatteredBlocksList && blocks) + { + BlocksList result; + for (auto & block : blocks) + result.emplace_back(std::move(block).getSourceBlock()); + return result; + }; + + ScatteredBlocksList right_blocks = std::move(data->blocks); if (!restructure) { data.reset(); - return right_blocks; + return extract_source_blocks(std::move(right_blocks)); } data->maps.clear(); @@ -1441,7 +1454,7 @@ BlocksList 
HashJoin::releaseJoinedBlocks(bool restructure) if (!right_blocks.empty()) { positions.reserve(right_sample_block.columns()); - const Block & tmp_block = *right_blocks.begin(); + const Block & tmp_block = right_blocks.begin()->getSourceBlock(); for (const auto & sample_column : right_sample_block) { positions.emplace_back(tmp_block.getPositionByName(sample_column.name)); @@ -1449,12 +1462,12 @@ BlocksList HashJoin::releaseJoinedBlocks(bool restructure) } } - for (Block & saved_block : right_blocks) + for (ScatteredBlock & saved_block : right_blocks) { Block restored_block; for (size_t i = 0; i < positions.size(); ++i) { - auto & column = saved_block.getByPosition(positions[i]); + auto & column = saved_block.getSourceBlock().getByPosition(positions[i]); correctNullabilityInplace(column, is_nullable[i]); restored_block.insert(column); } @@ -1519,7 +1532,6 @@ bool HashJoin::isUsed(const Block * block_ptr, size_t row_idx) const return used_flags->getUsedSafe(block_ptr, row_idx); } - bool HashJoin::needUsedFlagsForPerRightTableRow(std::shared_ptr table_join_) const { if (!table_join_->oneDisjunct()) @@ -1538,7 +1550,7 @@ void HashJoin::tryRerangeRightTableDataImpl(Map & map [[maybe_unused]]) throw Exception(ErrorCodes::LOGICAL_ERROR, "Only left or inner join table can be reranged."); else { - auto merge_rows_into_one_block = [&](BlocksList & blocks, RowRefList & rows_ref) + auto merge_rows_into_one_block = [&](ScatteredBlocksList & blocks, RowRefList & rows_ref) { auto it = rows_ref.begin(); if (it.ok()) @@ -1550,7 +1562,7 @@ void HashJoin::tryRerangeRightTableDataImpl(Map & map [[maybe_unused]]) { return; } - auto & block = blocks.back(); + auto & block = blocks.back().getSourceBlock(); size_t start_row = block.rows(); for (; it.ok(); ++it) { @@ -1567,23 +1579,22 @@ void HashJoin::tryRerangeRightTableDataImpl(Map & map [[maybe_unused]]) } }; - auto visit_rows_map = [&](BlocksList & blocks, MapsAll & rows_map) + auto visit_rows_map = [&](ScatteredBlocksList & blocks, MapsAll & rows_map) { switch (data->type) { - #define M(TYPE) \ - case Type::TYPE: \ - {\ - rows_map.TYPE->forEachMapped([&](RowRefList & rows_ref) { merge_rows_into_one_block(blocks, rows_ref); }); \ - break; \ - } +#define M(TYPE) \ + case Type::TYPE: { \ + rows_map.TYPE->forEachMapped([&](RowRefList & rows_ref) { merge_rows_into_one_block(blocks, rows_ref); }); \ + break; \ + } APPLY_FOR_JOIN_VARIANTS(M) - #undef M +#undef M default: break; } }; - BlocksList sorted_blocks; + ScatteredBlocksList sorted_blocks; visit_rows_map(sorted_blocks, map); doDebugAsserts(); data->blocks.swap(sorted_blocks); diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index a76c60aab5a..59cc495e72f 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -337,6 +337,8 @@ public: using RawBlockPtr = const Block *; using BlockNullmapList = std::deque>; + using ScatteredBlocksList = std::list; + struct RightTableData { Type type = Type::EMPTY; @@ -344,7 +346,7 @@ public: std::vector maps; Block sample_block; /// Block as it would appear in the BlockList - BlocksList blocks; /// Blocks of "right" table. + ScatteredBlocksList blocks; /// Blocks of "right" table. BlockNullmapList blocks_nullmaps; /// Nullmaps for blocks of "right" table (if needed) /// Additional data - strings for string keys and continuation elements of single-linked lists of references to rows. 
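The ScatteredBlock.h hunk below moves the proportional memory accounting into the block itself. A minimal standalone sketch of that calculation, with assumed numbers for illustration:

    #include <cstddef>

    /// A block selecting 250 of 1000 source rows, whose source block occupies
    /// 64000 bytes, is accounted as 64000 * 250 / 1000 = 16000 bytes; an empty
    /// source block is accounted as 0.
    size_t accountedBytes(size_t allocated_bytes, size_t selected_rows, size_t source_rows)
    {
        return source_rows ? allocated_bytes * selected_rows / source_rows : 0;
    }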
diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index c114bb957c2..da4a19ceafb 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -6,6 +6,9 @@ #include #include +#include +#include + #include namespace DB @@ -225,6 +228,30 @@ struct ScatteredBlock : private boost::noncopyable /// Accounts only selected rows size_t rows() const { return selector.size(); } + /// In case of scattered block we account proportional share of the source block bytes. + /// For not scattered columns it will be trivial (bytes * N / N) calculation. + size_t allocatedBytes() const { return block.rows() ? block.allocatedBytes() * rows() / block.rows() : 0; } + + ScatteredBlock shrinkToFit() const + { + if (wasScattered()) + { + LOG_TEST(getLogger("HashJoin"), "shrinkToFit() is not supported for ScatteredBlock because blocks are shared"); + return ScatteredBlock{block}; + } + return ScatteredBlock{block.shrinkToFit()}; + } + + ScatteredBlock compress() const + { + chassert(!wasScattered()); + if (wasScattered()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot compress scattered block"); + return ScatteredBlock{block.compress()}; + } + + const auto & getByPosition(size_t i) const { return block.getByPosition(i); } + /// Whether `block` was scattered, i.e. `selector` != [0, block.rows()) bool wasScattered() const { From dd4246725066804b0a5c50a3e6d28f114efb6c4f Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 22 Oct 2024 13:00:14 +0100 Subject: [PATCH 052/267] better --- src/Interpreters/HashJoin/HashJoin.cpp | 9 +++------ src/Interpreters/HashJoin/HashJoinMethods.h | 17 ++++++++++++----- src/Interpreters/HashJoin/HashJoinMethodsImpl.h | 4 ++-- src/Interpreters/HashJoin/ScatteredBlock.h | 3 +-- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index f4777b5f7da..0d8f12ccb23 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -30,9 +30,6 @@ #include #include #include -#include "Core/Block.h" -#include "Interpreters/HashJoin/ScatteredBlock.h" - #include #include @@ -579,11 +576,11 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) && (tmp_stream || (max_bytes_in_join && getTotalByteCount() + block_to_save.allocatedBytes() >= max_bytes_in_join) || (max_rows_in_join && getTotalRowCount() + block_to_save.rows() >= max_rows_in_join))) { - chassert(!source_block.wasScattered()); /// We don't run parallel_hash for cross join if (tmp_stream == nullptr) { tmp_stream = &tmp_data->createStream(right_sample_block); } + chassert(!source_block.wasScattered()); /// We don't run parallel_hash for cross join tmp_stream->write(block_to_save.getSourceBlock()); return true; } @@ -613,7 +610,7 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) data->blocks_allocated_size += block_to_save.allocatedBytes(); doDebugAsserts(); data->blocks.emplace_back(std::move(block_to_save)); - auto * stored_block = &data->blocks.back(); + const auto * stored_block = &data->blocks.back(); doDebugAsserts(); if (rows) @@ -874,7 +871,7 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) process_right_block(block_right.getSourceBlock()); else { - chassert(!block_right.wasScattered()); /// Compression only happens for cross join + chassert(!block_right.wasScattered()); /// Compression only happens for cross join 
and scattering only for concurrent hash process_right_block(block_right.getSourceBlock().decompress()); } diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index a72087059b3..10fb50a6b83 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -19,7 +19,7 @@ template struct Inserter { static ALWAYS_INLINE bool - insertOne(const HashJoin & join, HashMap & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool) + insertOne(const HashJoin & join, HashMap & map, KeyGetter & key_getter, const Block * stored_block, size_t i, Arena & pool) { auto emplace_result = key_getter.emplaceKey(map, i, pool); @@ -31,7 +31,8 @@ struct Inserter return false; } - static ALWAYS_INLINE void insertAll(const HashJoin &, HashMap & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool) + static ALWAYS_INLINE void + insertAll(const HashJoin &, HashMap & map, KeyGetter & key_getter, const Block * stored_block, size_t i, Arena & pool) { auto emplace_result = key_getter.emplaceKey(map, i, pool); @@ -45,7 +46,13 @@ struct Inserter } static ALWAYS_INLINE void insertAsof( - HashJoin & join, HashMap & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool, const IColumn & asof_column) + HashJoin & join, + HashMap & map, + KeyGetter & key_getter, + const Block * stored_block, + size_t i, + Arena & pool, + const IColumn & asof_column) { auto emplace_result = key_getter.emplaceKey(map, i, pool); typename HashMap::mapped_type * time_series_map = &emplace_result.getMapped(); @@ -68,7 +75,7 @@ public: MapsTemplate & maps, const ColumnRawPtrs & key_columns, const Sizes & key_sizes, - Block * stored_block, + const Block * stored_block, const ScatteredBlock::Selector & selector, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, @@ -101,7 +108,7 @@ private: HashMap & map, const ColumnRawPtrs & key_columns, const Sizes & key_sizes, - Block * stored_block, + const Block * stored_block, const Selector & selector, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index 17c8c821bef..5af34689609 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -18,7 +18,7 @@ size_t HashJoinMethods::insertFromBlockImpl( MapsTemplate & maps, const ColumnRawPtrs & key_columns, const Sizes & key_sizes, - Block * stored_block, + const Block * stored_block, const ScatteredBlock::Selector & selector, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, @@ -204,7 +204,7 @@ size_t HashJoinMethods::insertFromBlockImplTypeC HashMap & map, const ColumnRawPtrs & key_columns, const Sizes & key_sizes, - Block * stored_block, + const Block * stored_block, const Selector & selector, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index da4a19ceafb..8425b3ebd3e 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -244,9 +244,8 @@ struct ScatteredBlock : private boost::noncopyable ScatteredBlock compress() const { - chassert(!wasScattered()); if (wasScattered()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot compress scattered block"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot compress ScatteredBlock"); return ScatteredBlock{block.compress()}; 
} From d4e440adb8c70bd46d98cc235ac26321896e18ac Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 22 Oct 2024 14:11:01 +0100 Subject: [PATCH 053/267] bring check back --- src/Interpreters/HashJoin/HashJoin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 0d8f12ccb23..ee643e45d02 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -385,7 +385,7 @@ size_t HashJoin::getTotalRowCount() const void HashJoin::doDebugAsserts() const { -#if !defined(NDEBUG) && 0 +#ifndef NDEBUG size_t debug_blocks_allocated_size = 0; for (const auto & block : data->blocks) debug_blocks_allocated_size += block.allocatedBytes(); @@ -607,8 +607,8 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) have_compressed = true; } - data->blocks_allocated_size += block_to_save.allocatedBytes(); doDebugAsserts(); + data->blocks_allocated_size += block_to_save.allocatedBytes(); data->blocks.emplace_back(std::move(block_to_save)); const auto * stored_block = &data->blocks.back(); doDebugAsserts(); From eccf5dd15e91663adb0c54e045f6c87e789656b3 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 22 Oct 2024 13:19:42 +0000 Subject: [PATCH 054/267] better error message --- src/Interpreters/TemporaryDataOnDisk.cpp | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index ea29afbe1fa..60bfd379a72 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -15,6 +15,8 @@ #include #include +#include +#include #include #include @@ -98,7 +100,24 @@ public: { auto reservation = volume->reserve(max_file_size); if (!reservation) - throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk"); + { + auto disks = volume->getDisks(); + Strings disks_info; + for (const auto & d : disks) + { + auto to_double = [](auto x) { return static_cast<double>(x); }; + disks_info.push_back(fmt::format("{}: available: {}, unreserved: {}, total: {}, keeping: {}", + d->getName(), + ReadableSize(d->getAvailableSpace().transform(to_double).value_or(NaNOrZero<double>())), + ReadableSize(d->getUnreservedSpace().transform(to_double).value_or(NaNOrZero<double>())), + ReadableSize(d->getTotalSpace().transform(to_double).value_or(NaNOrZero<double>())), + ReadableSize(d->getKeepingFreeSpace()))); + } + + throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, + "Not enough space on temporary disk, cannot reserve {} bytes on [{}]", + max_file_size, fmt::join(disks_info, ", ")); + } disk = reservation->getDisk(); } else From d8f2540b3ca426c98a8562a45b17a684a8d5a381 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 23 Oct 2024 16:37:00 +0100 Subject: [PATCH 055/267] fix --- src/Interpreters/HashJoin/HashJoin.cpp | 12 ++++++------ src/Interpreters/HashJoin/HashJoin.h | 11 +++++++++-- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index ee643e45d02..5e372e9bdcd 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -399,7 +399,7 @@ void HashJoin::doDebugAsserts() const size_t debug_blocks_nullmaps_allocated_size = 0; for (const auto & nullmap : data->blocks_nullmaps) - debug_blocks_nullmaps_allocated_size += nullmap.second->allocatedBytes(); + debug_blocks_nullmaps_allocated_size += 
nullmap.allocatedBytes(); if (data->blocks_nullmaps_allocated_size != debug_blocks_nullmaps_allocated_size) throw Exception( @@ -695,14 +695,14 @@ bool HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) { data->blocks_nullmaps_allocated_size += null_map_holder->size() ? null_map_holder->allocatedBytes() * rows / null_map_holder->size() : 0; - data->blocks_nullmaps.emplace_back(&stored_block->getSourceBlock(), null_map_holder); + data->blocks_nullmaps.emplace_back(stored_block, null_map_holder); } if (!flag_per_row && not_joined_map && is_inserted) { data->blocks_nullmaps_allocated_size += not_joined_map->size() ? not_joined_map->allocatedBytes() * rows / not_joined_map->size() : 0; - data->blocks_nullmaps.emplace_back(&stored_block->getSourceBlock(), std::move(not_joined_map)); + data->blocks_nullmaps.emplace_back(stored_block, std::move(not_joined_map)); } if (!flag_per_row && !is_inserted) @@ -1347,10 +1347,10 @@ private: for (auto & it = *nulls_position; it != end && rows_added < max_block_size; ++it) { - const auto * block = it->first; + const auto * block = it->block; ConstNullMapPtr nullmap = nullptr; - if (it->second) - nullmap = &assert_cast<const ColumnUInt8 &>(*it->second).getData(); + if (it->column) + nullmap = &assert_cast<const ColumnUInt8 &>(*it->column).getData(); for (size_t row = 0; row < block->rows(); ++row) { diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 59cc495e72f..64cd5cf5a4e 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -334,8 +334,15 @@ public: using MapsVariant = std::variant<MapsOne, MapsAll, MapsAsof>; - using RawBlockPtr = const Block *; - using BlockNullmapList = std::deque<std::pair<RawBlockPtr, ColumnPtr>>; + using RawBlockPtr = const ScatteredBlock *; + struct NullMapHolder + { + size_t allocatedBytes() const { return column->size() ? 
column->allocatedBytes() * block->rows() / column->size() : 0; } + + RawBlockPtr block; + ColumnPtr column; + }; + using BlockNullmapList = std::deque<NullMapHolder>; using ScatteredBlocksList = std::list<ScatteredBlock>; From 69e9dd9b3dff6ba787cb5e3cb44c705ff61228a0 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 23 Oct 2024 15:07:49 +0100 Subject: [PATCH 056/267] fix --- src/Interpreters/ConcurrentHashJoin.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index f2d1d1418a4..88559e26c71 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -246,8 +246,13 @@ void ConcurrentHashJoin::joinBlock(Block & block, std::vector<Block> & res, std: chassert(res.empty()); res.clear(); res.reserve(dispatched_blocks.size()); - std::ranges::transform( - dispatched_blocks, std::back_inserter(res), [](ScatteredBlock & res_block) { return std::move(res_block).getSourceBlock(); }); + for (auto && res_block : dispatched_blocks) + { + if (res_block.rows()) + res.emplace_back(std::move(res_block).getSourceBlock()); + } + if (res.empty()) + res.emplace_back(dispatched_blocks[0].getSourceBlock()); } void ConcurrentHashJoin::checkTypesOfKeys(const Block & block) const From b04e7abd2ec8729f97424d39908a3b82e72f0057 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 24 Oct 2024 14:18:33 +0100 Subject: [PATCH 057/267] stash --- src/Interpreters/HashJoin/AddedColumns.h | 2 +- src/Interpreters/HashJoin/HashJoinMethodsImpl.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index d60ccf0038d..4abac0c09d1 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -72,7 +72,7 @@ public: , left_block(left_block_.getSourceBlock()) , join_on_keys(join_on_keys_) , additional_filter_expression(additional_filter_expression_) - , rows_to_add(left_block.rows()) + , rows_to_add(left_block_.rows()) , join_data_avg_perkey_rows(join.getJoinedData()->avgPerKeyRows()) , output_by_row_list_threshold(join.getTableJoin().outputByRowListPerkeyRowsThreshold()) , join_data_sorted(join.getJoinedData()->sorted) diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index 5af34689609..5cfb5d469e1 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -412,8 +412,8 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumns( { if (unlikely(current_offset >= max_joined_block_rows)) { - added_columns.offsets_to_replicate->resize(ind); - added_columns.filter.resize(ind); + added_columns.offsets_to_replicate->resize(i); + added_columns.filter.resize(i); break; } } From d848a3b2bbb656807ace0bd4e32b3a6588519047 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 24 Oct 2024 19:31:13 +0100 Subject: [PATCH 058/267] stash --- src/Interpreters/ConcurrentHashJoin.cpp | 4 +++- src/Interpreters/HashJoin/AddedColumns.h | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 88559e26c71..139e093ad07 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -26,6 +26,7 @@ #include #include #include +#include "Core/Defines.h" #include @@ -238,7 +239,8 @@ void ConcurrentHashJoin::joinBlock(Block & block, std::vector<Block> & res, std: 
std::shared_ptr none_extra_block; auto & hash_join = hash_joins[i]; auto & dispatched_block = dispatched_blocks[i]; - hash_join->data->joinBlock(dispatched_block, none_extra_block); + if ((i == 0 && block.rows() == 0) || dispatched_block.rows()) + hash_join->data->joinBlock(dispatched_block, none_extra_block); if (none_extra_block && !none_extra_block->empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "not_processed should be empty"); } diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index 4abac0c09d1..3e6a29da6c3 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -167,7 +167,7 @@ public: return; /// Do not allow big allocations when user set max_joined_block_rows to huge value - size_t reserve_size = std::min(max_joined_block_rows, DEFAULT_BLOCK_SIZE * 2); + size_t reserve_size = std::min(max_joined_block_rows, rows_to_add * 2); /// rows_to_add if (need_replicate) /// Reserve 10% more space for columns, because some rows can be repeated @@ -226,7 +226,7 @@ private: void addColumn(const ColumnWithTypeAndName & src_column, const std::string & qualified_name) { columns.push_back(src_column.column->cloneEmpty()); - columns.back()->reserve(src_column.column->size()); + columns.back()->reserve(rows_to_add); type_name.emplace_back(src_column.type, src_column.name, qualified_name); } From c2abeca568a7118f4638e7de73f4655736b2e72a Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 24 Oct 2024 19:35:17 +0100 Subject: [PATCH 059/267] stash --- src/Interpreters/HashJoin/HashJoin.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 5e372e9bdcd..a3773c0fe62 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -996,6 +996,7 @@ void HashJoin::checkTypesOfKeys(const Block & block) const void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) { + LOG_DEBUG(&Poco::Logger::get("debug"), "block.rows()={}", block.rows()); if (!data) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot join after data has been released"); @@ -1062,6 +1063,7 @@ void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) void HashJoin::joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed) { + LOG_DEBUG(&Poco::Logger::get("debug"), "block.rows()={}", block.rows()); if (!data) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot join after data has been released"); From 349af95cd1e2c998391bec9710bacb0458175835 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 25 Oct 2024 10:45:32 +0000 Subject: [PATCH 060/267] fix data race --- src/Interpreters/Cache/Metadata.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 6a2cca33a13..2ee985b1c31 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -470,7 +470,6 @@ private: void CacheMetadata::cleanupThreadFunc() { - LOG_DEBUG(log, "Cleanup thread started"); while (true) { Key key; From 084f878fb19995763e3db825752dc61c9d768b43 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 25 Oct 2024 11:10:33 +0000 Subject: [PATCH 061/267] log --- src/Interpreters/Aggregator.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index bb9e22e5a1b..2dd6513d498 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ 
-1504,6 +1504,7 @@ bool Aggregator::executeOnBlock(Columns columns, && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; + LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } @@ -1520,6 +1521,7 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si size_t rows = data_variants.size(); std::unique_lock lk(tmp_files_mutex); + LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: max_temp_file_size {}", __FILE__, __LINE__, max_temp_file_size); auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); lk.unlock(); @@ -2932,6 +2934,7 @@ bool Aggregator::mergeOnBlock(Block block, AggregatedDataVariants & result, bool && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; + LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } From 54b93953847699f1f9d14939bd1e0067d933dbba Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 25 Oct 2024 11:11:19 +0000 Subject: [PATCH 062/267] fix typo --- src/Storages/MergeTree/MergeTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 1009458574e..d781cef9f17 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -1614,7 +1614,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const sort_description, partition_key_columns, global_ctx->merging_params, - (is_vertical_merge ? ctx->rows_sources_temporary_file : nullptr), /// rows_sources temporaty file is used only for vertical merge + (is_vertical_merge ? 
ctx->rows_sources_temporary_file : nullptr), /// rows_sources' temporary file is used only for vertical merge (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], ctx->blocks_are_granules_size, From 292800eb0a198b9d8dd89e864028dc9c95039213 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 25 Oct 2024 13:01:18 +0100 Subject: [PATCH 063/267] stash --- src/Interpreters/ConcurrentHashJoin.cpp | 11 ++++------- src/Interpreters/HashJoin/HashJoin.cpp | 2 -- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 139e093ad07..6ebf72bd33e 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -26,7 +26,6 @@ #include #include #include -#include "Core/Defines.h" #include @@ -239,7 +238,7 @@ void ConcurrentHashJoin::joinBlock(Block & block, std::vector & res, std: std::shared_ptr none_extra_block; auto & hash_join = hash_joins[i]; auto & dispatched_block = dispatched_blocks[i]; - if ((i == 0 && block.rows() == 0) || dispatched_block.rows()) + if (i == 0 || dispatched_block.rows()) hash_join->data->joinBlock(dispatched_block, none_extra_block); if (none_extra_block && !none_extra_block->empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "not_processed should be empty"); @@ -248,13 +247,11 @@ void ConcurrentHashJoin::joinBlock(Block & block, std::vector & res, std: chassert(res.empty()); res.clear(); res.reserve(dispatched_blocks.size()); - for (auto && res_block : dispatched_blocks) + for (size_t i = 0; i < dispatched_blocks.size(); ++i) { - if (res_block.rows()) - res.emplace_back(std::move(res_block).getSourceBlock()); + if (i == 0 || dispatched_blocks[i].rows()) + res.emplace_back(std::move(dispatched_blocks[i]).getSourceBlock()); } - if (res.empty()) - res.emplace_back(dispatched_blocks[0].getSourceBlock()); } void ConcurrentHashJoin::checkTypesOfKeys(const Block & block) const diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index a3773c0fe62..5e372e9bdcd 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -996,7 +996,6 @@ void HashJoin::checkTypesOfKeys(const Block & block) const void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) { - LOG_DEBUG(&Poco::Logger::get("debug"), "block.rows()={}", block.rows()); if (!data) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot join after data has been released"); @@ -1063,7 +1062,6 @@ void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) void HashJoin::joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed) { - LOG_DEBUG(&Poco::Logger::get("debug"), "block.rows()={}", block.rows()); if (!data) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot join after data has been released"); From 5f93afede52bc7ce4672b3ac59d4776590a190cc Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 25 Oct 2024 13:20:00 +0100 Subject: [PATCH 064/267] stash --- src/Interpreters/HashJoin/HashJoin.cpp | 6 ++---- src/Interpreters/HashJoin/HashJoin.h | 2 +- src/Interpreters/HashJoin/ScatteredBlock.h | 2 -- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 5e372e9bdcd..526ac456f7f 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -693,16 +693,14 @@ bool 
HashJoin::addBlockToJoin(ScatteredBlock & source_block, bool check_limits) if (!flag_per_row && save_nullmap && is_inserted) { - data->blocks_nullmaps_allocated_size - += null_map_holder->size() ? null_map_holder->allocatedBytes() * rows / null_map_holder->size() : 0; data->blocks_nullmaps.emplace_back(stored_block, null_map_holder); + data->blocks_nullmaps_allocated_size += data->blocks_nullmaps.back().allocatedBytes(); } if (!flag_per_row && not_joined_map && is_inserted) { - data->blocks_nullmaps_allocated_size - += not_joined_map->size() ? not_joined_map->allocatedBytes() * rows / not_joined_map->size() : 0; data->blocks_nullmaps.emplace_back(stored_block, std::move(not_joined_map)); + data->blocks_nullmaps_allocated_size += data->blocks_nullmaps.back().allocatedBytes(); } if (!flag_per_row && !is_inserted) diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 64cd5cf5a4e..9e42d58e6b0 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -337,7 +337,7 @@ public: using RawBlockPtr = const ScatteredBlock *; struct NullMapHolder { - size_t allocatedBytes() const { return column->size() ? column->allocatedBytes() * block->rows() / column->size() : 0; } + size_t allocatedBytes() const { return !column->empty() ? column->allocatedBytes() * block->rows() / column->size() : 0; } RawBlockPtr block; ColumnPtr column; diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 8425b3ebd3e..d8077c30c53 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -254,13 +254,11 @@ struct ScatteredBlock : private boost::noncopyable /// Whether `block` was scattered, i.e. `selector` != [0, block.rows()) bool wasScattered() const { - chassert(block); return selector.size() != block.rows(); } const ColumnWithTypeAndName & getByName(const std::string & name) const { - chassert(block); return block.getByName(name); } From c952d9d8153ce59458fdb69a208b361c7454cab1 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 25 Oct 2024 20:55:51 +0000 Subject: [PATCH 065/267] Right JOIN with parallel replicas --- .../ClusterProxy/executeQuery.cpp | 4 +- src/Planner/PlannerJoinTree.cpp | 2 + src/Planner/findParallelReplicasQuery.cpp | 52 ++++++++++++---- src/Storages/buildQueryTreeForShard.cpp | 62 +++++++++++++------ 4 files changed, 86 insertions(+), 34 deletions(-) diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index e88fdeb0379..4b1f3094be3 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -477,8 +477,8 @@ void executeQueryWithParallelReplicas( QueryPlanStepPtr analyzed_read_from_merge_tree) { auto logger = getLogger("executeQueryWithParallelReplicas"); - LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas", - storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage); + LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas\n{}", + storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage, StackTrace().toString()); const auto & settings = context->getSettingsRef(); diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 39c1352c9cf..7889a358d95 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ 
b/src/Planner/PlannerJoinTree.cpp @@ -665,6 +665,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres bool is_single_table_expression, bool wrap_read_columns_in_subquery) { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "table_expression:\n{}", table_expression->dumpTree()); + const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index b97a9a36381..891e5034f44 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -100,14 +100,19 @@ std::stack<const QueryNode *> getSupportingParallelReplicasQuery(const IQueryTre auto join_kind = join_node.getKind(); auto join_strictness = join_node.getStrictness(); - bool can_parallelize_join = - join_kind == JoinKind::Left - || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All); - - if (!can_parallelize_join) + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) + { + query_tree_node = join_node.getLeftTableExpression().get(); + } + else if (join_kind == JoinKind::Right) + { + query_tree_node = join_node.getRightTableExpression().get(); + } + else + { return {}; + } - query_tree_node = join_node.getLeftTableExpression().get(); break; } default: @@ -310,13 +315,15 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * query_tree_node) { - std::stack<const IQueryTreeNode *> right_join_nodes; - while (query_tree_node || !right_join_nodes.empty()) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); + + std::stack<const IQueryTreeNode *> join_nodes; + while (query_tree_node || !join_nodes.empty()) { if (!query_tree_node) { - query_tree_node = right_join_nodes.top(); - right_join_nodes.pop(); + query_tree_node = join_nodes.top(); + join_nodes.pop(); } auto join_tree_node_type = query_tree_node->getNodeType(); @@ -365,8 +372,23 @@ static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * que case QueryTreeNodeType::JOIN: { const auto & join_node = query_tree_node->as<JoinNode &>(); - query_tree_node = join_node.getLeftTableExpression().get(); - right_join_nodes.push(join_node.getRightTableExpression().get()); + const auto join_kind = join_node.getKind(); + const auto join_strictness = join_node.getStrictness(); + + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner and join_strictness == JoinStrictness::All)) + { + query_tree_node = join_node.getLeftTableExpression().get(); + join_nodes.push(join_node.getRightTableExpression().get()); + } + else if (join_kind == JoinKind::Right) + { + query_tree_node = join_node.getRightTableExpression().get(); + join_nodes.push(join_node.getLeftTableExpression().get()); + } + else + { + return nullptr; + } break; } default: @@ -400,7 +422,9 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr if (!context->canUseParallelReplicasOnFollower()) return nullptr; - return findTableForParallelReplicas(query_tree_node.get()); + const auto * res = findTableForParallelReplicas(query_tree_node.get()); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); + return res; } JoinTreeQueryPlan buildQueryPlanForParallelReplicas( @@ -408,6 +432,8 @@ JoinTreeQueryPlan buildQueryPlanForParallelReplicas( const PlannerContextPtr & planner_context, std::shared_ptr<const StorageLimitsList> 
storage_limits) { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); + auto processed_stage = QueryProcessingStage::WithMergeableState; auto context = planner_context->getQueryContext(); diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index bbf32c68d19..df9bfd049fb 100644 --- a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -314,6 +314,35 @@ TableNodePtr executeSubqueryNode(const QueryTreeNodePtr & subquery_node, return temporary_table_expression_node; } +QueryTreeNodePtr getSubqueryFromTableExpression( + const QueryTreeNodePtr & join_table_expression, + const std::unordered_map & column_source_to_columns, + const ContextPtr & context) +{ + auto join_table_expression_node_type = join_table_expression->getNodeType(); + QueryTreeNodePtr subquery_node; + + if (join_table_expression_node_type == QueryTreeNodeType::QUERY || join_table_expression_node_type == QueryTreeNodeType::UNION) + { + subquery_node = join_table_expression; + } + else if ( + join_table_expression_node_type == QueryTreeNodeType::TABLE || join_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION) + { + const auto & columns = column_source_to_columns.at(join_table_expression).columns; + subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns, join_table_expression, context); + } + else + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Expected JOIN right table expression to be table, table function, query or union node. Actual {}", + join_table_expression->formatASTForErrorMessage()); + } + + return subquery_node; +} + } QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_context, QueryTreeNodePtr query_tree_to_modify) @@ -335,37 +364,32 @@ QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_contex { if (auto * join_node = global_in_or_join_node.query_node->as()) { - auto join_right_table_expression = join_node->getRightTableExpression(); - auto join_right_table_expression_node_type = join_right_table_expression->getNodeType(); - - QueryTreeNodePtr subquery_node; - - if (join_right_table_expression_node_type == QueryTreeNodeType::QUERY || - join_right_table_expression_node_type == QueryTreeNodeType::UNION) + QueryTreeNodePtr join_table_expression; + const auto join_kind = join_node->getKind(); + const auto join_strictness = join_node->getStrictness(); + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) { - subquery_node = join_right_table_expression; + join_table_expression = join_node->getRightTableExpression(); } - else if (join_right_table_expression_node_type == QueryTreeNodeType::TABLE || - join_right_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION) + else if (join_kind == JoinKind::Right) { - const auto & columns = column_source_to_columns.at(join_right_table_expression).columns; - subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns, - join_right_table_expression, - planner_context->getQueryContext()); + join_table_expression = join_node->getLeftTableExpression(); } else { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Expected JOIN right table expression to be table, table function, query or union node. 
Actual {}", - join_right_table_expression->formatASTForErrorMessage()); + throw Exception( + ErrorCodes::LOGICAL_ERROR, "Unexpected join kind: {}", join_kind); } + auto subquery_node + = getSubqueryFromTableExpression(join_table_expression, column_source_to_columns, planner_context->getQueryContext()); + auto temporary_table_expression_node = executeSubqueryNode(subquery_node, planner_context->getMutableQueryContext(), global_in_or_join_node.subquery_depth); - temporary_table_expression_node->setAlias(join_right_table_expression->getAlias()); + temporary_table_expression_node->setAlias(join_table_expression->getAlias()); - replacement_map.emplace(join_right_table_expression.get(), std::move(temporary_table_expression_node)); + replacement_map.emplace(join_table_expression.get(), std::move(temporary_table_expression_node)); continue; } if (auto * in_function_node = global_in_or_join_node.query_node->as()) From aeffae571c91909a7196bec6e952026932c43cc6 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Sat, 26 Oct 2024 19:35:33 +0200 Subject: [PATCH 066/267] CI: Functional Tests with praktika --- .github/workflows/pr.yaml | 287 ++++++++++++++++++++ .github/workflows/pull_request.yml | 212 --------------- ci/jobs/build_clickhouse.py | 12 +- ci/jobs/check_style.py | 2 +- ci/jobs/fast_test.py | 2 +- ci/jobs/functional_stateless_tests.py | 48 ++++ ci/jobs/scripts/functional_tests_results.py | 3 +- ci/praktika/__main__.py | 23 +- ci/praktika/_environment.py | 3 +- ci/praktika/_settings.py | 3 +- ci/praktika/hook_cache.py | 6 +- ci/praktika/json.html | 50 +++- ci/praktika/result.py | 2 +- ci/praktika/runner.py | 56 ++-- ci/praktika/yaml_generator.py | 6 +- ci/settings/definitions.py | 1 + ci/settings/settings.py | 2 + ci/workflows/pull_request.py | 16 +- 18 files changed, 477 insertions(+), 257 deletions(-) create mode 100644 .github/workflows/pr.yaml delete mode 100644 .github/workflows/pull_request.yml create mode 100644 ci/jobs/functional_stateless_tests.py diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml new file mode 100644 index 00000000000..34c794f6088 --- /dev/null +++ b/.github/workflows/pr.yaml @@ -0,0 +1,287 @@ +# generated by praktika + +name: PR + +on: + pull_request: + branches: ['master'] + +# Cancel the previous wf run in PRs. +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + GH_TOKEN: ${{ github.token }} + +# Allow updating GH commit statuses and PR comments to post an actual job reports link +permissions: write-all + +jobs: + + config_workflow: + runs-on: [ci_services] + needs: [] + name: "Config Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Config Workflow''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Config Workflow''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + docker_builds: + runs-on: [ci_services_ebs] + needs: [config_workflow] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIEJ1aWxkcw==') }} + name: "Docker Builds" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Docker Builds''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Docker Builds''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + style_check: + runs-on: [ci_services] + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3R5bGUgQ2hlY2s=') }} + name: "Style Check" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Style Check''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Style Check''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + fast_test: + runs-on: [builder] + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RmFzdCB0ZXN0') }} + name: "Fast test" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. 
+ + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + build_amd64_debug: + runs-on: [builder] + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgYW1kNjQgZGVidWc=') }} + name: "Build amd64 debug" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug: + runs-on: [builder] + needs: [config_workflow, docker_builds, build_amd64_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKQ==') }} + name: "Stateless tests (amd, debug)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + finish_workflow: + runs-on: [ci_services] + needs: [config_workflow, docker_builds, style_check, fast_test, build_amd64_debug, stateless_tests_amd_debug] + if: ${{ !cancelled() }} + name: "Finish Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Finish Workflow''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Finish Workflow''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml deleted file mode 100644 index e4eb44b2774..00000000000 --- a/.github/workflows/pull_request.yml +++ /dev/null @@ -1,212 +0,0 @@ -# yamllint disable rule:comments-indentation -name: PullRequestCI - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - -on: # yamllint disable-line rule:truthy - pull_request: - types: - - synchronize - - reopened - - opened - branches: - - master - -# Cancel the previous wf run in PRs. -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - RunConfig: - runs-on: [self-hosted, style-checker-aarch64] - outputs: - data: ${{ steps.runconfig.outputs.CI_DATA }} - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true # to ensure correct digests - fetch-depth: 0 # to get a version - filter: tree:0 - - name: Debug Info - uses: ./.github/actions/debug - - name: Set pending Sync status - run: | - python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --set-pending-status - - name: Labels check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 run_check.py - - name: Python unit tests - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - echo "Testing the main ci directory" - python3 -m unittest discover -s . 
-p 'test_*.py' - - name: PrepareRunConfig - id: runconfig - run: | - python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json - - echo "::group::CI configuration" - python3 -m json.tool ${{ runner.temp }}/ci_run_data.json - echo "::endgroup::" - - { - echo 'CI_DATA<> "$GITHUB_OUTPUT" - - name: Re-create GH statuses for skipped jobs if any - run: | - python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses - BuildDockers: - needs: [RunConfig] - if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }} - uses: ./.github/workflows/docker_test_images.yml - with: - data: ${{ needs.RunConfig.outputs.data }} - StyleCheck: - needs: [RunConfig, BuildDockers] - if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Style check')}} - uses: ./.github/workflows/reusable_test.yml - with: - test_name: Style check - runner_type: style-checker-aarch64 - run_command: | - python3 style_check.py - data: ${{ needs.RunConfig.outputs.data }} - secrets: - secret_envs: | - ROBOT_CLICKHOUSE_SSH_KEY< "$WORKFLOW_RESULT_FILE" << 'EOF' - ${{ toJson(needs) }} - EOF - python3 merge_pr.py --set-ci-status - - name: Check Workflow results - uses: ./.github/actions/check_workflow - with: - needs: ${{ toJson(needs) }} - - ################################# Stage Final ################################# - # - FinishCheck: - if: ${{ !failure() && !cancelled() }} - needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2] - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - filter: tree:0 - - name: Finish label - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} - -############################################################################################# -###################################### JEPSEN TESTS ######################################### -############################################################################################# - # This is special test NOT INCLUDED in FinishCheck - # When it's skipped, all dependent tasks will be skipped too. 
- # DO NOT add it there - Jepsen: - # we need concurrency as the job uses dedicated instances in the cloud - concurrency: - group: jepsen - if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse Keeper Jepsen') }} - needs: [RunConfig, Builds_1] - uses: ./.github/workflows/reusable_test.yml - with: - test_name: ClickHouse Keeper Jepsen - runner_type: style-checker-aarch64 - data: ${{ needs.RunConfig.outputs.data }} diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 21ed8091608..cfa358b4059 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -13,8 +13,14 @@ class JobStages(metaclass=MetaClasses.WithIter): def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") - parser.add_argument("BUILD_TYPE", help="Type: ") - parser.add_argument("--param", help="Optional custom job start stage", default=None) + parser.add_argument( + "BUILD_TYPE", help="Type: __" + ) + parser.add_argument( + "--param", + help="Optional user-defined job start stage (for local run)", + default=None, + ) return parser.parse_args() @@ -95,7 +101,7 @@ def main(): Shell.check(f"ls -l {build_dir}/programs/") res = results[-1].is_ok() - Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() if __name__ == "__main__": diff --git a/ci/jobs/check_style.py b/ci/jobs/check_style.py index f9cdc76302d..d4b81abc92c 100644 --- a/ci/jobs/check_style.py +++ b/ci/jobs/check_style.py @@ -379,4 +379,4 @@ if __name__ == "__main__": ) ) - Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index 1dcd65b6ed2..dc5e1c975a6 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -330,7 +330,7 @@ def main(): CH.terminate() - Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() if __name__ == "__main__": diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py new file mode 100644 index 00000000000..dfdd5821a19 --- /dev/null +++ b/ci/jobs/functional_stateless_tests.py @@ -0,0 +1,48 @@ +import argparse + +from praktika.result import Result +from praktika.settings import Settings +from praktika.utils import MetaClasses, Shell, Utils + + +class JobStages(metaclass=MetaClasses.WithIter): + CHECKOUT_SUBMODULES = "checkout" + CMAKE = "cmake" + BUILD = "build" + + +def parse_args(): + parser = argparse.ArgumentParser(description="ClickHouse Build Job") + parser.add_argument("BUILD_TYPE", help="Type: ") + parser.add_argument("--param", help="Optional custom job start stage", default=None) + return parser.parse_args() + + +def main(): + + args = parse_args() + + stop_watch = Utils.Stopwatch() + + stages = list(JobStages) + stage = args.param or JobStages.CHECKOUT_SUBMODULES + if stage: + assert stage in JobStages, f"--param must be one of [{list(JobStages)}]" + print(f"Job will start from stage [{stage}]") + while stage in stages: + stages.pop(0) + stages.insert(0, stage) + + res = True + results = [] + + if res and JobStages.CHECKOUT_SUBMODULES in stages: + info = Shell.get_output(f"ls -l {Settings.INPUT_DIR}") + results.append(Result(name="TEST", status=Result.Status.SUCCESS, info=info)) + res = 
results[-1].is_ok() + + Result.create_from(results=results, stopwatch=stop_watch).complete_job() + + +if __name__ == "__main__": + main() diff --git a/ci/jobs/scripts/functional_tests_results.py b/ci/jobs/scripts/functional_tests_results.py index 5ac9d6b985d..aba3e4f7f5b 100755 --- a/ci/jobs/scripts/functional_tests_results.py +++ b/ci/jobs/scripts/functional_tests_results.py @@ -1,7 +1,6 @@ import dataclasses from typing import List -from praktika.environment import Environment from praktika.result import Result OK_SIGN = "[ OK " @@ -250,7 +249,7 @@ class FTResultsProcessor: # test_results.sort(key=test_result_comparator) return Result.create_from( - name=Environment.JOB_NAME, + name="Tests", results=test_results, status=state, files=[self.tests_output_file], diff --git a/ci/praktika/__main__.py b/ci/praktika/__main__.py index 7f472ecd9ae..fbb9f92909a 100644 --- a/ci/praktika/__main__.py +++ b/ci/praktika/__main__.py @@ -37,6 +37,24 @@ def create_parser(): type=str, default=None, ) + run_parser.add_argument( + "--pr", + help="PR number. Optional parameter for local run. Set if you want a required artifact to be uploaded from CI run in that PR", + type=int, + default=None, + ) + run_parser.add_argument( + "--sha", + help="Commit sha. Optional parameter for local run. Set if you want a required artifact to be uploaded from CI run on that sha, head sha will be used if not set", + type=str, + default=None, + ) + run_parser.add_argument( + "--branch", + help="Branch name. Optional parameter for local run. Set if you want a required artifact to be uploaded from CI run on that branch, main branch name will be used if not set", + type=str, + default=None, + ) run_parser.add_argument( + "--ci", help="When not set - dummy env will be generated, for local test", @@ -85,9 +103,12 @@ if __name__ == "__main__": workflow=workflow, job=job, docker=args.docker, - dummy_env=not args.ci, + local_run=not args.ci, no_docker=args.no_docker, param=args.param, + pr=args.pr, + branch=args.branch, + sha=args.sha, ) else: parser.print_help() diff --git a/ci/praktika/_environment.py b/ci/praktika/_environment.py index ce9c6f5b486..4ac8ad319f9 100644 --- a/ci/praktika/_environment.py +++ b/ci/praktika/_environment.py @@ -159,7 +159,8 @@ class _Environment(MetaClasses.Serializable): @classmethod def get_s3_prefix_static(cls, pr_number, branch, sha, latest=False): prefix = "" - if pr_number > 0: + assert sha or latest + if pr_number and pr_number > 0: prefix += f"{pr_number}" else: prefix += f"{branch}" diff --git a/ci/praktika/_settings.py b/ci/praktika/_settings.py index 3052d8ef877..1777257f484 100644 --- a/ci/praktika/_settings.py +++ b/ci/praktika/_settings.py @@ -1,5 +1,4 @@ import dataclasses -from pathlib import Path from typing import Dict, Iterable, List, Optional @@ -8,6 +7,7 @@ class _Settings: ###################################### # Pipeline generation settings # ###################################### + MAIN_BRANCH = "main" CI_PATH = "./ci" WORKFLOW_PATH_PREFIX: str = "./.github/workflows" WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows" @@ -111,6 +111,7 @@ _USER_DEFINED_SETTINGS = [ "CI_DB_INSERT_TIMEOUT_SEC", "SECRET_GH_APP_PEM_KEY", "SECRET_GH_APP_ID", + "MAIN_BRANCH", ] diff --git a/ci/praktika/hook_cache.py b/ci/praktika/hook_cache.py index b1b5c654f20..5cfedec0144 100644 --- a/ci/praktika/hook_cache.py +++ b/ci/praktika/hook_cache.py @@ -8,11 +8,9 @@ from praktika.utils import Utils class CacheRunnerHooks: @classmethod - def configure(cls, _workflow): - workflow_config = 
RunConfig.from_fs(_workflow.name) + def configure(cls, workflow): + workflow_config = RunConfig.from_fs(workflow.name) cache = Cache() - assert _Environment.get().WORKFLOW_NAME - workflow = _get_workflows(name=_Environment.get().WORKFLOW_NAME)[0] print(f"Workflow Configure, workflow [{workflow.name}]") assert ( workflow.enable_cache diff --git a/ci/praktika/json.html b/ci/praktika/json.html index 2f8c3e45d0b..af03ed702f8 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -89,6 +89,17 @@ letter-spacing: -0.5px; } + .dropdown-value { + width: 100px; + font-weight: normal; + font-family: inherit; + background-color: transparent; + color: inherit; + /*border: none;*/ + /*outline: none;*/ + /*cursor: pointer;*/ + } + #result-container { background-color: var(--tile-background); margin-left: calc(var(--status-width) + 20px); @@ -282,6 +293,12 @@ } } + function updateUrlParameter(paramName, paramValue) { + const url = new URL(window.location.href); + url.searchParams.set(paramName, paramValue); + window.location.href = url.toString(); + } + // Attach the toggle function to the click event of the icon document.getElementById('theme-toggle').addEventListener('click', toggleTheme); @@ -291,14 +308,14 @@ const monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]; const month = monthNames[date.getMonth()]; - const year = date.getFullYear(); + //const year = date.getFullYear(); const hours = String(date.getHours()).padStart(2, '0'); const minutes = String(date.getMinutes()).padStart(2, '0'); const seconds = String(date.getSeconds()).padStart(2, '0'); //const milliseconds = String(date.getMilliseconds()).padStart(2, '0'); return showDate - ? `${day}-${month}-${year} ${hours}:${minutes}:${seconds}` + ? `${day}'${month} ${hours}:${minutes}:${seconds}` : `${hours}:${minutes}:${seconds}`; } @@ -346,7 +363,7 @@ return 'status-other'; } - function addKeyValueToStatus(key, value) { + function addKeyValueToStatus(key, value, options = null) { const statusContainer = document.getElementById('status-container'); @@ -357,10 +374,25 @@ keyElement.className = 'json-key'; keyElement.textContent = key + ':'; - const valueElement = document.createElement('div'); - valueElement.className = 'json-value'; - valueElement.textContent = value; - + let valueElement + if (value) { + valueElement = document.createElement('div'); + valueElement.className = 'json-value'; + valueElement.textContent = value; + } else if (options) { + valueElement = document.createElement('select'); + valueElement.className = 'dropdown-value'; + valueElement.addEventListener('change', (event) => { + const selectedValue = event.target.value; + updateUrlParameter(key, selectedValue); + }); + options.forEach(optionValue => { + const option = document.createElement('option'); + option.value = optionValue; + option.textContent = optionValue; + valueElement.appendChild(option); + }); + } keyValuePair.appendChild(keyElement) keyValuePair.appendChild(valueElement) statusContainer.appendChild(keyValuePair); @@ -487,7 +519,7 @@ const columnSymbols = { name: '📂', - status: '✔️', + status: '⏯️', start_time: '🕒', duration: '⏳', info: 'ℹ️', @@ -726,7 +758,7 @@ } else { console.error("TODO") } - addKeyValueToStatus("sha", sha); + addKeyValueToStatus("sha", null, [sha, 'lala']); if (nameParams[1]) { addKeyValueToStatus("job", nameParams[1]); } diff --git a/ci/praktika/result.py b/ci/praktika/result.py index 3d3c986d5f9..2ba8309ad60 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -318,7 
+318,7 @@ class Result(MetaClasses.Serializable): files=[log_file] if log_file else None, ) - def finish_job_accordingly(self): + def complete_job(self): self.dump() if not self.is_ok(): print("ERROR: Job Failed") diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 797a799a74d..823c7e0f36d 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -19,7 +19,7 @@ from praktika.utils import Shell, TeePopen, Utils class Runner: @staticmethod - def generate_dummy_environment(workflow, job): + def generate_local_run_environment(workflow, job, pr=None, branch=None, sha=None): print("WARNING: Generate dummy env for local test") Shell.check( f"mkdir -p {Settings.TEMP_DIR} {Settings.INPUT_DIR} {Settings.OUTPUT_DIR}" @@ -28,9 +28,9 @@ class Runner: WORKFLOW_NAME=workflow.name, JOB_NAME=job.name, REPOSITORY="", - BRANCH="", - SHA="", - PR_NUMBER=-1, + BRANCH=branch or Settings.MAIN_BRANCH if not pr else "", + SHA=sha or Shell.get_output("git rev-parse HEAD"), + PR_NUMBER=pr or -1, EVENT_TYPE="", JOB_OUTPUT_STREAM="", EVENT_FILE_PATH="", @@ -86,7 +86,7 @@ class Runner: return 0 - def _pre_run(self, workflow, job): + def _pre_run(self, workflow, job, local_run=False): env = _Environment.get() result = Result( @@ -96,9 +96,10 @@ class Runner: ) result.dump() - if workflow.enable_report and job.name != Settings.CI_CONFIG_JOB_NAME: - print("Update Job and Workflow Report") - HtmlRunnerHooks.pre_run(workflow, job) + if not local_run: + if workflow.enable_report and job.name != Settings.CI_CONFIG_JOB_NAME: + print("Update Job and Workflow Report") + HtmlRunnerHooks.pre_run(workflow, job) print("Download required artifacts") required_artifacts = [] @@ -133,11 +134,17 @@ class Runner: env.dump() if job.run_in_docker and not no_docker: - # TODO: add support for any image, including not from ci config (e.g. 
ubuntu:latest)
-            docker_tag = RunConfig.from_fs(workflow.name).digest_dockers[
-                job.run_in_docker
-            ]
-            docker = docker or f"{job.run_in_docker}:{docker_tag}"
+            if ":" in job.run_in_docker:
+                docker_name, docker_tag = job.run_in_docker.split(":")
+                print(
+                    f"WARNING: Job [{job.name}] uses a custom docker image with a tag - praktika won't control the docker version"
+                )
+            else:
+                docker_name, docker_tag = (
+                    job.run_in_docker,
+                    RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker],
+                )
+            docker = docker or f"{docker_name}:{docker_tag}"
             cmd = f"docker run --rm --user \"$(id -u):$(id -g)\" -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}"
         else:
             cmd = job.command
@@ -285,14 +292,23 @@ class Runner:
         return True

     def run(
-        self, workflow, job, docker="", dummy_env=False, no_docker=False, param=None
+        self,
+        workflow,
+        job,
+        docker="",
+        local_run=False,
+        no_docker=False,
+        param=None,
+        pr=None,
+        sha=None,
+        branch=None,
     ):
         res = True
         setup_env_code = -10
         prerun_code = -10
         run_code = -10

-        if res and not dummy_env:
+        if res and not local_run:
             print(
                 f"\n\n=== Setup env script [{job.name}], workflow [{workflow.name}] ==="
             )
@@ -309,13 +325,15 @@
                 traceback.print_exc()
             print(f"=== Setup env finished ===\n\n")
         else:
-            self.generate_dummy_environment(workflow, job)
+            self.generate_local_run_environment(
+                workflow, job, pr=pr, branch=branch, sha=sha
+            )

-        if res and not dummy_env:
+        if res:
             res = False
             print(f"=== Pre run script [{job.name}], workflow [{workflow.name}] ===")
             try:
-                prerun_code = self._pre_run(workflow, job)
+                prerun_code = self._pre_run(workflow, job, local_run=local_run)
                 res = prerun_code == 0
                 if not res:
                     print(f"ERROR: Pre-run failed with exit code [{prerun_code}]")
@@ -339,7 +357,7 @@
                 traceback.print_exc()
             print(f"=== Run script finished ===\n\n")

-        if not dummy_env:
+        if not local_run:
             print(f"=== Post run script [{job.name}], workflow [{workflow.name}] ===")
             self._post_run(workflow, job, setup_env_code, prerun_code, run_code)
             print(f"=== Post run script finished ===")
diff --git a/ci/praktika/yaml_generator.py b/ci/praktika/yaml_generator.py
index 00c469fec0c..fb918b4ddba 100644
--- a/ci/praktika/yaml_generator.py
+++ b/ci/praktika/yaml_generator.py
@@ -102,7 +102,11 @@ jobs:
           run: |
            . /tmp/praktika_setup_env.sh
             set -o pipefail
-            {PYTHON} -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee {RUN_LOG}
+            if command -v ts &> /dev/null; then
+                python3 -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log
+            else
+                python3 -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee /tmp/praktika/praktika_run.log
+            fi
 {UPLOADS_GITHUB}\
 """

diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py
index 176e865e6f3..c67bdee015b 100644
--- a/ci/settings/definitions.py
+++ b/ci/settings/definitions.py
@@ -231,3 +231,4 @@ class JobNames:
     STYLE_CHECK = "Style Check"
     FAST_TEST = "Fast test"
     BUILD_AMD_DEBUG = "Build amd64 debug"
+    STATELESS_TESTS = "Stateless tests (amd, debug)"
diff --git a/ci/settings/settings.py b/ci/settings/settings.py
index 8d5e7bc3c87..0f3b1efcee0 100644
--- a/ci/settings/settings.py
+++ b/ci/settings/settings.py
@@ -4,6 +4,8 @@ from ci.settings.definitions import (
     RunnerLabels,
 )

+MAIN_BRANCH = "master"
+
 S3_ARTIFACT_PATH = f"{S3_BUCKET_NAME}/artifacts"
 CI_CONFIG_RUNS_ON = [RunnerLabels.CI_SERVICES]
 DOCKER_BUILD_RUNS_ON = [RunnerLabels.CI_SERVICES_EBS]
diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py
index 74129177efb..c7715b40fca 100644
--- a/ci/workflows/pull_request.py
+++ b/ci/workflows/pull_request.py
@@ -59,6 +59,19 @@ job_build_amd_debug = Job.Config(
     provides=[ArtifactNames.ch_debug_binary],
 )

+stateless_tests_job = Job.Config(
+    name=JobNames.STATELESS_TESTS,
+    runs_on=[RunnerLabels.BUILDER],
+    command="python3 ./ci/jobs/functional_stateless_tests.py amd_debug",
+    run_in_docker="clickhouse/fasttest:latest",
+    digest_config=Job.CacheDigestConfig(
+        include_paths=[
+            "./ci/jobs/functional_stateless_tests.py",
+        ],
+    ),
+    requires=[ArtifactNames.ch_debug_binary],
+)
+
 workflow = Workflow.Config(
     name="PR",
     event=Workflow.Event.PULL_REQUEST,
@@ -67,6 +80,7 @@ workflow = Workflow.Config(
         style_check_job,
         fast_test_job,
         job_build_amd_debug,
+        stateless_tests_job,
     ],
     artifacts=[
         Artifact.Config(
@@ -91,4 +105,4 @@ if __name__ == "__main__":
     # local job test inside praktika environment
     from praktika.runner import Runner

-    Runner().run(workflow, fast_test_job, docker="fasttest", dummy_env=True)
+    Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True)

From b03a296542de52c3cb2b6f309a4bc496e4a70454 Mon Sep 17 00:00:00 2001
From: Igor Nikonov
Date: Mon, 28 Oct 2024 23:25:38 +0000
Subject: [PATCH 067/267] Fix right join - disabling PR led to dup result

---
 src/Planner/PlannerJoinTree.cpp | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp
index 7889a358d95..834e572b167 100644
--- a/src/Planner/PlannerJoinTree.cpp
+++ b/src/Planner/PlannerJoinTree.cpp
@@ -665,11 +665,15 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
     bool is_single_table_expression,
     bool wrap_read_columns_in_subquery)
 {
-    LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "table_expression:\n{}", table_expression->dumpTree());
-
     const auto & query_context = planner_context->getQueryContext();
     const auto & settings = query_context->getSettingsRef();

+    LOG_DEBUG(
+        getLogger(__PRETTY_FUNCTION__),
+        "pr_enabled={} table_expression:\n{}",
+        settings[Setting::allow_experimental_parallel_reading_from_replicas].toString(),
+        table_expression->dumpTree());
+
     auto & table_expression_data = 
planner_context->getTableExpressionDataOrThrow(table_expression); QueryProcessingStage::Enum from_stage = QueryProcessingStage::Enum::FetchColumns; @@ -914,11 +918,11 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// It is just a safety check needed until we have a proper sending plan to replicas. /// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. - const bool other_table_already_chosen_for_reading_with_parallel_replicas - = planner_context->getGlobalPlannerContext()->parallel_replicas_table - && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; - if (other_table_already_chosen_for_reading_with_parallel_replicas) - planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + // const bool other_table_already_chosen_for_reading_with_parallel_replicas + // = planner_context->getGlobalPlannerContext()->parallel_replicas_table + // && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; + // if (other_table_already_chosen_for_reading_with_parallel_replicas) + // planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); storage->read( query_plan, @@ -930,6 +934,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres max_block_size, max_streams); + LOG_DEBUG(getLogger("dumpQueryPlan"), "\n{}", dumpQueryPlan(query_plan)); + auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings) { if (!table->isMergeTree()) @@ -1249,6 +1255,8 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Join expression: {}", join_table_expression->dumpTree()); + auto & join_node = join_table_expression->as(); if (left_join_tree_query_plan.from_stage != QueryProcessingStage::FetchColumns) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, @@ -1921,6 +1929,8 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, "Expected 1 query plan for JOIN TREE. 
Actual {}", query_plans_stack.size()); + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "JOIN query plan:\n{}", dumpQueryPlan(query_plans_stack.back().query_plan)); + return std::move(query_plans_stack.back()); } From dc976c48d284fa79ad05fe234130ed3794522511 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 28 Oct 2024 23:36:57 +0000 Subject: [PATCH 068/267] Test --- .../03254_pr_join_on_dups.reference | 273 ++++++++++++++++++ .../0_stateless/03254_pr_join_on_dups.sql | 81 ++++++ 2 files changed, 354 insertions(+) create mode 100644 tests/queries/0_stateless/03254_pr_join_on_dups.reference create mode 100644 tests/queries/0_stateless/03254_pr_join_on_dups.sql diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.reference b/tests/queries/0_stateless/03254_pr_join_on_dups.reference new file mode 100644 index 00000000000..58602bafb5d --- /dev/null +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.reference @@ -0,0 +1,273 @@ +inner +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +inner subs +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +inner expr +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +left +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +left subs +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +left expr +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +right +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +right subs +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +full +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +full subs +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +self inner +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self inner nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +self inner nullable vs not nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +self inner nullable vs not nullable 2 +4 r6 nr6 4 r6 nr6 +6 r7 nr7 6 r7 nr7 +7 r8 nr8 7 r8 nr8 +9 r9 nr9 9 r9 nr9 +self left +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N 
+9 l9 \N 9 l9 \N +self left nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 0 \N +4 l6 \N 0 \N +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self left nullable vs not nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self left nullable vs not nullable 2 +1 r1 \N 0 \N +1 r2 \N 0 \N +2 r3 \N 0 \N +3 r4 \N 0 \N +3 r5 \N 0 \N +4 r6 nr6 4 r6 nr6 +6 r7 nr7 6 r7 nr7 +7 r8 nr8 7 r8 nr8 +9 r9 nr9 9 r9 nr9 +self right +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self right nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +self right nullable vs not nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +self full +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self full nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 0 \N +4 l6 \N 0 \N +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self full nullable vs not nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql new file mode 100644 index 00000000000..71695c0d486 --- /dev/null +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -0,0 +1,81 @@ +drop table if exists X sync; +drop table if exists Y sync; + +create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); +create table Y (id Int32, y_a String, y_b Nullable(String)) engine ReplicatedMergeTree('/clickhouse/{database}/Y', '1') order by tuple(); + +insert into X (id, x_a, x_b) values (1, 'l1', 1), (2, 'l2', 2), (2, 'l3', 3), (3, 'l4', 4); +insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), (9, 'l9'); +insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); +insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); + +set enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; + +select 'inner'; +select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'inner subs'; +select s.*, j.* from (select * from X) as s inner join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'inner expr'; +select X.*, Y.* from X inner join Y on (X.id + 1) = (Y.id + 1) order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; + +select 'left'; +select X.*, Y.* from X left join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'left subs'; +select s.*, 
j.* from (select * from X) as s left join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'left expr'; +select X.*, Y.* from X left join Y on (X.id + 1) = (Y.id + 1) order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; + +select 'right'; +select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'right subs'; +select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +--select 'right expr'; +--select X.*, Y.* from X right join Y on (X.id + 1) = (Y.id + 1) order by id; + +select 'full'; +select X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'full subs'; +select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +--select 'full expr'; +--select X.*, Y.* from X full join Y on (X.id + 1) = (Y.id + 1) order by id; + +select 'self inner'; +select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable'; +select X.*, s.* from X inner join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable vs not nullable'; +select X.*, s.* from X inner join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +-- TODO: s.y_b == '' instead of NULL +select 'self inner nullable vs not nullable 2'; +select Y.*, s.* from Y inner join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; + +select 'self left'; +select X.*, s.* from X left join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable'; +select X.*, s.* from X left join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable vs not nullable'; +select X.*, s.* from X left join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +-- TODO: s.y_b == '' instead of NULL +select 'self left nullable vs not nullable 2'; +select Y.*, s.* from Y left join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; + +select 'self right'; +select X.*, s.* from X right join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self right nullable'; +select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self right nullable vs not nullable'; +select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +--select 'self right nullable vs not nullable 2'; +--select Y.*, s.* from Y right join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; + +select 'self full'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable'; +select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable vs not nullable'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +--select 'self full nullable vs not nullable 2'; 
+--select Y.*, s.* from Y full join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id;
+
+-- drop table X;
+-- drop table Y;

From e2c2e67c7b4915da6302a516826573cf1ccee701 Mon Sep 17 00:00:00 2001
From: Igor Nikonov
Date: Tue, 29 Oct 2024 10:02:24 +0000
Subject: [PATCH 069/267] Fix

---
 src/Planner/findParallelReplicasQuery.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp
index 891e5034f44..58a7f48ee2b 100644
--- a/src/Planner/findParallelReplicasQuery.cpp
+++ b/src/Planner/findParallelReplicasQuery.cpp
@@ -423,7 +423,10 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr
         return nullptr;

     const auto * res = findTableForParallelReplicas(query_tree_node.get());
-    LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName());
+    if (res)
+        LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName());
+    else
+        LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "No table found");
     return res;
 }

From c7fce84729435f98222d0e02ba035cdd6085a0df Mon Sep 17 00:00:00 2001
From: Igor Nikonov
Date: Tue, 29 Oct 2024 12:17:46 +0000
Subject: [PATCH 070/267] Cleanup

---
 src/Interpreters/ClusterProxy/executeQuery.cpp |  4 ++--
 src/Planner/Planner.cpp                        |  2 +-
 src/Planner/PlannerJoinTree.cpp                | 16 +++++-----------
 src/Planner/findParallelReplicasQuery.cpp      |  4 ++--
 src/Planner/findQueryForParallelReplicas.h     |  4 ++--
 .../03173_parallel_replicas_join_bug.sh        |  3 +++
 6 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp
index 4b1f3094be3..e88fdeb0379 100644
--- a/src/Interpreters/ClusterProxy/executeQuery.cpp
+++ b/src/Interpreters/ClusterProxy/executeQuery.cpp
@@ -477,8 +477,8 @@ void executeQueryWithParallelReplicas(
     QueryPlanStepPtr analyzed_read_from_merge_tree)
 {
     auto logger = getLogger("executeQueryWithParallelReplicas");
-    LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas\n{}",
-        storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage, StackTrace().toString());
+    LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas",
+        storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage);

     const auto & settings = context->getSettingsRef();

diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp
index 8d3c75fdabb..17277dfe8cd 100644
--- a/src/Planner/Planner.cpp
+++ b/src/Planner/Planner.cpp
@@ -274,7 +274,7 @@ FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr &
     return res;
 }

-FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options)
+FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options)
 {
     if (select_query_options.only_analyze)
         return {};
diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp
index 834e572b167..5c08cc27aff 100644
--- a/src/Planner/PlannerJoinTree.cpp
+++ b/src/Planner/PlannerJoinTree.cpp
@@ -918,11 +918,11 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
     /// It is just a safety check needed until we have a proper sending plan to replicas.
/// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. - // const bool other_table_already_chosen_for_reading_with_parallel_replicas - // = planner_context->getGlobalPlannerContext()->parallel_replicas_table - // && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; - // if (other_table_already_chosen_for_reading_with_parallel_replicas) - // planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + const bool other_table_already_chosen_for_reading_with_parallel_replicas + = planner_context->getGlobalPlannerContext()->parallel_replicas_table + && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; + if (other_table_already_chosen_for_reading_with_parallel_replicas) + planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); storage->read( query_plan, @@ -934,8 +934,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres max_block_size, max_streams); - LOG_DEBUG(getLogger("dumpQueryPlan"), "\n{}", dumpQueryPlan(query_plan)); - auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings) { if (!table->isMergeTree()) @@ -1255,8 +1253,6 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Join expression: {}", join_table_expression->dumpTree()); - auto & join_node = join_table_expression->as(); if (left_join_tree_query_plan.from_stage != QueryProcessingStage::FetchColumns) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, @@ -1929,8 +1925,6 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, "Expected 1 query plan for JOIN TREE. 
Actual {}", query_plans_stack.size()); - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "JOIN query plan:\n{}", dumpQueryPlan(query_plans_stack.back().query_plan)); - return std::move(query_plans_stack.back()); } diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 58a7f48ee2b..d92500e82fc 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -250,7 +250,7 @@ const QueryNode * findQueryForParallelReplicas( return res; } -const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options) +const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options) { if (select_query_options.only_analyze) return nullptr; @@ -404,7 +404,7 @@ static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * que return nullptr; } -const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options) +const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options) { if (select_query_options.only_analyze) return nullptr; diff --git a/src/Planner/findQueryForParallelReplicas.h b/src/Planner/findQueryForParallelReplicas.h index cdce4ad0b47..83aa11c8c64 100644 --- a/src/Planner/findQueryForParallelReplicas.h +++ b/src/Planner/findQueryForParallelReplicas.h @@ -15,10 +15,10 @@ struct SelectQueryOptions; /// Find a query which can be executed with parallel replicas up to WithMergableStage. /// Returned query will always contain some (>1) subqueries, possibly with joins. -const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options); +const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options); /// Find a table from which we should read on follower replica. It's the left-most table within all JOINs and UNIONs. 
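 /// May return nullptr when no suitable table is found, so callers must check the result before dereferencing it.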
-const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options); +const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options); struct JoinTreeQueryPlan; diff --git a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh index 289a49c72f4..1ee3d729cb4 100755 --- a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh +++ b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh @@ -6,12 +6,15 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q " + DROP TABLE IF EXISTS ids; CREATE TABLE ids (id UUID, whatever String) Engine=MergeTree ORDER BY tuple(); INSERT INTO ids VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', 'whatever'); + DROP TABLE IF EXISTS data; CREATE TABLE data (id UUID, event_time DateTime, status String) Engine=MergeTree ORDER BY tuple(); INSERT INTO data VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', '2000-01-01', 'CREATED'); + DROP TABLE IF EXISTS data2; CREATE TABLE data2 (id UUID, event_time DateTime, status String) Engine=MergeTree ORDER BY tuple(); INSERT INTO data2 VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', '2000-01-02', 'CREATED'); " From 0fda9bf238d261269b2dd7f47c79898ceaf931cb Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 29 Oct 2024 14:38:57 +0000 Subject: [PATCH 071/267] Fix 03080_incorrect_join_with_merge.sql --- src/Storages/buildQueryTreeForShard.cpp | 4 ++-- .../queries/0_stateless/03080_incorrect_join_with_merge.sql | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index df9bfd049fb..8d8af134a05 100644 --- a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -366,8 +366,8 @@ QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_contex { QueryTreeNodePtr join_table_expression; const auto join_kind = join_node->getKind(); - const auto join_strictness = join_node->getStrictness(); - if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) + // const auto join_strictness = join_node->getStrictness(); + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner /* && join_strictness == JoinStrictness::All*/)) { join_table_expression = join_node->getRightTableExpression(); } diff --git a/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql b/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql index a34c71a44e2..a743c5bdffb 100644 --- a/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql +++ b/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql @@ -2,6 +2,7 @@ SET enable_analyzer=1; SET distributed_foreground_insert=1; +DROP TABLE IF EXISTS first_table_lr SYNC; CREATE TABLE first_table_lr ( id String, @@ -11,6 +12,7 @@ ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_03080/alter', ' ORDER BY id; +DROP TABLE IF EXISTS first_table; CREATE TABLE first_table ( id String, @@ -19,6 +21,7 @@ CREATE TABLE first_table ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'first_table_lr'); +DROP TABLE IF EXISTS second_table_lr; CREATE TABLE second_table_lr ( id String, @@ -26,6 +29,7 @@ CREATE TABLE second_table_lr ) ENGINE = MergeTree() ORDER BY id; +DROP TABLE IF EXISTS second_table; CREATE TABLE second_table ( id String, @@ 
-36,6 +40,7 @@ ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'second_table_lr INSERT INTO first_table VALUES ('1', '2'), ('3', '4'); INSERT INTO second_table VALUES ('1', '2'), ('3', '4'); +DROP TABLE IF EXISTS two_tables; CREATE TABLE two_tables ( id String, From 66f750ea6f12c08f99c7fecea700d8c7f1eaeeb7 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 29 Oct 2024 15:12:03 +0000 Subject: [PATCH 072/267] remove debug logs --- src/Interpreters/Aggregator.cpp | 3 --- src/Parsers/CreateQueryUUIDs.cpp | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 2dd6513d498..bb9e22e5a1b 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1504,7 +1504,6 @@ bool Aggregator::executeOnBlock(Columns columns, && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; - LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } @@ -1521,7 +1520,6 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si size_t rows = data_variants.size(); std::unique_lock lk(tmp_files_mutex); - LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: max_temp_file_size {}", __FILE__, __LINE__, max_temp_file_size); auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); lk.unlock(); @@ -2934,7 +2932,6 @@ bool Aggregator::mergeOnBlock(Block block, AggregatedDataVariants & result, bool && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; - LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } diff --git a/src/Parsers/CreateQueryUUIDs.cpp b/src/Parsers/CreateQueryUUIDs.cpp index c788cc7a025..14cf5761a11 100644 --- a/src/Parsers/CreateQueryUUIDs.cpp +++ b/src/Parsers/CreateQueryUUIDs.cpp @@ -31,7 +31,7 @@ CreateQueryUUIDs::CreateQueryUUIDs(const ASTCreateQuery & query, bool generate_r /// If we generate random UUIDs for already existing tables then those UUIDs will not be correct making those inner target table inaccessible. /// Thus it's not safe for example to replace /// "ATTACH MATERIALIZED VIEW mv AS SELECT a FROM b" with - /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID "XXXX" AS SELECT a FROM b" + /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID "248372b7-02c4-4c88-a5e1-282a83cc572a" AS SELECT a FROM b" /// This replacement is safe only for CREATE queries when inner target tables don't exist yet. 
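     /// For this reason random UUIDs are generated below only for CREATE queries, never for ATTACH.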
if (!query.attach) { From bebef8d0d96e27c9823419b3a7f669d62c6a6a56 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 29 Oct 2024 23:58:39 +0000 Subject: [PATCH 073/267] Fix right joins again --- src/Planner/PlannerJoinTree.cpp | 38 +++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 5c08cc27aff..0007dc9d158 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -665,7 +665,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres bool is_single_table_expression, bool wrap_read_columns_in_subquery) { - const auto & query_context = planner_context->getQueryContext(); + auto query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); LOG_DEBUG( @@ -922,17 +922,33 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres = planner_context->getGlobalPlannerContext()->parallel_replicas_table && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; if (other_table_already_chosen_for_reading_with_parallel_replicas) - planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + { + chassert(query_context->canUseParallelReplicasOnFollower()); - storage->read( - query_plan, - columns_names, - storage_snapshot, - table_expression_query_info, - query_context, - from_stage, - max_block_size, - max_streams); + auto mutable_context = Context::createCopy(query_context); + mutable_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + storage->read( + query_plan, + columns_names, + storage_snapshot, + table_expression_query_info, + mutable_context, + from_stage, + max_block_size, + max_streams); + } + else + { + storage->read( + query_plan, + columns_names, + storage_snapshot, + table_expression_query_info, + query_context, + from_stage, + max_block_size, + max_streams); + } auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings) { From 6004cb8ff4fc0b751f9cd0821a4d9214cfd63e3e Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 30 Oct 2024 11:08:21 +0000 Subject: [PATCH 074/267] Remove current_table_chosen_for_reading_with_parallel_replicas flag --- src/Planner/PlannerJoinTree.cpp | 8 +++----- src/Storages/SelectQueryInfo.h | 2 -- src/Storages/StorageMergeTree.cpp | 4 +--- src/Storages/StorageReplicatedMergeTree.cpp | 5 +---- 4 files changed, 5 insertions(+), 14 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 0007dc9d158..5e29c1a6a81 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -702,8 +702,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres table_expression_query_info.table_expression = table_expression; if (const auto & filter_actions = table_expression_data.getFilterActions()) table_expression_query_info.filter_actions_dag = std::make_shared(filter_actions->clone()); - table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas - = table_node == planner_context->getGlobalPlannerContext()->parallel_replicas_table; size_t max_streams = settings[Setting::max_threads]; size_t max_threads_execute_query = settings[Setting::max_threads]; @@ -918,10 +916,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// 
It is just a safety check needed until we have a proper sending plan to replicas. /// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. - const bool other_table_already_chosen_for_reading_with_parallel_replicas + const bool other_table_chosen_for_reading_with_parallel_replicas = planner_context->getGlobalPlannerContext()->parallel_replicas_table - && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; - if (other_table_already_chosen_for_reading_with_parallel_replicas) + && table_node != planner_context->getGlobalPlannerContext()->parallel_replicas_table; + if (other_table_chosen_for_reading_with_parallel_replicas) { chassert(query_context->canUseParallelReplicasOnFollower()); diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 7ad6a733c6f..f67274f227a 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -162,8 +162,6 @@ struct SelectQueryInfo /// It's guaranteed to be present in JOIN TREE of `query_tree` QueryTreeNodePtr table_expression; - bool current_table_chosen_for_reading_with_parallel_replicas = false; - /// Table expression modifiers for storage std::optional table_expression_modifiers; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 40cd6e01dba..55f79a54f2e 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -276,9 +276,7 @@ void StorageMergeTree::read( } const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() - && local_context->getSettingsRef()[Setting::parallel_replicas_for_non_replicated_merge_tree] - && (!local_context->getSettingsRef()[Setting::allow_experimental_analyzer] - || query_info.current_table_chosen_for_reading_with_parallel_replicas); + && local_context->getSettingsRef()[Setting::parallel_replicas_for_non_replicated_merge_tree]; if (auto plan = reader.read( column_names, diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index fc3245eafcf..3f1d2bc6a1c 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5640,10 +5640,7 @@ void StorageReplicatedMergeTree::readLocalImpl( const size_t max_block_size, const size_t num_streams) { - const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() - && (!local_context->getSettingsRef()[Setting::allow_experimental_analyzer] - || query_info.current_table_chosen_for_reading_with_parallel_replicas); - + const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower(); auto plan = reader.read( column_names, storage_snapshot, query_info, local_context, max_block_size, num_streams, From b724f2c33141fb0348742d6b48c4b58763450ff7 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 30 Oct 2024 12:24:56 +0000 Subject: [PATCH 075/267] Fix FULL joins --- src/Planner/PlannerJoinTree.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 5e29c1a6a81..ac05f893cd2 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -916,13 +916,11 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// It is just a safety check needed until we have a 
proper sending plan to replicas. /// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. - const bool other_table_chosen_for_reading_with_parallel_replicas - = planner_context->getGlobalPlannerContext()->parallel_replicas_table + const bool no_tables_or_another_table_chosen_for_reading_with_parallel_replicas_mode + = query_context->canUseParallelReplicasOnFollower() && table_node != planner_context->getGlobalPlannerContext()->parallel_replicas_table; - if (other_table_chosen_for_reading_with_parallel_replicas) + if (no_tables_or_another_table_chosen_for_reading_with_parallel_replicas_mode) { - chassert(query_context->canUseParallelReplicasOnFollower()); - auto mutable_context = Context::createCopy(query_context); mutable_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); storage->read( @@ -984,7 +982,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) + else if ( + ClusterProxy::canUseParallelReplicasOnInitiator(query_context) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == query_node) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); From 8245e3d7ef5530d55763700b0c1aeae1697dd26c Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 30 Oct 2024 21:23:29 +0000 Subject: [PATCH 076/267] Fix --- src/Planner/PlannerJoinTree.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index ac05f893cd2..481cb9b8649 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -982,10 +982,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if ( - ClusterProxy::canUseParallelReplicasOnInitiator(query_context) - && planner_context->getGlobalPlannerContext()->parallel_replicas_node - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == query_node) + else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); From 0808d7f0fb96e9f6c6536b9033cf2f7499cbb383 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Thu, 31 Oct 2024 12:26:46 +0000 Subject: [PATCH 077/267] Fix FULL JOINs again --- src/Planner/Planner.cpp | 3 ++- src/Planner/Planner.h | 1 + src/Planner/PlannerJoinTree.cpp | 15 ++++++++++++--- src/Planner/findParallelReplicasQuery.cpp | 8 +------- 4 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 17277dfe8cd..260462652fc 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1263,6 +1263,7 @@ Planner::Planner(const QueryTreeNodePtr & query_tree_, findQueryForParallelReplicas(query_tree, select_query_options), findTableForParallelReplicas(query_tree, select_query_options), collectFiltersForAnalysis(query_tree, select_query_options)))) + , root_planner(true) { } @@ -1537,7 +1538,7 @@ void Planner::buildPlanForQueryNode() JoinTreeQueryPlan join_tree_query_plan; if 
(planner_context->getMutableQueryContext()->canUseTaskBasedParallelReplicas() - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node && !root_planner) { join_tree_query_plan = buildQueryPlanForParallelReplicas(query_node, planner_context, select_query_info.storage_limits); } diff --git a/src/Planner/Planner.h b/src/Planner/Planner.h index ae78f05cbd4..bf11c9ef9cd 100644 --- a/src/Planner/Planner.h +++ b/src/Planner/Planner.h @@ -82,6 +82,7 @@ private: StorageLimitsList storage_limits; std::set used_row_policies; QueryNodeToPlanStepMapping query_node_to_plan_step_mapping; + bool root_planner = false; }; } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 481cb9b8649..160d7f07d5b 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -659,6 +659,7 @@ std::unique_ptr createComputeAliasColumnsStep( } JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression, + const QueryNode & parent_query_node, const SelectQueryInfo & select_query_info, const SelectQueryOptions & select_query_options, PlannerContextPtr & planner_context, @@ -982,7 +983,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) + else if ( + ClusterProxy::canUseParallelReplicasOnInitiator(query_context) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); @@ -1815,6 +1819,7 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { + const QueryNode & parent_query_node = query_node->as(); auto table_expressions_stack = buildTableExpressionsStack(query_node->as().getJoinTree()); size_t table_expressions_stack_size = table_expressions_stack.size(); bool is_single_table_expression = table_expressions_stack_size == 1; @@ -1850,7 +1855,9 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, * Examples: Distributed, LiveView, Merge storages. */ auto left_table_expression = table_expressions_stack.front(); - auto left_table_expression_query_plan = buildQueryPlanForTableExpression(left_table_expression, + auto left_table_expression_query_plan = buildQueryPlanForTableExpression( + left_table_expression, + parent_query_node, select_query_info, select_query_options, planner_context, @@ -1923,7 +1930,9 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, * table expression in subquery. 
*/ bool is_remote = planner_context->getTableExpressionDataOrThrow(table_expression).isRemote(); - query_plans_stack.push_back(buildQueryPlanForTableExpression(table_expression, + query_plans_stack.push_back(buildQueryPlanForTableExpression( + table_expression, + parent_query_node, select_query_info, select_query_options, planner_context, diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index d92500e82fc..63c0ce8eb68 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -101,17 +101,11 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre auto join_strictness = join_node.getStrictness(); if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) - { query_tree_node = join_node.getLeftTableExpression().get(); - } else if (join_kind == JoinKind::Right) - { query_tree_node = join_node.getRightTableExpression().get(); - } else - { return {}; - } break; } @@ -275,7 +269,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) - return nullptr; + return query_node; /// This is needed to avoid infinite recursion. auto mutable_context = Context::createCopy(context); From b04b332d8798396a5fbb1162c726dcb14b96e8cd Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 30 Oct 2024 23:20:41 +0100 Subject: [PATCH 078/267] support max_joined_block_rows --- src/Interpreters/ConcurrentHashJoin.cpp | 47 ++++++++++++------- src/Interpreters/ConcurrentHashJoin.h | 8 ++-- src/Interpreters/HashJoin/HashJoin.cpp | 7 +-- src/Interpreters/HashJoin/HashJoin.h | 2 +- src/Interpreters/HashJoin/ScatteredBlock.h | 9 ++++ src/Interpreters/IJoin.h | 13 +++-- .../Transforms/JoiningTransform.cpp | 10 +++- src/Processors/Transforms/JoiningTransform.h | 4 ++ .../02962_max_joined_block_rows.sql | 4 +- 9 files changed, 64 insertions(+), 40 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 6ebf72bd33e..746ce5d4e9f 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -27,7 +27,9 @@ #include #include +#include #include +#include using namespace DB; @@ -123,9 +125,7 @@ ConcurrentHashJoin::ConcurrentHashJoin( auto inner_hash_join = std::make_shared(); inner_hash_join->data = std::make_unique( table_join_, right_sample_block, any_take_last_row_, reserve_size, fmt::format("concurrent{}", idx)); - /// Non zero `max_joined_block_rows` allows to process block partially and return not processed part. - /// TODO: It's not handled properly in ConcurrentHashJoin case, so we set it to 0 to disable this feature. 
- inner_hash_join->data->setMaxJoinedBlockRows(0); + inner_hash_join->data->setMaxJoinedBlockRows(table_join->maxJoinedBlockRows()); hash_joins[idx] = std::move(inner_hash_join); }); } @@ -222,35 +222,50 @@ bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_l void ConcurrentHashJoin::joinBlock(Block & block, std::shared_ptr & /*not_processed*/) { Blocks res; - std::shared_ptr not_processed; - joinBlock(block, res, not_processed); + ExtraScatteredBlocks extra_blocks; + joinBlock(block, extra_blocks, res); + chassert(!extra_blocks.rows()); block = concatenateBlocks(res); } -void ConcurrentHashJoin::joinBlock(Block & block, std::vector & res, std::shared_ptr & /*not_processed*/) +void ConcurrentHashJoin::joinBlock(Block & block, ExtraScatteredBlocks & extra_blocks, std::vector & res) { - hash_joins[0]->data->materializeColumnsFromLeftBlock(block); + ScatteredBlocks dispatched_blocks; + auto & remaining_blocks = extra_blocks.remaining_blocks; + if (extra_blocks.rows()) + { + dispatched_blocks = std::move(remaining_blocks); + } + else + { + hash_joins[0]->data->materializeColumnsFromLeftBlock(block); + dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_left, block); + } - auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_left, block); block = {}; + + /// Just in case, should be no-op always + remaining_blocks.resize(slots); + + chassert(res.empty()); + res.clear(); + res.reserve(dispatched_blocks.size()); + for (size_t i = 0; i < dispatched_blocks.size(); ++i) { std::shared_ptr none_extra_block; auto & hash_join = hash_joins[i]; auto & dispatched_block = dispatched_blocks[i]; - if (i == 0 || dispatched_block.rows()) - hash_join->data->joinBlock(dispatched_block, none_extra_block); + if (dispatched_block && (i == 0 || dispatched_block.rows())) + hash_join->data->joinBlock(dispatched_block, remaining_blocks[i]); if (none_extra_block && !none_extra_block->empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "not_processed should be empty"); } - - chassert(res.empty()); - res.clear(); - res.reserve(dispatched_blocks.size()); for (size_t i = 0; i < dispatched_blocks.size(); ++i) { - if (i == 0 || dispatched_blocks[i].rows()) - res.emplace_back(std::move(dispatched_blocks[i]).getSourceBlock()); + auto & dispatched_block = dispatched_blocks[i]; + if (dispatched_block && (i == 0 || dispatched_block.rows())) + res.emplace_back(std::move(dispatched_block).getSourceBlock()); } } diff --git a/src/Interpreters/ConcurrentHashJoin.h b/src/Interpreters/ConcurrentHashJoin.h index 33407045c44..48d487ba433 100644 --- a/src/Interpreters/ConcurrentHashJoin.h +++ b/src/Interpreters/ConcurrentHashJoin.h @@ -1,13 +1,11 @@ #pragma once -#include #include -#include #include #include #include -#include #include +#include #include #include #include @@ -57,8 +55,8 @@ public: bool alwaysReturnsEmptySet() const override; bool supportParallelJoin() const override { return true; } - bool supportsJoinWithManyResultBlocks() const override { return true; } - void joinBlock(Block & block, std::vector & res, std::shared_ptr & not_processed) override; + bool isScatteredJoin() const override { return true; } + void joinBlock(Block & block, ExtraScatteredBlocks & extra_blocks, std::vector & res) override; IBlocksStreamPtr getNonJoinedBlocks(const Block & left_sample_block, const Block & result_sample_block, UInt64 max_block_size) const override; diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 
526ac456f7f..38175770e35 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -1058,7 +1058,7 @@ void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) } } -void HashJoin::joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed) +void HashJoin::joinBlock(ScatteredBlock & block, ScatteredBlock & remaining_block) { if (!data) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot join after data has been released"); @@ -1089,7 +1089,6 @@ void HashJoin::joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed) prefer_use_maps_all, [&](auto kind_, auto strictness_, auto & maps_vector_) { - ScatteredBlock remaining_block; if constexpr (std::is_same_v, std::vector>) { remaining_block = HashJoinMethods::joinBlockImpl( @@ -1109,10 +1108,6 @@ void HashJoin::joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed) { throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown maps type"); } - if (remaining_block.rows()) - not_processed = std::make_shared(std::move(remaining_block).getSourceBlock()); - else - not_processed.reset(); }); chassert(joined); diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 9e42d58e6b0..5fc12879674 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -154,7 +154,7 @@ public: void joinBlock(Block & block, ExtraBlockPtr & not_processed) override; /// Called directly from ConcurrentJoin::joinBlock - void joinBlock(ScatteredBlock & block, ExtraBlockPtr & not_processed); + void joinBlock(ScatteredBlock & block, ScatteredBlock & remaining_block); /// Check joinGet arguments and infer the return type. DataTypePtr joinGetCheckAndGetReturnType(const DataTypes & data_types, const String & column_name, bool or_null) const; diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index d8077c30c53..729377f6758 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -325,4 +325,13 @@ private: using ScatteredBlocks = std::vector; +struct ExtraScatteredBlocks +{ + ScatteredBlocks remaining_blocks; + + bool rows() const + { + return std::ranges::any_of(remaining_blocks, [](const auto & bl) { return bl.rows(); }); + } +}; } diff --git a/src/Interpreters/IJoin.h b/src/Interpreters/IJoin.h index 4bfd43598ec..58cf5254851 100644 --- a/src/Interpreters/IJoin.h +++ b/src/Interpreters/IJoin.h @@ -3,9 +3,10 @@ #include #include -#include -#include #include +#include +#include +#include #include namespace DB @@ -90,13 +91,11 @@ public: /// Could be called from different threads in parallel. 
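/// [Editor's illustration, not part of the patch] How the scattered variant declared below is
/// meant to be driven: `extra_blocks` carries per-slot remainders between calls, so the caller
/// loops until every slot is drained. A sketch (push_downstream is a hypothetical helper):
///
///     ExtraScatteredBlocks extra_blocks;
///     std::vector<Block> res;
///     do
///     {
///         res.clear();
///         join->joinBlock(block, extra_blocks, res); /// consumes `block` on the first pass only
///         for (auto & joined : res)
///             push_downstream(std::move(joined));
///     } while (extra_blocks.rows());                 /// re-enter while any slot kept a remainder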
virtual void joinBlock(Block & block, std::shared_ptr & not_processed) = 0; - virtual bool supportsJoinWithManyResultBlocks() const { return false; } + virtual bool isScatteredJoin() const { return false; } virtual void joinBlock( - [[maybe_unused]] Block & block, - [[maybe_unused]] std::vector & res, - [[maybe_unused]] std::shared_ptr & not_processed) + [[maybe_unused]] Block & block, [[maybe_unused]] ExtraScatteredBlocks & extra_blocks, [[maybe_unused]] std::vector & res) { - throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Clone method is not supported for {}", getName()); + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "joinBlock is not supported for {}", getName()); } /** Set/Get totals for right table diff --git a/src/Processors/Transforms/JoiningTransform.cpp b/src/Processors/Transforms/JoiningTransform.cpp index ceb95cd7ad1..2862575b541 100644 --- a/src/Processors/Transforms/JoiningTransform.cpp +++ b/src/Processors/Transforms/JoiningTransform.cpp @@ -201,8 +201,14 @@ Blocks JoiningTransform::readExecute(Chunk & chunk) auto join_block = [&]() { - if (join->supportsJoinWithManyResultBlocks()) - join->joinBlock(block, res, not_processed); + if (join->isScatteredJoin()) + { + join->joinBlock(block, remaining_blocks, res); + if (remaining_blocks.rows()) + not_processed = std::make_shared(); + else + not_processed.reset(); + } else { join->joinBlock(block, not_processed); diff --git a/src/Processors/Transforms/JoiningTransform.h b/src/Processors/Transforms/JoiningTransform.h index c3445cf3e36..d0042983eb5 100644 --- a/src/Processors/Transforms/JoiningTransform.h +++ b/src/Processors/Transforms/JoiningTransform.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -83,6 +84,9 @@ private: bool default_totals; bool initialized = false; + /// Only used with ConcurrentHashJoin + ExtraScatteredBlocks remaining_blocks; + ExtraBlockPtr not_processed; FinishCounterPtr finish_counter; diff --git a/tests/queries/0_stateless/02962_max_joined_block_rows.sql b/tests/queries/0_stateless/02962_max_joined_block_rows.sql index c31ab5e1132..27b2a74b802 100644 --- a/tests/queries/0_stateless/02962_max_joined_block_rows.sql +++ b/tests/queries/0_stateless/02962_max_joined_block_rows.sql @@ -27,11 +27,9 @@ SETTINGS max_joined_block_size_rows = 10; SELECT '--'; --- parallel_hash doen't support max_joined_block_size_rows - SET join_algorithm = 'parallel_hash'; -SELECT max(bs) > 10, b FROM ( +SELECT max(bs) <= 10, b FROM ( SELECT blockSize() as bs, * FROM t1 JOIN t2 ON t1.a = t2.a ) GROUP BY b ORDER BY b From c34de3cda877b2389d9a90be81a1bdddc2a515ca Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 31 Oct 2024 20:00:41 +0100 Subject: [PATCH 079/267] fix tidy --- src/Interpreters/ConcurrentHashJoin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 746ce5d4e9f..d243b223241 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -234,7 +234,7 @@ void ConcurrentHashJoin::joinBlock(Block & block, ExtraScatteredBlocks & extra_b auto & remaining_blocks = extra_blocks.remaining_blocks; if (extra_blocks.rows()) { - dispatched_blocks = std::move(remaining_blocks); + dispatched_blocks.swap(remaining_blocks); } else { From fce3a0463b182b01f8c429b58e172550b27cd3eb Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 31 Oct 2024 20:30:05 +0100 Subject: [PATCH 080/267] fix perf test --- src/Interpreters/HashJoin/AddedColumns.h | 4 +++- 1 file changed, 3 
insertions(+), 1 deletion(-) diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index 3e6a29da6c3..8316d5df00f 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -1,4 +1,6 @@ #pragma once + +#include #include #include @@ -167,7 +169,7 @@ public: return; /// Do not allow big allocations when user set max_joined_block_rows to huge value - size_t reserve_size = std::min(max_joined_block_rows, rows_to_add * 2); /// rows_to_add + size_t reserve_size = std::min(max_joined_block_rows, DEFAULT_BLOCK_SIZE * 2); /// rows_to_add if (need_replicate) /// Reserve 10% more space for columns, because some rows can be repeated From 752dfead2c5fc686b64d062b7f032196657295ff Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 13:06:29 +0000 Subject: [PATCH 081/267] Only RIGHT JOINs test --- src/Planner/PlannerJoinTree.cpp | 17 +++-- src/Planner/findParallelReplicasQuery.cpp | 11 ++- .../03254_pr_join_on_dups.reference | 72 ------------------- .../0_stateless/03254_pr_join_on_dups.sql | 28 +------- 4 files changed, 26 insertions(+), 102 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 160d7f07d5b..d79aa626d5e 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -659,7 +659,7 @@ std::unique_ptr createComputeAliasColumnsStep( } JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression, - const QueryNode & parent_query_node, + [[maybe_unused]] const QueryNode & parent_query_node, const SelectQueryInfo & select_query_info, const SelectQueryOptions & select_query_options, PlannerContextPtr & planner_context, @@ -958,6 +958,14 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres return true; }; + LOG_DEBUG( + getLogger(__PRETTY_FUNCTION__), + "parallel_replicas_node={} parent_query_node={}", + UInt64(planner_context->getGlobalPlannerContext()->parallel_replicas_node), + UInt64(&parent_query_node)); + + // const JoinNode * table_join_node = parent_query_node.getJoinTree()->as(); + /// query_plan can be empty if there is nothing to read if (query_plan.isInitialized() && parallel_replicas_enabled_for_storage(storage, settings)) { @@ -984,9 +992,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres } } else if ( - ClusterProxy::canUseParallelReplicasOnInitiator(query_context) - && planner_context->getGlobalPlannerContext()->parallel_replicas_node - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node) + ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) + // && (!table_join_node + // || (table_join_node && planner_context->getGlobalPlannerContext()->parallel_replicas_node + // && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node))) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 63c0ce8eb68..8d818daa575 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -265,11 +265,17 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr auto stack = getSupportingParallelReplicasQuery(query_tree_node.get()); /// Empty stack means that storage does not support parallel replicas. 
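/// [Editor's note, illustration only] The stack built above records the chain of QUERY/UNION
/// nodes from the root down to the subtree that can be read with parallel replicas, with the
/// innermost node on top. For a query such as
///
///     SELECT ... FROM (SELECT ... FROM t LEFT JOIN r ON ...) AS sub
///
/// the traversal descends only into the left side of the LEFT JOIN, and the stack comes back
/// as { inner QUERY (top), outer QUERY }, which the code below inspects.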
if (stack.empty()) + { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); return nullptr; + } /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) + { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); return query_node; + } /// This is needed to avoid infinite recursion. auto mutable_context = Context::createCopy(context); @@ -303,7 +309,10 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr new_stack.pop(); } } - + if (!res) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); + else + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); return res; } diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.reference b/tests/queries/0_stateless/03254_pr_join_on_dups.reference index 58602bafb5d..95cb0d8cae2 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.reference +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.reference @@ -88,34 +88,6 @@ right subs 4 l5 \N 4 r6 nr6 4 l6 \N 4 r6 nr6 9 l9 \N 9 r9 nr9 -full -0 \N 6 r7 nr7 -0 \N 7 r8 nr8 -1 l1 1 1 r1 \N -1 l1 1 1 r2 \N -2 l2 2 2 r3 \N -2 l3 3 2 r3 \N -3 l4 4 3 r4 \N -3 l4 4 3 r5 \N -4 l5 \N 4 r6 nr6 -4 l6 \N 4 r6 nr6 -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 9 r9 nr9 -full subs -0 \N 6 r7 nr7 -0 \N 7 r8 nr8 -1 l1 1 1 r1 \N -1 l1 1 1 r2 \N -2 l2 2 2 r3 \N -2 l3 3 2 r3 \N -3 l4 4 3 r4 \N -3 l4 4 3 r5 \N -4 l5 \N 4 r6 nr6 -4 l6 \N 4 r6 nr6 -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 9 r9 nr9 self inner 1 l1 1 1 l1 1 2 l2 2 2 l2 2 @@ -227,47 +199,3 @@ self right nullable vs not nullable 3 l4 4 2 l3 3 4 l5 \N 3 l4 4 4 l6 \N 3 l4 4 -self full -1 l1 1 1 l1 1 -2 l2 2 2 l2 2 -2 l2 2 2 l3 3 -2 l3 3 2 l2 2 -2 l3 3 2 l3 3 -3 l4 4 3 l4 4 -4 l5 \N 4 l5 \N -4 l5 \N 4 l6 \N -4 l6 \N 4 l5 \N -4 l6 \N 4 l6 \N -5 l7 \N 5 l7 \N -8 l8 \N 8 l8 \N -9 l9 \N 9 l9 \N -self full nullable -0 \N 4 l5 \N -0 \N 4 l6 \N -0 \N 5 l7 \N -0 \N 8 l8 \N -0 \N 9 l9 \N -1 l1 1 1 l1 1 -2 l2 2 2 l2 2 -2 l3 3 2 l3 3 -3 l4 4 3 l4 4 -4 l5 \N 0 \N -4 l6 \N 0 \N -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 0 \N -self full nullable vs not nullable -0 \N 4 l5 \N -0 \N 4 l6 \N -0 \N 5 l7 \N -0 \N 8 l8 \N -0 \N 9 l9 \N -1 l1 1 1 l1 1 -2 l2 2 2 l2 2 -2 l3 3 2 l2 2 -3 l4 4 2 l3 3 -4 l5 \N 3 l4 4 -4 l6 \N 3 l4 4 -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 0 \N diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 71695c0d486..22e94507c83 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -9,7 +9,7 @@ insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); -set enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; +set enable_parallel_replicas = 1, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; @@ -29,15 +29,6 @@ select 
'right'; select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; select 'right subs'; select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; ---select 'right expr'; ---select X.*, Y.* from X right join Y on (X.id + 1) = (Y.id + 1) order by id; - -select 'full'; -select X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; -select 'full subs'; -select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; ---select 'full expr'; ---select X.*, Y.* from X full join Y on (X.id + 1) = (Y.id + 1) order by id; select 'self inner'; select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; @@ -45,7 +36,6 @@ select 'self inner nullable'; select X.*, s.* from X inner join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self inner nullable vs not nullable'; select X.*, s.* from X inner join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; --- TODO: s.y_b == '' instead of NULL select 'self inner nullable vs not nullable 2'; select Y.*, s.* from Y inner join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; @@ -55,7 +45,6 @@ select 'self left nullable'; select X.*, s.* from X left join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self left nullable vs not nullable'; select X.*, s.* from X left join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; --- TODO: s.y_b == '' instead of NULL select 'self left nullable vs not nullable 2'; select Y.*, s.* from Y left join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; @@ -65,17 +54,6 @@ select 'self right nullable'; select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self right nullable vs not nullable'; select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; ---select 'self right nullable vs not nullable 2'; ---select Y.*, s.* from Y right join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; -select 'self full'; -select X.*, s.* from X full join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; -select 'self full nullable'; -select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; -select 'self full nullable vs not nullable'; -select X.*, s.* from X full join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; ---select 'self full nullable vs not nullable 2'; ---select Y.*, s.* from Y full join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; - --- drop table X; --- drop table Y; +drop table X sync; +drop table Y sync; From 31f761508875de1fdc678429b316e19556538eb4 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 14:52:42 +0000 Subject: [PATCH 082/267] Fix --- src/Planner/Planner.cpp | 4 ++-- src/Planner/Planner.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 
260462652fc..4b5a2b903c0 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1263,7 +1263,7 @@ Planner::Planner(const QueryTreeNodePtr & query_tree_, findQueryForParallelReplicas(query_tree, select_query_options), findTableForParallelReplicas(query_tree, select_query_options), collectFiltersForAnalysis(query_tree, select_query_options)))) - , root_planner(true) + // , root_planner(true) { } @@ -1538,7 +1538,7 @@ void Planner::buildPlanForQueryNode() JoinTreeQueryPlan join_tree_query_plan; if (planner_context->getMutableQueryContext()->canUseTaskBasedParallelReplicas() - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node && !root_planner) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node /* && !root_planner*/) { join_tree_query_plan = buildQueryPlanForParallelReplicas(query_node, planner_context, select_query_info.storage_limits); } diff --git a/src/Planner/Planner.h b/src/Planner/Planner.h index bf11c9ef9cd..8d771c343c3 100644 --- a/src/Planner/Planner.h +++ b/src/Planner/Planner.h @@ -82,7 +82,7 @@ private: StorageLimitsList storage_limits; std::set used_row_policies; QueryNodeToPlanStepMapping query_node_to_plan_step_mapping; - bool root_planner = false; + // bool root_planner = false; }; } From ac0902b08820dcd64cb41ba6bd34e4957fe8eadf Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 15:57:44 +0000 Subject: [PATCH 083/267] Fix --- src/Planner/findParallelReplicasQuery.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 8d818daa575..e89f06d6cc3 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -274,7 +274,8 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr if (stack.top() == query_tree_node.get()) { LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); - return query_node; + return nullptr; + // return query_node; } /// This is needed to avoid infinite recursion. 
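[Editor's note] The "avoid infinite recursion" comment closing the hunk above refers to the code
that follows it in the source, unchanged by this patch: the same query is re-planned in a copied
context with parallel replicas switched off, so the nested planning pass cannot take this branch
again. A rough sketch of the idea (the exact setting name is an assumption, not taken from this
patch):

    auto mutable_context = Context::createCopy(context);
    /// With parallel replicas disabled, the recursive planning pass bails out early
    /// instead of calling findQueryForParallelReplicas again.
    mutable_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0));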
From 38a3c6707525fba84c190e6a7e42f791b2da5659 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 1 Nov 2024 18:17:08 +0000 Subject: [PATCH 084/267] revert unnecessary changes --- .../Algorithms/CollapsingSortedAlgorithm.cpp | 6 +- .../Algorithms/CollapsingSortedAlgorithm.h | 6 +- .../Algorithms/MergingSortedAlgorithm.cpp | 3 +- .../Algorithms/MergingSortedAlgorithm.h | 6 +- .../Algorithms/ReplacingSortedAlgorithm.cpp | 6 +- .../Algorithms/ReplacingSortedAlgorithm.h | 6 +- .../VersionedCollapsingAlgorithm.cpp | 6 +- .../Algorithms/VersionedCollapsingAlgorithm.h | 6 +- .../Merges/CollapsingSortedTransform.h | 2 +- .../Merges/MergingSortedTransform.cpp | 2 +- .../Merges/MergingSortedTransform.h | 2 +- .../Merges/ReplacingSortedTransform.h | 4 +- .../Merges/VersionedCollapsingTransform.h | 4 +- .../QueryPlan/BuildQueryPipelineSettings.h | 1 + src/QueryPipeline/QueryPipelineBuilder.h | 6 - src/QueryPipeline/QueryPlanResourceHolder.h | 2 - src/Storages/MergeTree/MergeTask.cpp | 129 +++++++++++++++--- src/Storages/MergeTree/MergeTask.h | 5 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 5 +- 19 files changed, 134 insertions(+), 73 deletions(-) diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp index 1560e88ffef..07ee8f4ddef 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include @@ -30,18 +29,17 @@ CollapsingSortedAlgorithm::CollapsingSortedAlgorithm( size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - std::shared_ptr temp_data_buffer_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) : IMergingAlgorithmWithSharedChunks( header_, num_inputs, std::move(description_), - temp_data_buffer_.get(), + out_row_sources_buf_, max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) , sign_column_number(header_.getPositionByName(sign_column)) , only_positive_sign(only_positive_sign_) - , temp_data_buffer(temp_data_buffer_) , log(log_) { } diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h index b7bb9914cf8..99fd95d82d9 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h @@ -11,8 +11,6 @@ namespace Poco namespace DB { -class TemporaryDataBuffer; - /** Merges several sorted inputs to one. * For each group of consecutive identical values of the primary key (the columns by which the data is sorted), * keeps no more than one row with the value of the column `sign_column = -1` ("negative row") @@ -37,7 +35,7 @@ public: size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "CollapsingSortedAlgorithm"; } @@ -64,8 +62,6 @@ private: PODArray current_row_sources; /// Sources of rows with the current primary key size_t count_incorrect_data = 0; /// To prevent too many error messages from writing to the log. 
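/// [Editor's illustration, not part of the patch] Across this commit, `out_row_sources_buf`
/// replaces the TemporaryDataBuffer members: during the horizontal merge each algorithm writes
/// one RowSourcePart byte per emitted row (recording which input part the row came from), and
/// the vertical merge later replays those bytes to gather the remaining columns. The producer
/// side looks roughly like this:
///
///     if (out_row_sources_buf)
///     {
///         RowSourcePart row_source(current.impl->order); /// index of the source part
///         out_row_sources_buf->write(row_source.data);   /// exactly one byte per output row
///     }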
- std::shared_ptr temp_data_buffer = nullptr; - LoggerPtr log; void reportIncorrectData(); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index d4e4ba6aa5f..3a9cf7ee141 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -3,7 +3,6 @@ #include #include #include -#include namespace DB { @@ -16,7 +15,7 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_, - std::shared_ptr out_row_sources_buf_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) : header(std::move(header_)) , merged_data(use_average_block_sizes, max_block_size_, max_block_size_bytes_) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index fc300e41026..c889668a38e 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -9,8 +9,6 @@ namespace DB { -class TemporaryDataBuffer; - /// Merges several sorted inputs into one sorted output. class MergingSortedAlgorithm final : public IMergingAlgorithm { @@ -23,7 +21,7 @@ public: size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_ = 0, - std::shared_ptr out_row_sources_buf_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); void addInput(); @@ -47,7 +45,7 @@ private: /// Used in Vertical merge algorithm to gather non-PK/non-index columns (on next step) /// If it is not nullptr then it should be populated during execution - std::shared_ptr out_row_sources_buf = nullptr; + WriteBuffer * out_row_sources_buf = nullptr; /// Chunks currently being merged. 
Inputs current_inputs; diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp index a3a33080f52..cd347d371d9 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp @@ -5,7 +5,6 @@ #include #include #include -#include namespace DB { @@ -38,13 +37,12 @@ ReplacingSortedAlgorithm::ReplacingSortedAlgorithm( const String & version_column, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes, bool cleanup_, bool enable_vertical_final_) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) , cleanup(cleanup_), enable_vertical_final(enable_vertical_final_) - , temp_data_buffer(temp_data_buffer_) { if (!is_deleted_column.empty()) is_deleted_column_number = header_.getPositionByName(is_deleted_column); diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h index d3b9837a253..2f23f2a5c4d 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h @@ -24,8 +24,6 @@ struct ChunkSelectFinalIndices : public ChunkInfoCloneable temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final_ = false); @@ -61,8 +59,6 @@ private: RowRef selected_row; /// Last row with maximum version for current primary key, may extend lifetime of chunk in input source size_t max_pos = 0; /// The position (into current_row_sources) of the row with the highest version. - std::shared_ptr temp_data_buffer = nullptr; - /// Sources of rows with the current primary key. PODArray current_row_sources; diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp index 1ceb1f46234..9f124c6ba18 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp @@ -1,7 +1,6 @@ #include #include #include -#include namespace DB { @@ -15,13 +14,12 @@ VersionedCollapsingAlgorithm::VersionedCollapsingAlgorithm( const String & sign_column_, size_t max_block_size_rows_, size_t max_block_size_bytes_, - std::shared_ptr temp_data_buffer_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) /// -1 for +1 in FixedSizeDequeWithGaps's internal buffer. 3 is a reasonable minimum size to collapse anything. 
, max_rows_in_queue(std::min(std::max(3, max_block_size_rows_), MAX_ROWS_IN_MULTIVERSION_QUEUE) - 1) , current_keys(max_rows_in_queue) - , temp_data_buffer(temp_data_buffer_) { sign_column_number = header_.getPositionByName(sign_column_); } diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h index 6f877459147..e6d20ddac75 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h @@ -8,8 +8,6 @@ namespace DB { -class TemporaryDataBuffer; - /** Merges several sorted inputs to one. * For each group of consecutive identical values of the sorting key * (the columns by which the data is sorted, including specially specified version column), @@ -24,7 +22,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "VersionedCollapsingAlgorithm"; } @@ -39,8 +37,6 @@ private: FixedSizeDequeWithGaps current_keys; Int8 sign_in_queue = 0; - std::shared_ptr temp_data_buffer = nullptr; - std::queue current_row_sources; /// Sources of rows with the current primary key void insertGap(size_t gap_size); diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index 9b09c802783..99fb700abf1 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -23,7 +23,7 @@ public: bool only_positive_sign, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr out_row_sources_buf_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index 13330dcff6d..d2895a2a2e9 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -20,7 +20,7 @@ MergingSortedTransform::MergingSortedTransform( SortingQueueStrategy sorting_queue_strategy, UInt64 limit_, bool always_read_till_end_, - std::shared_ptr out_row_sources_buf_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes, bool have_all_inputs_) : IMergingTransform( diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index fb8e5ce74e3..6e52450efa7 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -20,7 +20,7 @@ public: SortingQueueStrategy sorting_queue_strategy, UInt64 limit_ = 0, bool always_read_till_end_ = false, - std::shared_ptr out_row_sources_buf_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool have_all_inputs_ = true); diff --git a/src/Processors/Merges/ReplacingSortedTransform.h b/src/Processors/Merges/ReplacingSortedTransform.h index a9d9f4fb619..dc262aab9ee 100644 --- a/src/Processors/Merges/ReplacingSortedTransform.h +++ b/src/Processors/Merges/ReplacingSortedTransform.h @@ -21,7 +21,7 @@ public: const String & is_deleted_column, const String & version_column, size_t 
max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final = false) @@ -34,7 +34,7 @@ public: version_column, max_block_size_rows, max_block_size_bytes, - temp_data_buffer_, + out_row_sources_buf_, use_average_block_sizes, cleanup, enable_vertical_final) diff --git a/src/Processors/Merges/VersionedCollapsingTransform.h b/src/Processors/Merges/VersionedCollapsingTransform.h index 0bdccd4795d..32b5d7bf343 100644 --- a/src/Processors/Merges/VersionedCollapsingTransform.h +++ b/src/Processors/Merges/VersionedCollapsingTransform.h @@ -21,7 +21,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, @@ -31,7 +31,7 @@ public: sign_column_, max_block_size_rows, max_block_size_bytes, - temp_data_buffer_, + out_row_sources_buf_, use_average_block_sizes) { } diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h index 1c03a4d74cd..d99f9a7d1f1 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h @@ -20,6 +20,7 @@ struct BuildQueryPipelineSettings ExpressionActionsSettings actions_settings; QueryStatusPtr process_list_element; ProgressCallback progress_callback = nullptr; + TemporaryFileLookupPtr temporary_file_lookup; const ExpressionActionsSettings & getActionsSettings() const { return actions_settings; } static BuildQueryPipelineSettings fromContext(ContextPtr from); diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index 1e274a97a08..a9e5b1535c0 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -197,12 +197,6 @@ public: void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } - template - void addResource(Resource resource, std::vector QueryPlanResourceHolder::*field) - { - (resources.*field).push_back(std::move(resource)); - } - /// Convert query pipeline to pipe. 
    static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources);
    static QueryPipeline getPipeline(QueryPipelineBuilder builder);

diff --git a/src/QueryPipeline/QueryPlanResourceHolder.h b/src/QueryPipeline/QueryPlanResourceHolder.h
index ee2ecc25cd5..10f7f39ab09 100644
--- a/src/QueryPipeline/QueryPlanResourceHolder.h
+++ b/src/QueryPipeline/QueryPlanResourceHolder.h
@@ -13,7 +13,6 @@ class QueryPlan;
 class Context;
 struct QueryIdHolder;
-class TemporaryDataBuffer;

 struct QueryPlanResourceHolder
 {
@@ -34,7 +33,6 @@ struct QueryPlanResourceHolder
     std::vector<StoragePtr> storage_holders;
     std::vector<TableLockHolder> table_locks;
     std::vector<std::shared_ptr<QueryIdHolder>> query_id_holders;
-    std::vector<std::shared_ptr<TemporaryDataBuffer>> rows_sources_temporary_file;
 };

 }
diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp
index 9c2bd59e7cb..e73bc18557c 100644
--- a/src/Storages/MergeTree/MergeTask.cpp
+++ b/src/Storages/MergeTree/MergeTask.cpp
@@ -65,6 +65,11 @@ namespace ProfileEvents
     extern const Event MergeProjectionStageExecuteMilliseconds;
 }

+namespace CurrentMetrics
+{
+    extern const Metric TemporaryFilesForMerge;
+}
+
 namespace DB
 {
 namespace Setting
@@ -124,6 +129,66 @@ static ColumnsStatistics getStatisticsForColumns(
     return all_statistics;
 }

+
+/// Manages the "rows_sources" temporary file that is used during vertical merge.
+class RowsSourcesTemporaryFile : public ITemporaryFileLookup
+{
+public:
+    /// A logical name of the temporary file under which it will be known to the plan steps that use it.
+    static constexpr auto FILE_ID = "rows_sources";
+
+    explicit RowsSourcesTemporaryFile(TemporaryDataOnDiskScopePtr temporary_data_on_disk_)
+        : temporary_data_on_disk(temporary_data_on_disk_->childScope(CurrentMetrics::TemporaryFilesForMerge))
+    {
+    }
+
+    WriteBuffer & getTemporaryFileForWriting(const String & name) override
+    {
+        if (name != FILE_ID)
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name);
+
+        if (tmp_data_buffer)
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was already requested for writing, there must be only one writer");
+
+        tmp_data_buffer = std::make_unique<TemporaryDataBuffer>(temporary_data_on_disk.get());
+        return *tmp_data_buffer;
+    }
+
+    std::unique_ptr<ReadBuffer> getTemporaryFileForReading(const String & name) override
+    {
+        if (name != FILE_ID)
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name);
+
+        if (!finalized)
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file is not finalized yet");
+
+        /// tmp_disk might not create a real file if no data was written to it.
+        if (final_size == 0)
+            return std::make_unique<ReadBufferFromEmptyFile>();
+
+        /// Reopen the file for each read so that multiple reads can be performed in parallel and there is no need to seek to the beginning.
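/// [Editor's illustration, not part of the patch] The intended lifecycle of this lookup object,
/// using only the members defined above:
///
///     RowsSourcesTemporaryFile lookup(context->getTempDataOnDisk());
///     /// Horizontal merge: exactly one writer, then finalize.
///     WriteBuffer & out = lookup.getTemporaryFileForWriting(RowsSourcesTemporaryFile::FILE_ID);
///     /// ... one RowSourcePart byte per merged row is written to `out` ...
///     size_t bytes_written = lookup.finalizeWriting();
///     /// Vertical merge: each column stream opens its own reader, so reads run in parallel.
///     auto rows_sources = lookup.getTemporaryFileForReading(RowsSourcesTemporaryFile::FILE_ID);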
+        return tmp_data_buffer->read();
+    }
+
+    /// Returns written data size in bytes
+    size_t finalizeWriting()
+    {
+        if (!tmp_data_buffer)
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was not requested for writing");
+
+        auto stat = tmp_data_buffer->finishWriting();
+        finalized = true;
+        final_size = stat.uncompressed_size;
+        return final_size;
+    }
+
+private:
+    std::unique_ptr<TemporaryDataBuffer> tmp_data_buffer;
+    TemporaryDataOnDiskScopePtr temporary_data_on_disk;
+    bool finalized = false;
+    size_t final_size = 0;
+};
+
 static void addMissedColumnsToSerializationInfos(
     size_t num_rows_in_parts,
     const Names & part_columns,
@@ -425,7 +490,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const
         }
         case MergeAlgorithm::Vertical:
         {
-            ctx->rows_sources_temporary_file = std::make_unique<TemporaryDataBuffer>(global_ctx->context->getTempDataOnDisk().get());
+            ctx->rows_sources_temporary_file = std::make_shared<RowsSourcesTemporaryFile>(global_ctx->context->getTempDataOnDisk());

             std::map local_merged_column_to_size;
             for (const auto & part : global_ctx->future_part->parts)
@@ -802,11 +867,24 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const
     if (global_ctx->chosen_merge_algorithm != MergeAlgorithm::Vertical)
         return false;

+    size_t sum_input_rows_exact = global_ctx->merge_list_element_ptr->rows_read;
+    size_t input_rows_filtered = *global_ctx->input_rows_filtered;
     global_ctx->merge_list_element_ptr->columns_written = global_ctx->merging_columns.size();
     global_ctx->merge_list_element_ptr->progress.store(ctx->column_sizes->keyColumnsWeight(), std::memory_order_relaxed);

     /// Ensure data has written to disk.
-    ctx->rows_sources_temporary_file->finishWriting();
+    size_t rows_sources_count = ctx->rows_sources_temporary_file->finalizeWriting();
+    /// In a special case, when there is only one source part and no rows were skipped, we may have
+    /// skipped writing the rows_sources file. Otherwise rows_sources_count must be equal to the total
+    /// number of input rows.
+    /// Note that only one byte index is written for each row, so the number of rows equals the number of bytes written.
+    if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count + input_rows_filtered)
+        throw Exception(
+            ErrorCodes::LOGICAL_ERROR,
+            "Number of rows in source parts ({}) excluding filtered rows ({}) differs from number "
+            "of bytes written to rows_sources file ({}).
It is a bug.", + sum_input_rows_exact, input_rows_filtered, rows_sources_count); + ctx->it_name_and_type = global_ctx->gathering_columns.cbegin(); @@ -838,12 +916,12 @@ class ColumnGathererStep : public ITransformingStep public: ColumnGathererStep( const Header & input_header_, - std::unique_ptr rows_sources_read_buf_, + const String & rows_sources_temporary_file_name_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool is_result_sparse_) : ITransformingStep(input_header_, input_header_, getTraits()) - , rows_sources_read_buf(std::move(rows_sources_read_buf_)) + , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , is_result_sparse(is_result_sparse_) @@ -851,13 +929,15 @@ public: String getName() const override { return "ColumnGatherer"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override { - const auto & header = pipeline.getHeader(); + const auto &header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); - if (!rows_sources_read_buf) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary data buffer for rows sources is not set"); + if (!pipeline_settings.temporary_file_lookup) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); + + auto rows_sources_read_buf = pipeline_settings.temporary_file_lookup->getTemporaryFileForReading(rows_sources_temporary_file_name); auto transform = std::make_unique( header, @@ -892,7 +972,7 @@ private: } MergeTreeData::MergingParams merging_params{}; - std::unique_ptr rows_sources_read_buf; + const String rows_sources_temporary_file_name; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool is_result_sparse; @@ -943,7 +1023,7 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic const auto data_settings = global_ctx->data->getSettings(); auto merge_step = std::make_unique( merge_column_query_plan.getCurrentHeader(), - ctx->rows_sources_temporary_file->read(), + RowsSourcesTemporaryFile::FILE_ID, (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], is_result_sparse); @@ -972,9 +1052,9 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic } auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); + pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_column_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); - builder->addResource(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); return {QueryPipelineBuilder::getPipeline(std::move(*builder)), std::move(indexes_to_recalc)}; } @@ -1347,7 +1427,7 @@ public: const SortDescription & sort_description_, const Names partition_key_columns_, const MergeTreeData::MergingParams & merging_params_, - std::shared_ptr rows_sources_temporary_file_, + const String & rows_sources_temporary_file_name_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool blocks_are_granules_size_, @@ -1357,7 +1437,7 @@ 
public: , sort_description(sort_description_) , partition_key_columns(partition_key_columns_) , merging_params(merging_params_) - , rows_sources_temporary_file(rows_sources_temporary_file_) + , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , blocks_are_granules_size(blocks_are_granules_size_) @@ -1367,7 +1447,7 @@ public: String getName() const override { return "MergeParts"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override { /// The order of the streams is important: when the key is matched, the elements go in the order of the source stream number. /// In the merged part, the lines with the same key must be in the ascending order of the identifier of original part, @@ -1377,6 +1457,14 @@ public: const auto & header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); + WriteBuffer * rows_sources_write_buf = nullptr; + if (!rows_sources_temporary_file_name.empty()) + { + if (!pipeline_settings.temporary_file_lookup) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); + rows_sources_write_buf = &pipeline_settings.temporary_file_lookup->getTemporaryFileForWriting(rows_sources_temporary_file_name); + } + switch (merging_params.mode) { case MergeTreeData::MergingParams::Ordinary: @@ -1389,14 +1477,14 @@ public: SortingQueueStrategy::Default, /* limit_= */0, /* always_read_till_end_= */false, - rows_sources_temporary_file, + rows_sources_write_buf, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Collapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, false, - merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Summing: @@ -1411,7 +1499,7 @@ public: case MergeTreeData::MergingParams::Replacing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.is_deleted_column, merging_params.version_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size, + merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size, cleanup); break; @@ -1424,7 +1512,7 @@ public: case MergeTreeData::MergingParams::VersionedCollapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); break; } @@ -1466,7 +1554,7 @@ private: const SortDescription sort_description; const Names partition_key_columns; const MergeTreeData::MergingParams merging_params{}; - std::shared_ptr rows_sources_temporary_file; + const String rows_sources_temporary_file_name; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool blocks_are_granules_size; @@ -1635,7 +1723,7 @@ void 
MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const sort_description, partition_key_columns, global_ctx->merging_params, - (is_vertical_merge ? ctx->rows_sources_temporary_file : nullptr), /// rows_sources' temporary file is used only for vertical merge + (is_vertical_merge ? RowsSourcesTemporaryFile::FILE_ID : ""), /// rows_sources' temporary file is used only for vertical merge (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], ctx->blocks_are_granules_size, @@ -1700,6 +1788,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const { auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); + pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_parts_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index a6969e3aa48..53792165987 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -42,6 +42,7 @@ namespace DB class MergeTask; using MergeTaskPtr = std::shared_ptr; +class RowsSourcesTemporaryFile; /** * Overview of the merge algorithm @@ -243,7 +244,7 @@ private: bool force_ttl{false}; CompressionCodecPtr compression_codec{nullptr}; size_t sum_input_rows_upper_bound{0}; - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes{}; /// For projections to rebuild @@ -322,7 +323,7 @@ private: struct VerticalMergeRuntimeContext : public IStageRuntimeContext { /// Begin dependencies from previous stage - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes; CompressionCodecPtr compression_codec; std::list::const_iterator it_name_and_type; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 5e9674fb5d6..5efd33ce09a 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -111,11 +111,10 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( } if (!prewhere_actions.steps.empty()) - LOG_TRACE(log, "PREWHERE condition was split into {} steps", prewhere_actions.steps.size()); + LOG_TRACE(log, "PREWHERE condition was split into {} steps: {}", prewhere_actions.steps.size(), prewhere_actions.dumpConditions()); if (prewhere_info) - LOG_TEST(log, "Original PREWHERE DAG:{}\n{}\nPREWHERE actions:\n{}", - prewhere_actions.dumpConditions(), + LOG_TEST(log, "Original PREWHERE DAG:\n{}\nPREWHERE actions:\n{}", prewhere_info->prewhere_actions.dumpDAG(), (!prewhere_actions.steps.empty() ? 
prewhere_actions.dump() : std::string(""))); } From 6d5c707d2cfc029528ba1a32ceb4cd313e198147 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 21:32:07 +0000 Subject: [PATCH 085/267] Cleanup --- src/Planner/findParallelReplicasQuery.cpp | 14 +++++++------- .../02771_parallel_replicas_analyzer.sql | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index e89f06d6cc3..5db67d7c793 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -97,8 +97,8 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre case QueryTreeNodeType::JOIN: { const auto & join_node = query_tree_node->as(); - auto join_kind = join_node.getKind(); - auto join_strictness = join_node.getStrictness(); + const auto join_kind = join_node.getKind(); + const auto join_strictness = join_node.getStrictness(); if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) query_tree_node = join_node.getLeftTableExpression().get(); @@ -266,7 +266,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// Empty stack means that storage does not support parallel replicas. if (stack.empty()) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); return nullptr; } @@ -310,10 +310,10 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr new_stack.pop(); } } - if (!res) - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); - else - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); + // if (!res) + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); + // else + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); return res; } diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql index 081077ba460..a2d26a8fc78 100644 --- a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql @@ -1,5 +1,5 @@ -- Tags: zookeeper -DROP TABLE IF EXISTS join_inner_table__fuzz_146_replicated; +DROP TABLE IF EXISTS join_inner_table__fuzz_146_replicated SYNC; CREATE TABLE join_inner_table__fuzz_146_replicated ( `id` UUID, @@ -52,4 +52,4 @@ WHERE GROUP BY is_initial_query, query ORDER BY is_initial_query DESC, c, query; -DROP TABLE join_inner_table__fuzz_146_replicated; +DROP TABLE join_inner_table__fuzz_146_replicated SYNC; From 1e3f08ab3e48d666cd5e3b02cfecf50915738377 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sat, 2 Nov 2024 19:44:03 +0000 Subject: [PATCH 086/267] Only with analyzer --- tests/queries/0_stateless/03254_pr_join_on_dups.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 22e94507c83..5f2f209d0b0 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -9,7 +9,7 @@ insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); -set 
enable_parallel_replicas = 1, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; +set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; From 2530fd233f3c4d81ff7ad6f18ec0e3a73320c8d0 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sat, 2 Nov 2024 21:36:02 +0000 Subject: [PATCH 087/267] Added 03261_pr_semi_anti_join --- .../03261_pr_semi_anti_join.reference | 16 +++++++++++ .../0_stateless/03261_pr_semi_anti_join.sql | 27 +++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 tests/queries/0_stateless/03261_pr_semi_anti_join.reference create mode 100644 tests/queries/0_stateless/03261_pr_semi_anti_join.sql diff --git a/tests/queries/0_stateless/03261_pr_semi_anti_join.reference b/tests/queries/0_stateless/03261_pr_semi_anti_join.reference new file mode 100644 index 00000000000..782147f1f6f --- /dev/null +++ b/tests/queries/0_stateless/03261_pr_semi_anti_join.reference @@ -0,0 +1,16 @@ +semi left +2 a3 2 b1 +2 a6 2 b1 +4 a5 4 b3 +semi right +2 a3 2 b1 +2 a3 2 b2 +4 a5 4 b3 +4 a5 4 b4 +4 a5 4 b5 +anti left +0 a1 0 +1 a2 1 +3 a4 3 +anti right +0 5 b6 diff --git a/tests/queries/0_stateless/03261_pr_semi_anti_join.sql b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql new file mode 100644 index 00000000000..d2ea3725d6b --- /dev/null +++ b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1 SYNC; +DROP TABLE IF EXISTS t2 SYNC; +create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); + +CREATE TABLE t1 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t1', '1') order by tuple(); +CREATE TABLE t2 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t2', '1') order by tuple(); + +INSERT INTO t1 (x, s) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'), (2, 'a6'); +INSERT INTO t2 (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +SET join_use_nulls = 0; +set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; + +SELECT 'semi left'; +SELECT t1.*, t2.* FROM t1 SEMI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'semi right'; +SELECT t1.*, t2.* FROM t1 SEMI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'anti left'; +SELECT t1.*, t2.* FROM t1 ANTI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'anti right'; +SELECT t1.*, t2.* FROM t1 ANTI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +DROP TABLE t1 SYNC; +DROP TABLE t2 SYNC; From 4e8a96e9c18eb79e3ac5273796664ea6178e5cab Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sat, 2 Nov 2024 15:29:09 +0100 Subject: [PATCH 088/267] squash small blocks before join transforms --- src/QueryPipeline/QueryPipelineBuilder.cpp | 14 +++++++++++--- tests/performance/all_join_opt.xml | 5 +++-- tests/performance/join_append_block.xml | 3 ++- tests/performance/joins_in_memory.xml | 3 ++- tests/performance/one_thousand_joins.xml | 2 +- tests/performance/string_join.xml | 3 ++- 6 files changed, 21 
insertions(+), 9 deletions(-) diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index be0e17db2a2..463d18ed7a2 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -441,9 +442,12 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe Processors processors; for (auto & outport : outports) { + auto squashing = std::make_shared(right->getHeader(), max_block_size / 2, 0); + connect(*outport, squashing->getInputs().front()); + processors.emplace_back(squashing); auto adding_joined = std::make_shared(right->getHeader(), join); - connect(*outport, adding_joined->getInputs().front()); - processors.emplace_back(adding_joined); + connect(squashing->getOutputPort(), adding_joined->getInputs().front()); + processors.emplace_back(std::move(adding_joined)); } return processors; }; @@ -497,10 +501,13 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe Block left_header = left->getHeader(); for (size_t i = 0; i < num_streams; ++i) { + auto squashing = std::make_shared(left->getHeader(), max_block_size / 2, 0); + connect(**lit, squashing->getInputs().front()); + auto joining = std::make_shared( left_header, output_header, join, max_block_size, false, default_totals, finish_counter); - connect(**lit, joining->getInputs().front()); + connect(squashing->getOutputPort(), joining->getInputs().front()); connect(**rit, joining->getInputs().back()); if (delayed_root) { @@ -532,6 +539,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe if (collected_processors) collected_processors->emplace_back(joining); + left->pipe.processors->emplace_back(std::move(squashing)); left->pipe.processors->emplace_back(std::move(joining)); } diff --git a/tests/performance/all_join_opt.xml b/tests/performance/all_join_opt.xml index 0ab9c39f67c..5a4741690c7 100644 --- a/tests/performance/all_join_opt.xml +++ b/tests/performance/all_join_opt.xml @@ -5,11 +5,12 @@ INSERT INTO test SELECT number % 10000, number % 10000, number % 10000 FROM numbers(10000000) INSERT INTO test1 SELECT number % 1000 , number % 1000, number % 1000 FROM numbers(100000) - SELECT MAX(test1.a) FROM test INNER JOIN test1 on test.b = test1.b + SELECT MAX(test1.a) FROM test INNER JOIN test1 on test.b = test1.b settings join_algorithm='hash' + SELECT MAX(test1.a) FROM test INNER JOIN test1 on test.b = test1.b settings join_algorithm='parallel_hash' SELECT MAX(test1.a) FROM test LEFT JOIN test1 on test.b = test1.b SELECT MAX(test1.a) FROM test RIGHT JOIN test1 on test.b = test1.b SELECT MAX(test1.a) FROM test FULL JOIN test1 on test.b = test1.b DROP TABLE IF EXISTS test DROP TABLE IF EXISTS test1 - \ No newline at end of file + diff --git a/tests/performance/join_append_block.xml b/tests/performance/join_append_block.xml index 15859e95941..3eceb0bbe91 100644 --- a/tests/performance/join_append_block.xml +++ b/tests/performance/join_append_block.xml @@ -1,3 +1,4 @@ - SELECT count(c) FROM numbers_mt(100000000) AS a INNER JOIN (SELECT number, toString(number) AS c FROM numbers(2000000)) AS b ON (a.number % 10000000) = b.number + SELECT count(c) FROM numbers_mt(100000000) AS a INNER JOIN (SELECT number, toString(number) AS c FROM numbers(2000000)) AS b ON (a.number % 10000000) = b.number settings join_algorithm='hash' + SELECT count(c) FROM numbers_mt(100000000) AS a INNER JOIN (SELECT number, toString(number) AS c FROM numbers(2000000)) AS b ON (a.number % 
10000000) = b.number settings join_algorithm='parallel_hash' diff --git a/tests/performance/joins_in_memory.xml b/tests/performance/joins_in_memory.xml index 7ff6a803d04..debf533d0e4 100644 --- a/tests/performance/joins_in_memory.xml +++ b/tests/performance/joins_in_memory.xml @@ -13,7 +13,8 @@ SELECT COUNT() FROM ints l ANY LEFT JOIN ints r USING i64 WHERE i32 IN(42, 10042, 20042, 30042, 40042) SELECT COUNT() FROM ints l INNER JOIN ints r USING i64 WHERE i32 = 20042 - SELECT COUNT() FROM ints l INNER JOIN ints r USING i64,i32,i16,i8 WHERE i32 = 20042 settings query_plan_filter_push_down = 0 + SELECT COUNT() FROM ints l INNER JOIN ints r USING i64,i32,i16,i8 WHERE i32 = 20042 settings query_plan_filter_push_down = 0, join_algorithm='hash' + SELECT COUNT() FROM ints l INNER JOIN ints r USING i64,i32,i16,i8 WHERE i32 = 20042 settings query_plan_filter_push_down = 0, join_algorithm='parallel_hash' SELECT COUNT() FROM ints l INNER JOIN ints r ON l.i64 = r.i64 WHERE i32 = 20042 SELECT COUNT() FROM ints l INNER JOIN ints r USING i64 WHERE i32 IN(42, 10042, 20042, 30042, 40042) diff --git a/tests/performance/one_thousand_joins.xml b/tests/performance/one_thousand_joins.xml index d7e66bb1c67..d8ac057d967 100644 --- a/tests/performance/one_thousand_joins.xml +++ b/tests/performance/one_thousand_joins.xml @@ -1,6 +1,6 @@ -SELECT * FROM (SELECT 1 AS x) t1 JOIN (SELECT 1 AS x) t2 ON t1.x = t2.x JOIN (SELECT 1 AS x) t3 ON t1.x = t3.x JOIN (SELECT 1 AS x) t4 ON t1.x = t4.x JOIN (SELECT 1 AS x) t5 ON t1.x = t5.x JOIN (SELECT 1 AS x) t6 ON t1.x = t6.x JOIN (SELECT 1 AS x) t7 ON t1.x = t7.x JOIN (SELECT 1 AS x) t8 ON t1.x = t8.x JOIN (SELECT 1 AS x) t9 ON t1.x = t9.x JOIN (SELECT 1 AS x) t10 ON t1.x = t10.x JOIN (SELECT 1 AS x) t11 ON t1.x = t11.x JOIN (SELECT 1 AS x) t12 ON t1.x = t12.x JOIN (SELECT 1 AS x) t13 ON t1.x = t13.x JOIN (SELECT 1 AS x) t14 ON t1.x = t14.x JOIN (SELECT 1 AS x) t15 ON t1.x = t15.x JOIN (SELECT 1 AS x) t16 ON t1.x = t16.x JOIN (SELECT 1 AS x) t17 ON t1.x = t17.x JOIN (SELECT 1 AS x) t18 ON t1.x = t18.x JOIN (SELECT 1 AS x) t19 ON t1.x = t19.x JOIN (SELECT 1 AS x) t20 ON t1.x = t20.x JOIN (SELECT 1 AS x) t21 ON t1.x = t21.x JOIN (SELECT 1 AS x) t22 ON t1.x = t22.x JOIN (SELECT 1 AS x) t23 ON t1.x = t23.x JOIN (SELECT 1 AS x) t24 ON t1.x = t24.x JOIN (SELECT 1 AS x) t25 ON t1.x = t25.x JOIN (SELECT 1 AS x) t26 ON t1.x = t26.x JOIN (SELECT 1 AS x) t27 ON t1.x = t27.x JOIN (SELECT 1 AS x) t28 ON t1.x = t28.x JOIN (SELECT 1 AS x) t29 ON t1.x = t29.x JOIN (SELECT 1 AS x) t30 ON t1.x = t30.x JOIN (SELECT 1 AS x) t31 ON t1.x = t31.x JOIN (SELECT 1 AS x) t32 ON t1.x = t32.x JOIN (SELECT 1 AS x) t33 ON t1.x = t33.x JOIN (SELECT 1 AS x) t34 ON t1.x = t34.x JOIN (SELECT 1 AS x) t35 ON t1.x = t35.x JOIN (SELECT 1 AS x) t36 ON t1.x = t36.x JOIN (SELECT 1 AS x) t37 ON t1.x = t37.x JOIN (SELECT 1 AS x) t38 ON t1.x = t38.x JOIN (SELECT 1 AS x) t39 ON t1.x = t39.x JOIN (SELECT 1 AS x) t40 ON t1.x = t40.x JOIN (SELECT 1 AS x) t41 ON t1.x = t41.x JOIN (SELECT 1 AS x) t42 ON t1.x = t42.x JOIN (SELECT 1 AS x) t43 ON t1.x = t43.x JOIN (SELECT 1 AS x) t44 ON t1.x = t44.x JOIN (SELECT 1 AS x) t45 ON t1.x = t45.x JOIN (SELECT 1 AS x) t46 ON t1.x = t46.x JOIN (SELECT 1 AS x) t47 ON t1.x = t47.x JOIN (SELECT 1 AS x) t48 ON t1.x = t48.x JOIN (SELECT 1 AS x) t49 ON t1.x = t49.x JOIN (SELECT 1 AS x) t50 ON t1.x = t50.x JOIN (SELECT 1 AS x) t51 ON t1.x = t51.x JOIN (SELECT 1 AS x) t52 ON t1.x = t52.x JOIN (SELECT 1 AS x) t53 ON t1.x = t53.x JOIN (SELECT 1 AS x) t54 ON t1.x = t54.x JOIN (SELECT 1
AS x) t55 ON t1.x = t55.x JOIN (SELECT 1 AS x) t56 ON t1.x = t56.x JOIN (SELECT 1 AS x) t57 ON t1.x = t57.x JOIN (SELECT 1 AS x) t58 ON t1.x = t58.x JOIN (SELECT 1 AS x) t59 ON t1.x = t59.x JOIN (SELECT 1 AS x) t60 ON t1.x = t60.x JOIN (SELECT 1 AS x) t61 ON t1.x = t61.x JOIN (SELECT 1 AS x) t62 ON t1.x = t62.x JOIN (SELECT 1 AS x) t63 ON t1.x = t63.x JOIN (SELECT 1 AS x) t64 ON t1.x = t64.x JOIN (SELECT 1 AS x) t65 ON t1.x = t65.x JOIN (SELECT 1 AS x) t66 ON t1.x = t66.x JOIN (SELECT 1 AS x) t67 ON t1.x = t67.x JOIN (SELECT 1 AS x) t68 ON t1.x = t68.x JOIN (SELECT 1 AS x) t69 ON t1.x = t69.x JOIN (SELECT 1 AS x) t70 ON t1.x = t70.x JOIN (SELECT 1 AS x) t71 ON t1.x = t71.x JOIN (SELECT 1 AS x) t72 ON t1.x = t72.x JOIN (SELECT 1 AS x) t73 ON t1.x = t73.x JOIN (SELECT 1 AS x) t74 ON t1.x = t74.x JOIN (SELECT 1 AS x) t75 ON t1.x = t75.x JOIN (SELECT 1 AS x) t76 ON t1.x = t76.x JOIN (SELECT 1 AS x) t77 ON t1.x = t77.x JOIN (SELECT 1 AS x) t78 ON t1.x = t78.x JOIN (SELECT 1 AS x) t79 ON t1.x = t79.x JOIN (SELECT 1 AS x) t80 ON t1.x = t80.x JOIN (SELECT 1 AS x) t81 ON t1.x = t81.x JOIN (SELECT 1 AS x) t82 ON t1.x = t82.x JOIN (SELECT 1 AS x) t83 ON t1.x = t83.x JOIN (SELECT 1 AS x) t84 ON t1.x = t84.x JOIN (SELECT 1 AS x) t85 ON t1.x = t85.x JOIN (SELECT 1 AS x) t86 ON t1.x = t86.x JOIN (SELECT 1 AS x) t87 ON t1.x = t87.x JOIN (SELECT 1 AS x) t88 ON t1.x = t88.x JOIN (SELECT 1 AS x) t89 ON t1.x = t89.x JOIN (SELECT 1 AS x) t90 ON t1.x = t90.x JOIN (SELECT 1 AS x) t91 ON t1.x = t91.x JOIN (SELECT 1 AS x) t92 ON t1.x = t92.x JOIN (SELECT 1 AS x) t93 ON t1.x = t93.x JOIN (SELECT 1 AS x) t94 ON t1.x = t94.x JOIN (SELECT 1 AS x) t95 ON t1.x = t95.x JOIN (SELECT 1 AS x) t96 ON t1.x = t96.x JOIN (SELECT 1 AS x) t97 ON t1.x = t97.x JOIN (SELECT 1 AS x) t98 ON t1.x = t98.x JOIN (SELECT 1 AS x) t99 ON t1.x = t99.x JOIN (SELECT 1 AS x) t100 ON t1.x = t100.x JOIN (SELECT 1 AS x) t101 ON t1.x = t101.x JOIN (SELECT 1 AS x) t102 ON t1.x = t102.x JOIN (SELECT 1 AS x) t103 ON t1.x = t103.x JOIN (SELECT 1 AS x) t104 ON t1.x = t104.x JOIN (SELECT 1 AS x) t105 ON t1.x = t105.x JOIN (SELECT 1 AS x) t106 ON t1.x = t106.x JOIN (SELECT 1 AS x) t107 ON t1.x = t107.x JOIN (SELECT 1 AS x) t108 ON t1.x = t108.x JOIN (SELECT 1 AS x) t109 ON t1.x = t109.x JOIN (SELECT 1 AS x) t110 ON t1.x = t110.x JOIN (SELECT 1 AS x) t111 ON t1.x = t111.x JOIN (SELECT 1 AS x) t112 ON t1.x = t112.x JOIN (SELECT 1 AS x) t113 ON t1.x = t113.x JOIN (SELECT 1 AS x) t114 ON t1.x = t114.x JOIN (SELECT 1 AS x) t115 ON t1.x = t115.x JOIN (SELECT 1 AS x) t116 ON t1.x = t116.x JOIN (SELECT 1 AS x) t117 ON t1.x = t117.x JOIN (SELECT 1 AS x) t118 ON t1.x = t118.x JOIN (SELECT 1 AS x) t119 ON t1.x = t119.x JOIN (SELECT 1 AS x) t120 ON t1.x = t120.x JOIN (SELECT 1 AS x) t121 ON t1.x = t121.x JOIN (SELECT 1 AS x) t122 ON t1.x = t122.x JOIN (SELECT 1 AS x) t123 ON t1.x = t123.x JOIN (SELECT 1 AS x) t124 ON t1.x = t124.x JOIN (SELECT 1 AS x) t125 ON t1.x = t125.x JOIN (SELECT 1 AS x) t126 ON t1.x = t126.x JOIN (SELECT 1 AS x) t127 ON t1.x = t127.x JOIN (SELECT 1 AS x) t128 ON t1.x = t128.x JOIN (SELECT 1 AS x) t129 ON t1.x = t129.x JOIN (SELECT 1 AS x) t130 ON t1.x = t130.x JOIN (SELECT 1 AS x) t131 ON t1.x = t131.x JOIN (SELECT 1 AS x) t132 ON t1.x = t132.x JOIN (SELECT 1 AS x) t133 ON t1.x = t133.x JOIN (SELECT 1 AS x) t134 ON t1.x = t134.x JOIN (SELECT 1 AS x) t135 ON t1.x = t135.x JOIN (SELECT 1 AS x) t136 ON t1.x = t136.x JOIN (SELECT 1 AS x) t137 ON t1.x = t137.x JOIN (SELECT 1 AS x) t138 ON t1.x = t138.x JOIN (SELECT 1 AS x) t139 ON t1.x = t139.x JOIN 
(SELECT 1 AS x) t140 ON t1.x = t140.x JOIN (SELECT 1 AS x) t141 ON t1.x = t141.x JOIN (SELECT 1 AS x) t142 ON t1.x = t142.x JOIN (SELECT 1 AS x) t143 ON t1.x = t143.x JOIN (SELECT 1 AS x) t144 ON t1.x = t144.x JOIN (SELECT 1 AS x) t145 ON t1.x = t145.x JOIN (SELECT 1 AS x) t146 ON t1.x = t146.x JOIN (SELECT 1 AS x) t147 ON t1.x = t147.x JOIN (SELECT 1 AS x) t148 ON t1.x = t148.x JOIN (SELECT 1 AS x) t149 ON t1.x = t149.x JOIN (SELECT 1 AS x) t150 ON t1.x = t150.x JOIN (SELECT 1 AS x) t151 ON t1.x = t151.x JOIN (SELECT 1 AS x) t152 ON t1.x = t152.x JOIN (SELECT 1 AS x) t153 ON t1.x = t153.x JOIN (SELECT 1 AS x) t154 ON t1.x = t154.x JOIN (SELECT 1 AS x) t155 ON t1.x = t155.x JOIN (SELECT 1 AS x) t156 ON t1.x = t156.x JOIN (SELECT 1 AS x) t157 ON t1.x = t157.x JOIN (SELECT 1 AS x) t158 ON t1.x = t158.x JOIN (SELECT 1 AS x) t159 ON t1.x = t159.x JOIN (SELECT 1 AS x) t160 ON t1.x = t160.x JOIN (SELECT 1 AS x) t161 ON t1.x = t161.x JOIN (SELECT 1 AS x) t162 ON t1.x = t162.x JOIN (SELECT 1 AS x) t163 ON t1.x = t163.x JOIN (SELECT 1 AS x) t164 ON t1.x = t164.x JOIN (SELECT 1 AS x) t165 ON t1.x = t165.x JOIN (SELECT 1 AS x) t166 ON t1.x = t166.x JOIN (SELECT 1 AS x) t167 ON t1.x = t167.x JOIN (SELECT 1 AS x) t168 ON t1.x = t168.x JOIN (SELECT 1 AS x) t169 ON t1.x = t169.x JOIN (SELECT 1 AS x) t170 ON t1.x = t170.x JOIN (SELECT 1 AS x) t171 ON t1.x = t171.x JOIN (SELECT 1 AS x) t172 ON t1.x = t172.x JOIN (SELECT 1 AS x) t173 ON t1.x = t173.x JOIN (SELECT 1 AS x) t174 ON t1.x = t174.x JOIN (SELECT 1 AS x) t175 ON t1.x = t175.x JOIN (SELECT 1 AS x) t176 ON t1.x = t176.x JOIN (SELECT 1 AS x) t177 ON t1.x = t177.x JOIN (SELECT 1 AS x) t178 ON t1.x = t178.x JOIN (SELECT 1 AS x) t179 ON t1.x = t179.x JOIN (SELECT 1 AS x) t180 ON t1.x = t180.x JOIN (SELECT 1 AS x) t181 ON t1.x = t181.x JOIN (SELECT 1 AS x) t182 ON t1.x = t182.x JOIN (SELECT 1 AS x) t183 ON t1.x = t183.x JOIN (SELECT 1 AS x) t184 ON t1.x = t184.x JOIN (SELECT 1 AS x) t185 ON t1.x = t185.x JOIN (SELECT 1 AS x) t186 ON t1.x = t186.x JOIN (SELECT 1 AS x) t187 ON t1.x = t187.x JOIN (SELECT 1 AS x) t188 ON t1.x = t188.x JOIN (SELECT 1 AS x) t189 ON t1.x = t189.x JOIN (SELECT 1 AS x) t190 ON t1.x = t190.x JOIN (SELECT 1 AS x) t191 ON t1.x = t191.x JOIN (SELECT 1 AS x) t192 ON t1.x = t192.x JOIN (SELECT 1 AS x) t193 ON t1.x = t193.x JOIN (SELECT 1 AS x) t194 ON t1.x = t194.x JOIN (SELECT 1 AS x) t195 ON t1.x = t195.x JOIN (SELECT 1 AS x) t196 ON t1.x = t196.x JOIN (SELECT 1 AS x) t197 ON t1.x = t197.x JOIN (SELECT 1 AS x) t198 ON t1.x = t198.x JOIN (SELECT 1 AS x) t199 ON t1.x = t199.x JOIN (SELECT 1 AS x) t200 ON t1.x = t200.x JOIN (SELECT 1 AS x) t201 ON t1.x = t201.x JOIN (SELECT 1 AS x) t202 ON t1.x = t202.x JOIN (SELECT 1 AS x) t203 ON t1.x = t203.x JOIN (SELECT 1 AS x) t204 ON t1.x = t204.x JOIN (SELECT 1 AS x) t205 ON t1.x = t205.x JOIN (SELECT 1 AS x) t206 ON t1.x = t206.x JOIN (SELECT 1 AS x) t207 ON t1.x = t207.x JOIN (SELECT 1 AS x) t208 ON t1.x = t208.x JOIN (SELECT 1 AS x) t209 ON t1.x = t209.x JOIN (SELECT 1 AS x) t210 ON t1.x = t210.x JOIN (SELECT 1 AS x) t211 ON t1.x = t211.x JOIN (SELECT 1 AS x) t212 ON t1.x = t212.x JOIN (SELECT 1 AS x) t213 ON t1.x = t213.x JOIN (SELECT 1 AS x) t214 ON t1.x = t214.x JOIN (SELECT 1 AS x) t215 ON t1.x = t215.x JOIN (SELECT 1 AS x) t216 ON t1.x = t216.x JOIN (SELECT 1 AS x) t217 ON t1.x = t217.x JOIN (SELECT 1 AS x) t218 ON t1.x = t218.x JOIN (SELECT 1 AS x) t219 ON t1.x = t219.x JOIN (SELECT 1 AS x) t220 ON t1.x = t220.x JOIN (SELECT 1 AS x) t221 ON t1.x = t221.x JOIN (SELECT 1 AS x) t222 ON t1.x 
= t222.x JOIN (SELECT 1 AS x) t223 ON t1.x = t223.x JOIN (SELECT 1 AS x) t224 ON t1.x = t224.x JOIN (SELECT 1 AS x) t225 ON t1.x = t225.x JOIN (SELECT 1 AS x) t226 ON t1.x = t226.x JOIN (SELECT 1 AS x) t227 ON t1.x = t227.x JOIN (SELECT 1 AS x) t228 ON t1.x = t228.x JOIN (SELECT 1 AS x) t229 ON t1.x = t229.x JOIN (SELECT 1 AS x) t230 ON t1.x = t230.x JOIN (SELECT 1 AS x) t231 ON t1.x = t231.x JOIN (SELECT 1 AS x) t232 ON t1.x = t232.x JOIN (SELECT 1 AS x) t233 ON t1.x = t233.x JOIN (SELECT 1 AS x) t234 ON t1.x = t234.x JOIN (SELECT 1 AS x) t235 ON t1.x = t235.x JOIN (SELECT 1 AS x) t236 ON t1.x = t236.x JOIN (SELECT 1 AS x) t237 ON t1.x = t237.x JOIN (SELECT 1 AS x) t238 ON t1.x = t238.x JOIN (SELECT 1 AS x) t239 ON t1.x = t239.x JOIN (SELECT 1 AS x) t240 ON t1.x = t240.x JOIN (SELECT 1 AS x) t241 ON t1.x = t241.x JOIN (SELECT 1 AS x) t242 ON t1.x = t242.x JOIN (SELECT 1 AS x) t243 ON t1.x = t243.x JOIN (SELECT 1 AS x) t244 ON t1.x = t244.x JOIN (SELECT 1 AS x) t245 ON t1.x = t245.x JOIN (SELECT 1 AS x) t246 ON t1.x = t246.x JOIN (SELECT 1 AS x) t247 ON t1.x = t247.x JOIN (SELECT 1 AS x) t248 ON t1.x = t248.x JOIN (SELECT 1 AS x) t249 ON t1.x = t249.x JOIN (SELECT 1 AS x) t250 ON t1.x = t250.x JOIN (SELECT 1 AS x) t251 ON t1.x = t251.x JOIN (SELECT 1 AS x) t252 ON t1.x = t252.x JOIN (SELECT 1 AS x) t253 ON t1.x = t253.x JOIN (SELECT 1 AS x) t254 ON t1.x = t254.x JOIN (SELECT 1 AS x) t255 ON t1.x = t255.x JOIN (SELECT 1 AS x) t256 ON t1.x = t256.x JOIN (SELECT 1 AS x) t257 ON t1.x = t257.x JOIN (SELECT 1 AS x) t258 ON t1.x = t258.x JOIN (SELECT 1 AS x) t259 ON t1.x = t259.x JOIN (SELECT 1 AS x) t260 ON t1.x = t260.x JOIN (SELECT 1 AS x) t261 ON t1.x = t261.x JOIN (SELECT 1 AS x) t262 ON t1.x = t262.x JOIN (SELECT 1 AS x) t263 ON t1.x = t263.x JOIN (SELECT 1 AS x) t264 ON t1.x = t264.x JOIN (SELECT 1 AS x) t265 ON t1.x = t265.x JOIN (SELECT 1 AS x) t266 ON t1.x = t266.x JOIN (SELECT 1 AS x) t267 ON t1.x = t267.x JOIN (SELECT 1 AS x) t268 ON t1.x = t268.x JOIN (SELECT 1 AS x) t269 ON t1.x = t269.x JOIN (SELECT 1 AS x) t270 ON t1.x = t270.x JOIN (SELECT 1 AS x) t271 ON t1.x = t271.x JOIN (SELECT 1 AS x) t272 ON t1.x = t272.x JOIN (SELECT 1 AS x) t273 ON t1.x = t273.x JOIN (SELECT 1 AS x) t274 ON t1.x = t274.x JOIN (SELECT 1 AS x) t275 ON t1.x = t275.x JOIN (SELECT 1 AS x) t276 ON t1.x = t276.x JOIN (SELECT 1 AS x) t277 ON t1.x = t277.x JOIN (SELECT 1 AS x) t278 ON t1.x = t278.x JOIN (SELECT 1 AS x) t279 ON t1.x = t279.x JOIN (SELECT 1 AS x) t280 ON t1.x = t280.x JOIN (SELECT 1 AS x) t281 ON t1.x = t281.x JOIN (SELECT 1 AS x) t282 ON t1.x = t282.x JOIN (SELECT 1 AS x) t283 ON t1.x = t283.x JOIN (SELECT 1 AS x) t284 ON t1.x = t284.x JOIN (SELECT 1 AS x) t285 ON t1.x = t285.x JOIN (SELECT 1 AS x) t286 ON t1.x = t286.x JOIN (SELECT 1 AS x) t287 ON t1.x = t287.x JOIN (SELECT 1 AS x) t288 ON t1.x = t288.x JOIN (SELECT 1 AS x) t289 ON t1.x = t289.x JOIN (SELECT 1 AS x) t290 ON t1.x = t290.x JOIN (SELECT 1 AS x) t291 ON t1.x = t291.x JOIN (SELECT 1 AS x) t292 ON t1.x = t292.x JOIN (SELECT 1 AS x) t293 ON t1.x = t293.x JOIN (SELECT 1 AS x) t294 ON t1.x = t294.x JOIN (SELECT 1 AS x) t295 ON t1.x = t295.x JOIN (SELECT 1 AS x) t296 ON t1.x = t296.x JOIN (SELECT 1 AS x) t297 ON t1.x = t297.x JOIN (SELECT 1 AS x) t298 ON t1.x = t298.x JOIN (SELECT 1 AS x) t299 ON t1.x = t299.x JOIN (SELECT 1 AS x) t300 ON t1.x = t300.x JOIN (SELECT 1 AS x) t301 ON t1.x = t301.x JOIN (SELECT 1 AS x) t302 ON t1.x = t302.x JOIN (SELECT 1 AS x) t303 ON t1.x = t303.x JOIN (SELECT 1 AS x) t304 ON t1.x = t304.x JOIN (SELECT 1 AS 
x) t305 ON t1.x = t305.x JOIN (SELECT 1 AS x) t306 ON t1.x = t306.x JOIN (SELECT 1 AS x) t307 ON t1.x = t307.x JOIN (SELECT 1 AS x) t308 ON t1.x = t308.x JOIN (SELECT 1 AS x) t309 ON t1.x = t309.x JOIN (SELECT 1 AS x) t310 ON t1.x = t310.x JOIN (SELECT 1 AS x) t311 ON t1.x = t311.x JOIN (SELECT 1 AS x) t312 ON t1.x = t312.x JOIN (SELECT 1 AS x) t313 ON t1.x = t313.x JOIN (SELECT 1 AS x) t314 ON t1.x = t314.x JOIN (SELECT 1 AS x) t315 ON t1.x = t315.x JOIN (SELECT 1 AS x) t316 ON t1.x = t316.x JOIN (SELECT 1 AS x) t317 ON t1.x = t317.x JOIN (SELECT 1 AS x) t318 ON t1.x = t318.x JOIN (SELECT 1 AS x) t319 ON t1.x = t319.x JOIN (SELECT 1 AS x) t320 ON t1.x = t320.x JOIN (SELECT 1 AS x) t321 ON t1.x = t321.x JOIN (SELECT 1 AS x) t322 ON t1.x = t322.x JOIN (SELECT 1 AS x) t323 ON t1.x = t323.x JOIN (SELECT 1 AS x) t324 ON t1.x = t324.x JOIN (SELECT 1 AS x) t325 ON t1.x = t325.x JOIN (SELECT 1 AS x) t326 ON t1.x = t326.x JOIN (SELECT 1 AS x) t327 ON t1.x = t327.x JOIN (SELECT 1 AS x) t328 ON t1.x = t328.x JOIN (SELECT 1 AS x) t329 ON t1.x = t329.x JOIN (SELECT 1 AS x) t330 ON t1.x = t330.x JOIN (SELECT 1 AS x) t331 ON t1.x = t331.x JOIN (SELECT 1 AS x) t332 ON t1.x = t332.x JOIN (SELECT 1 AS x) t333 ON t1.x = t333.x JOIN (SELECT 1 AS x) t334 ON t1.x = t334.x JOIN (SELECT 1 AS x) t335 ON t1.x = t335.x JOIN (SELECT 1 AS x) t336 ON t1.x = t336.x JOIN (SELECT 1 AS x) t337 ON t1.x = t337.x JOIN (SELECT 1 AS x) t338 ON t1.x = t338.x JOIN (SELECT 1 AS x) t339 ON t1.x = t339.x JOIN (SELECT 1 AS x) t340 ON t1.x = t340.x JOIN (SELECT 1 AS x) t341 ON t1.x = t341.x JOIN (SELECT 1 AS x) t342 ON t1.x = t342.x JOIN (SELECT 1 AS x) t343 ON t1.x = t343.x JOIN (SELECT 1 AS x) t344 ON t1.x = t344.x JOIN (SELECT 1 AS x) t345 ON t1.x = t345.x JOIN (SELECT 1 AS x) t346 ON t1.x = t346.x JOIN (SELECT 1 AS x) t347 ON t1.x = t347.x JOIN (SELECT 1 AS x) t348 ON t1.x = t348.x JOIN (SELECT 1 AS x) t349 ON t1.x = t349.x JOIN (SELECT 1 AS x) t350 ON t1.x = t350.x JOIN (SELECT 1 AS x) t351 ON t1.x = t351.x JOIN (SELECT 1 AS x) t352 ON t1.x = t352.x JOIN (SELECT 1 AS x) t353 ON t1.x = t353.x JOIN (SELECT 1 AS x) t354 ON t1.x = t354.x JOIN (SELECT 1 AS x) t355 ON t1.x = t355.x JOIN (SELECT 1 AS x) t356 ON t1.x = t356.x JOIN (SELECT 1 AS x) t357 ON t1.x = t357.x JOIN (SELECT 1 AS x) t358 ON t1.x = t358.x JOIN (SELECT 1 AS x) t359 ON t1.x = t359.x JOIN (SELECT 1 AS x) t360 ON t1.x = t360.x JOIN (SELECT 1 AS x) t361 ON t1.x = t361.x JOIN (SELECT 1 AS x) t362 ON t1.x = t362.x JOIN (SELECT 1 AS x) t363 ON t1.x = t363.x JOIN (SELECT 1 AS x) t364 ON t1.x = t364.x JOIN (SELECT 1 AS x) t365 ON t1.x = t365.x JOIN (SELECT 1 AS x) t366 ON t1.x = t366.x JOIN (SELECT 1 AS x) t367 ON t1.x = t367.x JOIN (SELECT 1 AS x) t368 ON t1.x = t368.x JOIN (SELECT 1 AS x) t369 ON t1.x = t369.x JOIN (SELECT 1 AS x) t370 ON t1.x = t370.x JOIN (SELECT 1 AS x) t371 ON t1.x = t371.x JOIN (SELECT 1 AS x) t372 ON t1.x = t372.x JOIN (SELECT 1 AS x) t373 ON t1.x = t373.x JOIN (SELECT 1 AS x) t374 ON t1.x = t374.x JOIN (SELECT 1 AS x) t375 ON t1.x = t375.x JOIN (SELECT 1 AS x) t376 ON t1.x = t376.x JOIN (SELECT 1 AS x) t377 ON t1.x = t377.x JOIN (SELECT 1 AS x) t378 ON t1.x = t378.x JOIN (SELECT 1 AS x) t379 ON t1.x = t379.x JOIN (SELECT 1 AS x) t380 ON t1.x = t380.x JOIN (SELECT 1 AS x) t381 ON t1.x = t381.x JOIN (SELECT 1 AS x) t382 ON t1.x = t382.x JOIN (SELECT 1 AS x) t383 ON t1.x = t383.x JOIN (SELECT 1 AS x) t384 ON t1.x = t384.x JOIN (SELECT 1 AS x) t385 ON t1.x = t385.x JOIN (SELECT 1 AS x) t386 ON t1.x = t386.x JOIN (SELECT 1 AS x) t387 ON t1.x = t387.x 
JOIN (SELECT 1 AS x) t388 ON t1.x = t388.x JOIN (SELECT 1 AS x) t389 ON t1.x = t389.x JOIN (SELECT 1 AS x) t390 ON t1.x = t390.x JOIN (SELECT 1 AS x) t391 ON t1.x = t391.x JOIN (SELECT 1 AS x) t392 ON t1.x = t392.x JOIN (SELECT 1 AS x) t393 ON t1.x = t393.x JOIN (SELECT 1 AS x) t394 ON t1.x = t394.x JOIN (SELECT 1 AS x) t395 ON t1.x = t395.x JOIN (SELECT 1 AS x) t396 ON t1.x = t396.x JOIN (SELECT 1 AS x) t397 ON t1.x = t397.x JOIN (SELECT 1 AS x) t398 ON t1.x = t398.x JOIN (SELECT 1 AS x) t399 ON t1.x = t399.x JOIN (SELECT 1 AS x) t400 ON t1.x = t400.x JOIN (SELECT 1 AS x) t401 ON t1.x = t401.x JOIN (SELECT 1 AS x) t402 ON t1.x = t402.x JOIN (SELECT 1 AS x) t403 ON t1.x = t403.x JOIN (SELECT 1 AS x) t404 ON t1.x = t404.x JOIN (SELECT 1 AS x) t405 ON t1.x = t405.x JOIN (SELECT 1 AS x) t406 ON t1.x = t406.x JOIN (SELECT 1 AS x) t407 ON t1.x = t407.x JOIN (SELECT 1 AS x) t408 ON t1.x = t408.x JOIN (SELECT 1 AS x) t409 ON t1.x = t409.x JOIN (SELECT 1 AS x) t410 ON t1.x = t410.x JOIN (SELECT 1 AS x) t411 ON t1.x = t411.x JOIN (SELECT 1 AS x) t412 ON t1.x = t412.x JOIN (SELECT 1 AS x) t413 ON t1.x = t413.x JOIN (SELECT 1 AS x) t414 ON t1.x = t414.x JOIN (SELECT 1 AS x) t415 ON t1.x = t415.x JOIN (SELECT 1 AS x) t416 ON t1.x = t416.x JOIN (SELECT 1 AS x) t417 ON t1.x = t417.x JOIN (SELECT 1 AS x) t418 ON t1.x = t418.x JOIN (SELECT 1 AS x) t419 ON t1.x = t419.x JOIN (SELECT 1 AS x) t420 ON t1.x = t420.x JOIN (SELECT 1 AS x) t421 ON t1.x = t421.x JOIN (SELECT 1 AS x) t422 ON t1.x = t422.x JOIN (SELECT 1 AS x) t423 ON t1.x = t423.x JOIN (SELECT 1 AS x) t424 ON t1.x = t424.x JOIN (SELECT 1 AS x) t425 ON t1.x = t425.x JOIN (SELECT 1 AS x) t426 ON t1.x = t426.x JOIN (SELECT 1 AS x) t427 ON t1.x = t427.x JOIN (SELECT 1 AS x) t428 ON t1.x = t428.x JOIN (SELECT 1 AS x) t429 ON t1.x = t429.x JOIN (SELECT 1 AS x) t430 ON t1.x = t430.x JOIN (SELECT 1 AS x) t431 ON t1.x = t431.x JOIN (SELECT 1 AS x) t432 ON t1.x = t432.x JOIN (SELECT 1 AS x) t433 ON t1.x = t433.x JOIN (SELECT 1 AS x) t434 ON t1.x = t434.x JOIN (SELECT 1 AS x) t435 ON t1.x = t435.x JOIN (SELECT 1 AS x) t436 ON t1.x = t436.x JOIN (SELECT 1 AS x) t437 ON t1.x = t437.x JOIN (SELECT 1 AS x) t438 ON t1.x = t438.x JOIN (SELECT 1 AS x) t439 ON t1.x = t439.x JOIN (SELECT 1 AS x) t440 ON t1.x = t440.x JOIN (SELECT 1 AS x) t441 ON t1.x = t441.x JOIN (SELECT 1 AS x) t442 ON t1.x = t442.x JOIN (SELECT 1 AS x) t443 ON t1.x = t443.x JOIN (SELECT 1 AS x) t444 ON t1.x = t444.x JOIN (SELECT 1 AS x) t445 ON t1.x = t445.x JOIN (SELECT 1 AS x) t446 ON t1.x = t446.x JOIN (SELECT 1 AS x) t447 ON t1.x = t447.x JOIN (SELECT 1 AS x) t448 ON t1.x = t448.x JOIN (SELECT 1 AS x) t449 ON t1.x = t449.x JOIN (SELECT 1 AS x) t450 ON t1.x = t450.x JOIN (SELECT 1 AS x) t451 ON t1.x = t451.x JOIN (SELECT 1 AS x) t452 ON t1.x = t452.x JOIN (SELECT 1 AS x) t453 ON t1.x = t453.x JOIN (SELECT 1 AS x) t454 ON t1.x = t454.x JOIN (SELECT 1 AS x) t455 ON t1.x = t455.x JOIN (SELECT 1 AS x) t456 ON t1.x = t456.x JOIN (SELECT 1 AS x) t457 ON t1.x = t457.x JOIN (SELECT 1 AS x) t458 ON t1.x = t458.x JOIN (SELECT 1 AS x) t459 ON t1.x = t459.x JOIN (SELECT 1 AS x) t460 ON t1.x = t460.x JOIN (SELECT 1 AS x) t461 ON t1.x = t461.x JOIN (SELECT 1 AS x) t462 ON t1.x = t462.x JOIN (SELECT 1 AS x) t463 ON t1.x = t463.x JOIN (SELECT 1 AS x) t464 ON t1.x = t464.x JOIN (SELECT 1 AS x) t465 ON t1.x = t465.x JOIN (SELECT 1 AS x) t466 ON t1.x = t466.x JOIN (SELECT 1 AS x) t467 ON t1.x = t467.x JOIN (SELECT 1 AS x) t468 ON t1.x = t468.x JOIN (SELECT 1 AS x) t469 ON t1.x = t469.x JOIN (SELECT 1 AS x) t470 ON 
t1.x = t470.x JOIN (SELECT 1 AS x) t471 ON t1.x = t471.x JOIN (SELECT 1 AS x) t472 ON t1.x = t472.x JOIN (SELECT 1 AS x) t473 ON t1.x = t473.x JOIN (SELECT 1 AS x) t474 ON t1.x = t474.x JOIN (SELECT 1 AS x) t475 ON t1.x = t475.x JOIN (SELECT 1 AS x) t476 ON t1.x = t476.x JOIN (SELECT 1 AS x) t477 ON t1.x = t477.x JOIN (SELECT 1 AS x) t478 ON t1.x = t478.x JOIN (SELECT 1 AS x) t479 ON t1.x = t479.x JOIN (SELECT 1 AS x) t480 ON t1.x = t480.x JOIN (SELECT 1 AS x) t481 ON t1.x = t481.x JOIN (SELECT 1 AS x) t482 ON t1.x = t482.x JOIN (SELECT 1 AS x) t483 ON t1.x = t483.x JOIN (SELECT 1 AS x) t484 ON t1.x = t484.x JOIN (SELECT 1 AS x) t485 ON t1.x = t485.x JOIN (SELECT 1 AS x) t486 ON t1.x = t486.x JOIN (SELECT 1 AS x) t487 ON t1.x = t487.x JOIN (SELECT 1 AS x) t488 ON t1.x = t488.x JOIN (SELECT 1 AS x) t489 ON t1.x = t489.x JOIN (SELECT 1 AS x) t490 ON t1.x = t490.x JOIN (SELECT 1 AS x) t491 ON t1.x = t491.x JOIN (SELECT 1 AS x) t492 ON t1.x = t492.x JOIN (SELECT 1 AS x) t493 ON t1.x = t493.x JOIN (SELECT 1 AS x) t494 ON t1.x = t494.x JOIN (SELECT 1 AS x) t495 ON t1.x = t495.x JOIN (SELECT 1 AS x) t496 ON t1.x = t496.x JOIN (SELECT 1 AS x) t497 ON t1.x = t497.x JOIN (SELECT 1 AS x) t498 ON t1.x = t498.x JOIN (SELECT 1 AS x) t499 ON t1.x = t499.x JOIN (SELECT 1 AS x) t500 ON t1.x = t500.x JOIN (SELECT 1 AS x) t501 ON t1.x = t501.x JOIN (SELECT 1 AS x) t502 ON t1.x = t502.x JOIN (SELECT 1 AS x) t503 ON t1.x = t503.x JOIN (SELECT 1 AS x) t504 ON t1.x = t504.x JOIN (SELECT 1 AS x) t505 ON t1.x = t505.x JOIN (SELECT 1 AS x) t506 ON t1.x = t506.x JOIN (SELECT 1 AS x) t507 ON t1.x = t507.x JOIN (SELECT 1 AS x) t508 ON t1.x = t508.x JOIN (SELECT 1 AS x) t509 ON t1.x = t509.x JOIN (SELECT 1 AS x) t510 ON t1.x = t510.x JOIN (SELECT 1 AS x) t511 ON t1.x = t511.x JOIN (SELECT 1 AS x) t512 ON t1.x = t512.x JOIN (SELECT 1 AS x) t513 ON t1.x = t513.x JOIN (SELECT 1 AS x) t514 ON t1.x = t514.x JOIN (SELECT 1 AS x) t515 ON t1.x = t515.x JOIN (SELECT 1 AS x) t516 ON t1.x = t516.x JOIN (SELECT 1 AS x) t517 ON t1.x = t517.x JOIN (SELECT 1 AS x) t518 ON t1.x = t518.x JOIN (SELECT 1 AS x) t519 ON t1.x = t519.x JOIN (SELECT 1 AS x) t520 ON t1.x = t520.x JOIN (SELECT 1 AS x) t521 ON t1.x = t521.x JOIN (SELECT 1 AS x) t522 ON t1.x = t522.x JOIN (SELECT 1 AS x) t523 ON t1.x = t523.x JOIN (SELECT 1 AS x) t524 ON t1.x = t524.x JOIN (SELECT 1 AS x) t525 ON t1.x = t525.x JOIN (SELECT 1 AS x) t526 ON t1.x = t526.x JOIN (SELECT 1 AS x) t527 ON t1.x = t527.x JOIN (SELECT 1 AS x) t528 ON t1.x = t528.x JOIN (SELECT 1 AS x) t529 ON t1.x = t529.x JOIN (SELECT 1 AS x) t530 ON t1.x = t530.x JOIN (SELECT 1 AS x) t531 ON t1.x = t531.x JOIN (SELECT 1 AS x) t532 ON t1.x = t532.x JOIN (SELECT 1 AS x) t533 ON t1.x = t533.x JOIN (SELECT 1 AS x) t534 ON t1.x = t534.x JOIN (SELECT 1 AS x) t535 ON t1.x = t535.x JOIN (SELECT 1 AS x) t536 ON t1.x = t536.x JOIN (SELECT 1 AS x) t537 ON t1.x = t537.x JOIN (SELECT 1 AS x) t538 ON t1.x = t538.x JOIN (SELECT 1 AS x) t539 ON t1.x = t539.x JOIN (SELECT 1 AS x) t540 ON t1.x = t540.x JOIN (SELECT 1 AS x) t541 ON t1.x = t541.x JOIN (SELECT 1 AS x) t542 ON t1.x = t542.x JOIN (SELECT 1 AS x) t543 ON t1.x = t543.x JOIN (SELECT 1 AS x) t544 ON t1.x = t544.x JOIN (SELECT 1 AS x) t545 ON t1.x = t545.x JOIN (SELECT 1 AS x) t546 ON t1.x = t546.x JOIN (SELECT 1 AS x) t547 ON t1.x = t547.x JOIN (SELECT 1 AS x) t548 ON t1.x = t548.x JOIN (SELECT 1 AS x) t549 ON t1.x = t549.x JOIN (SELECT 1 AS x) t550 ON t1.x = t550.x JOIN (SELECT 1 AS x) t551 ON t1.x = t551.x JOIN (SELECT 1 AS x) t552 ON t1.x = t552.x JOIN (SELECT 1 
AS x) t553 ON t1.x = t553.x JOIN (SELECT 1 AS x) t554 ON t1.x = t554.x JOIN (SELECT 1 AS x) t555 ON t1.x = t555.x JOIN (SELECT 1 AS x) t556 ON t1.x = t556.x JOIN (SELECT 1 AS x) t557 ON t1.x = t557.x JOIN (SELECT 1 AS x) t558 ON t1.x = t558.x JOIN (SELECT 1 AS x) t559 ON t1.x = t559.x JOIN (SELECT 1 AS x) t560 ON t1.x = t560.x JOIN (SELECT 1 AS x) t561 ON t1.x = t561.x JOIN (SELECT 1 AS x) t562 ON t1.x = t562.x JOIN (SELECT 1 AS x) t563 ON t1.x = t563.x JOIN (SELECT 1 AS x) t564 ON t1.x = t564.x JOIN (SELECT 1 AS x) t565 ON t1.x = t565.x JOIN (SELECT 1 AS x) t566 ON t1.x = t566.x JOIN (SELECT 1 AS x) t567 ON t1.x = t567.x JOIN (SELECT 1 AS x) t568 ON t1.x = t568.x JOIN (SELECT 1 AS x) t569 ON t1.x = t569.x JOIN (SELECT 1 AS x) t570 ON t1.x = t570.x JOIN (SELECT 1 AS x) t571 ON t1.x = t571.x JOIN (SELECT 1 AS x) t572 ON t1.x = t572.x JOIN (SELECT 1 AS x) t573 ON t1.x = t573.x JOIN (SELECT 1 AS x) t574 ON t1.x = t574.x JOIN (SELECT 1 AS x) t575 ON t1.x = t575.x JOIN (SELECT 1 AS x) t576 ON t1.x = t576.x JOIN (SELECT 1 AS x) t577 ON t1.x = t577.x JOIN (SELECT 1 AS x) t578 ON t1.x = t578.x JOIN (SELECT 1 AS x) t579 ON t1.x = t579.x JOIN (SELECT 1 AS x) t580 ON t1.x = t580.x JOIN (SELECT 1 AS x) t581 ON t1.x = t581.x JOIN (SELECT 1 AS x) t582 ON t1.x = t582.x JOIN (SELECT 1 AS x) t583 ON t1.x = t583.x JOIN (SELECT 1 AS x) t584 ON t1.x = t584.x JOIN (SELECT 1 AS x) t585 ON t1.x = t585.x JOIN (SELECT 1 AS x) t586 ON t1.x = t586.x JOIN (SELECT 1 AS x) t587 ON t1.x = t587.x JOIN (SELECT 1 AS x) t588 ON t1.x = t588.x JOIN (SELECT 1 AS x) t589 ON t1.x = t589.x JOIN (SELECT 1 AS x) t590 ON t1.x = t590.x JOIN (SELECT 1 AS x) t591 ON t1.x = t591.x JOIN (SELECT 1 AS x) t592 ON t1.x = t592.x JOIN (SELECT 1 AS x) t593 ON t1.x = t593.x JOIN (SELECT 1 AS x) t594 ON t1.x = t594.x JOIN (SELECT 1 AS x) t595 ON t1.x = t595.x JOIN (SELECT 1 AS x) t596 ON t1.x = t596.x JOIN (SELECT 1 AS x) t597 ON t1.x = t597.x JOIN (SELECT 1 AS x) t598 ON t1.x = t598.x JOIN (SELECT 1 AS x) t599 ON t1.x = t599.x JOIN (SELECT 1 AS x) t600 ON t1.x = t600.x JOIN (SELECT 1 AS x) t601 ON t1.x = t601.x JOIN (SELECT 1 AS x) t602 ON t1.x = t602.x JOIN (SELECT 1 AS x) t603 ON t1.x = t603.x JOIN (SELECT 1 AS x) t604 ON t1.x = t604.x JOIN (SELECT 1 AS x) t605 ON t1.x = t605.x JOIN (SELECT 1 AS x) t606 ON t1.x = t606.x JOIN (SELECT 1 AS x) t607 ON t1.x = t607.x JOIN (SELECT 1 AS x) t608 ON t1.x = t608.x JOIN (SELECT 1 AS x) t609 ON t1.x = t609.x JOIN (SELECT 1 AS x) t610 ON t1.x = t610.x JOIN (SELECT 1 AS x) t611 ON t1.x = t611.x JOIN (SELECT 1 AS x) t612 ON t1.x = t612.x JOIN (SELECT 1 AS x) t613 ON t1.x = t613.x JOIN (SELECT 1 AS x) t614 ON t1.x = t614.x JOIN (SELECT 1 AS x) t615 ON t1.x = t615.x JOIN (SELECT 1 AS x) t616 ON t1.x = t616.x JOIN (SELECT 1 AS x) t617 ON t1.x = t617.x JOIN (SELECT 1 AS x) t618 ON t1.x = t618.x JOIN (SELECT 1 AS x) t619 ON t1.x = t619.x JOIN (SELECT 1 AS x) t620 ON t1.x = t620.x JOIN (SELECT 1 AS x) t621 ON t1.x = t621.x JOIN (SELECT 1 AS x) t622 ON t1.x = t622.x JOIN (SELECT 1 AS x) t623 ON t1.x = t623.x JOIN (SELECT 1 AS x) t624 ON t1.x = t624.x JOIN (SELECT 1 AS x) t625 ON t1.x = t625.x JOIN (SELECT 1 AS x) t626 ON t1.x = t626.x JOIN (SELECT 1 AS x) t627 ON t1.x = t627.x JOIN (SELECT 1 AS x) t628 ON t1.x = t628.x JOIN (SELECT 1 AS x) t629 ON t1.x = t629.x JOIN (SELECT 1 AS x) t630 ON t1.x = t630.x JOIN (SELECT 1 AS x) t631 ON t1.x = t631.x JOIN (SELECT 1 AS x) t632 ON t1.x = t632.x JOIN (SELECT 1 AS x) t633 ON t1.x = t633.x JOIN (SELECT 1 AS x) t634 ON t1.x = t634.x JOIN (SELECT 1 AS x) t635 ON t1.x = t635.x 
JOIN (SELECT 1 AS x) t636 ON t1.x = t636.x JOIN (SELECT 1 AS x) t637 ON t1.x = t637.x JOIN (SELECT 1 AS x) t638 ON t1.x = t638.x JOIN (SELECT 1 AS x) t639 ON t1.x = t639.x JOIN (SELECT 1 AS x) t640 ON t1.x = t640.x JOIN (SELECT 1 AS x) t641 ON t1.x = t641.x JOIN (SELECT 1 AS x) t642 ON t1.x = t642.x JOIN (SELECT 1 AS x) t643 ON t1.x = t643.x JOIN (SELECT 1 AS x) t644 ON t1.x = t644.x JOIN (SELECT 1 AS x) t645 ON t1.x = t645.x JOIN (SELECT 1 AS x) t646 ON t1.x = t646.x JOIN (SELECT 1 AS x) t647 ON t1.x = t647.x JOIN (SELECT 1 AS x) t648 ON t1.x = t648.x JOIN (SELECT 1 AS x) t649 ON t1.x = t649.x JOIN (SELECT 1 AS x) t650 ON t1.x = t650.x JOIN (SELECT 1 AS x) t651 ON t1.x = t651.x JOIN (SELECT 1 AS x) t652 ON t1.x = t652.x JOIN (SELECT 1 AS x) t653 ON t1.x = t653.x JOIN (SELECT 1 AS x) t654 ON t1.x = t654.x JOIN (SELECT 1 AS x) t655 ON t1.x = t655.x JOIN (SELECT 1 AS x) t656 ON t1.x = t656.x JOIN (SELECT 1 AS x) t657 ON t1.x = t657.x JOIN (SELECT 1 AS x) t658 ON t1.x = t658.x JOIN (SELECT 1 AS x) t659 ON t1.x = t659.x JOIN (SELECT 1 AS x) t660 ON t1.x = t660.x JOIN (SELECT 1 AS x) t661 ON t1.x = t661.x JOIN (SELECT 1 AS x) t662 ON t1.x = t662.x JOIN (SELECT 1 AS x) t663 ON t1.x = t663.x JOIN (SELECT 1 AS x) t664 ON t1.x = t664.x JOIN (SELECT 1 AS x) t665 ON t1.x = t665.x JOIN (SELECT 1 AS x) t666 ON t1.x = t666.x +SELECT * FROM (SELECT 1 AS x) t1 JOIN (SELECT 1 AS x) t2 ON t1.x = t2.x JOIN (SELECT 1 AS x) t3 ON t1.x = t3.x JOIN (SELECT 1 AS x) t4 ON t1.x = t4.x JOIN (SELECT 1 AS x) t5 ON t1.x = t5.x JOIN (SELECT 1 AS x) t6 ON t1.x = t6.x JOIN (SELECT 1 AS x) t7 ON t1.x = t7.x JOIN (SELECT 1 AS x) t8 ON t1.x = t8.x JOIN (SELECT 1 AS x) t9 ON t1.x = t9.x JOIN (SELECT 1 AS x) t10 ON t1.x = t10.x JOIN (SELECT 1 AS x) t11 ON t1.x = t11.x JOIN (SELECT 1 AS x) t12 ON t1.x = t12.x JOIN (SELECT 1 AS x) t13 ON t1.x = t13.x JOIN (SELECT 1 AS x) t14 ON t1.x = t14.x JOIN (SELECT 1 AS x) t15 ON t1.x = t15.x JOIN (SELECT 1 AS x) t16 ON t1.x = t16.x JOIN (SELECT 1 AS x) t17 ON t1.x = t17.x JOIN (SELECT 1 AS x) t18 ON t1.x = t18.x JOIN (SELECT 1 AS x) t19 ON t1.x = t19.x JOIN (SELECT 1 AS x) t20 ON t1.x = t20.x JOIN (SELECT 1 AS x) t21 ON t1.x = t21.x JOIN (SELECT 1 AS x) t22 ON t1.x = t22.x JOIN (SELECT 1 AS x) t23 ON t1.x = t23.x JOIN (SELECT 1 AS x) t24 ON t1.x = t24.x JOIN (SELECT 1 AS x) t25 ON t1.x = t25.x JOIN (SELECT 1 AS x) t26 ON t1.x = t26.x JOIN (SELECT 1 AS x) t27 ON t1.x = t27.x JOIN (SELECT 1 AS x) t28 ON t1.x = t28.x JOIN (SELECT 1 AS x) t29 ON t1.x = t29.x JOIN (SELECT 1 AS x) t30 ON t1.x = t30.x JOIN (SELECT 1 AS x) t31 ON t1.x = t31.x JOIN (SELECT 1 AS x) t32 ON t1.x = t32.x JOIN (SELECT 1 AS x) t33 ON t1.x = t33.x JOIN (SELECT 1 AS x) t34 ON t1.x = t34.x JOIN (SELECT 1 AS x) t35 ON t1.x = t35.x JOIN (SELECT 1 AS x) t36 ON t1.x = t36.x JOIN (SELECT 1 AS x) t37 ON t1.x = t37.x JOIN (SELECT 1 AS x) t38 ON t1.x = t38.x JOIN (SELECT 1 AS x) t39 ON t1.x = t39.x JOIN (SELECT 1 AS x) t40 ON t1.x = t40.x JOIN (SELECT 1 AS x) t41 ON t1.x = t41.x JOIN (SELECT 1 AS x) t42 ON t1.x = t42.x JOIN (SELECT 1 AS x) t43 ON t1.x = t43.x JOIN (SELECT 1 AS x) t44 ON t1.x = t44.x JOIN (SELECT 1 AS x) t45 ON t1.x = t45.x JOIN (SELECT 1 AS x) t46 ON t1.x = t46.x JOIN (SELECT 1 AS x) t47 ON t1.x = t47.x JOIN (SELECT 1 AS x) t48 ON t1.x = t48.x JOIN (SELECT 1 AS x) t49 ON t1.x = t49.x JOIN (SELECT 1 AS x) t50 ON t1.x = t50.x JOIN (SELECT 1 AS x) t51 ON t1.x = t51.x JOIN (SELECT 1 AS x) t52 ON t1.x = t52.x JOIN (SELECT 1 AS x) t53 ON t1.x = t53.x JOIN (SELECT 1 AS x) t54 ON t1.x = t54.x JOIN (SELECT 1 AS x) t55 ON 
t1.x = t55.x JOIN (SELECT 1 AS x) t56 ON t1.x = t56.x JOIN (SELECT 1 AS x) t57 ON t1.x = t57.x JOIN (SELECT 1 AS x) t58 ON t1.x = t58.x JOIN (SELECT 1 AS x) t59 ON t1.x = t59.x JOIN (SELECT 1 AS x) t60 ON t1.x = t60.x JOIN (SELECT 1 AS x) t61 ON t1.x = t61.x JOIN (SELECT 1 AS x) t62 ON t1.x = t62.x JOIN (SELECT 1 AS x) t63 ON t1.x = t63.x JOIN (SELECT 1 AS x) t64 ON t1.x = t64.x JOIN (SELECT 1 AS x) t65 ON t1.x = t65.x JOIN (SELECT 1 AS x) t66 ON t1.x = t66.x JOIN (SELECT 1 AS x) t67 ON t1.x = t67.x JOIN (SELECT 1 AS x) t68 ON t1.x = t68.x JOIN (SELECT 1 AS x) t69 ON t1.x = t69.x JOIN (SELECT 1 AS x) t70 ON t1.x = t70.x JOIN (SELECT 1 AS x) t71 ON t1.x = t71.x JOIN (SELECT 1 AS x) t72 ON t1.x = t72.x JOIN (SELECT 1 AS x) t73 ON t1.x = t73.x JOIN (SELECT 1 AS x) t74 ON t1.x = t74.x JOIN (SELECT 1 AS x) t75 ON t1.x = t75.x JOIN (SELECT 1 AS x) t76 ON t1.x = t76.x JOIN (SELECT 1 AS x) t77 ON t1.x = t77.x JOIN (SELECT 1 AS x) t78 ON t1.x = t78.x JOIN (SELECT 1 AS x) t79 ON t1.x = t79.x JOIN (SELECT 1 AS x) t80 ON t1.x = t80.x JOIN (SELECT 1 AS x) t81 ON t1.x = t81.x JOIN (SELECT 1 AS x) t82 ON t1.x = t82.x JOIN (SELECT 1 AS x) t83 ON t1.x = t83.x JOIN (SELECT 1 AS x) t84 ON t1.x = t84.x JOIN (SELECT 1 AS x) t85 ON t1.x = t85.x JOIN (SELECT 1 AS x) t86 ON t1.x = t86.x JOIN (SELECT 1 AS x) t87 ON t1.x = t87.x JOIN (SELECT 1 AS x) t88 ON t1.x = t88.x JOIN (SELECT 1 AS x) t89 ON t1.x = t89.x JOIN (SELECT 1 AS x) t90 ON t1.x = t90.x JOIN (SELECT 1 AS x) t91 ON t1.x = t91.x JOIN (SELECT 1 AS x) t92 ON t1.x = t92.x JOIN (SELECT 1 AS x) t93 ON t1.x = t93.x JOIN (SELECT 1 AS x) t94 ON t1.x = t94.x JOIN (SELECT 1 AS x) t95 ON t1.x = t95.x JOIN (SELECT 1 AS x) t96 ON t1.x = t96.x JOIN (SELECT 1 AS x) t97 ON t1.x = t97.x JOIN (SELECT 1 AS x) t98 ON t1.x = t98.x JOIN (SELECT 1 AS x) t99 ON t1.x = t99.x JOIN (SELECT 1 AS x) t100 ON t1.x = t100.x JOIN (SELECT 1 AS x) t101 ON t1.x = t101.x JOIN (SELECT 1 AS x) t102 ON t1.x = t102.x JOIN (SELECT 1 AS x) t103 ON t1.x = t103.x JOIN (SELECT 1 AS x) t104 ON t1.x = t104.x JOIN (SELECT 1 AS x) t105 ON t1.x = t105.x JOIN (SELECT 1 AS x) t106 ON t1.x = t106.x JOIN (SELECT 1 AS x) t107 ON t1.x = t107.x JOIN (SELECT 1 AS x) t108 ON t1.x = t108.x JOIN (SELECT 1 AS x) t109 ON t1.x = t109.x JOIN (SELECT 1 AS x) t110 ON t1.x = t110.x JOIN (SELECT 1 AS x) t111 ON t1.x = t111.x JOIN (SELECT 1 AS x) t112 ON t1.x = t112.x JOIN (SELECT 1 AS x) t113 ON t1.x = t113.x JOIN (SELECT 1 AS x) t114 ON t1.x = t114.x JOIN (SELECT 1 AS x) t115 ON t1.x = t115.x JOIN (SELECT 1 AS x) t116 ON t1.x = t116.x JOIN (SELECT 1 AS x) t117 ON t1.x = t117.x JOIN (SELECT 1 AS x) t118 ON t1.x = t118.x JOIN (SELECT 1 AS x) t119 ON t1.x = t119.x JOIN (SELECT 1 AS x) t120 ON t1.x = t120.x JOIN (SELECT 1 AS x) t121 ON t1.x = t121.x JOIN (SELECT 1 AS x) t122 ON t1.x = t122.x JOIN (SELECT 1 AS x) t123 ON t1.x = t123.x JOIN (SELECT 1 AS x) t124 ON t1.x = t124.x JOIN (SELECT 1 AS x) t125 ON t1.x = t125.x JOIN (SELECT 1 AS x) t126 ON t1.x = t126.x JOIN (SELECT 1 AS x) t127 ON t1.x = t127.x JOIN (SELECT 1 AS x) t128 ON t1.x = t128.x JOIN (SELECT 1 AS x) t129 ON t1.x = t129.x JOIN (SELECT 1 AS x) t130 ON t1.x = t130.x JOIN (SELECT 1 AS x) t131 ON t1.x = t131.x JOIN (SELECT 1 AS x) t132 ON t1.x = t132.x JOIN (SELECT 1 AS x) t133 ON t1.x = t133.x JOIN (SELECT 1 AS x) t134 ON t1.x = t134.x JOIN (SELECT 1 AS x) t135 ON t1.x = t135.x JOIN (SELECT 1 AS x) t136 ON t1.x = t136.x JOIN (SELECT 1 AS x) t137 ON t1.x = t137.x JOIN (SELECT 1 AS x) t138 ON t1.x = t138.x JOIN (SELECT 1 AS x) t139 ON t1.x = t139.x JOIN (SELECT 1 AS 
x) t140 ON t1.x = t140.x JOIN (SELECT 1 AS x) t141 ON t1.x = t141.x JOIN (SELECT 1 AS x) t142 ON t1.x = t142.x JOIN (SELECT 1 AS x) t143 ON t1.x = t143.x JOIN (SELECT 1 AS x) t144 ON t1.x = t144.x JOIN (SELECT 1 AS x) t145 ON t1.x = t145.x JOIN (SELECT 1 AS x) t146 ON t1.x = t146.x JOIN (SELECT 1 AS x) t147 ON t1.x = t147.x JOIN (SELECT 1 AS x) t148 ON t1.x = t148.x JOIN (SELECT 1 AS x) t149 ON t1.x = t149.x JOIN (SELECT 1 AS x) t150 ON t1.x = t150.x JOIN (SELECT 1 AS x) t151 ON t1.x = t151.x JOIN (SELECT 1 AS x) t152 ON t1.x = t152.x JOIN (SELECT 1 AS x) t153 ON t1.x = t153.x JOIN (SELECT 1 AS x) t154 ON t1.x = t154.x JOIN (SELECT 1 AS x) t155 ON t1.x = t155.x JOIN (SELECT 1 AS x) t156 ON t1.x = t156.x JOIN (SELECT 1 AS x) t157 ON t1.x = t157.x JOIN (SELECT 1 AS x) t158 ON t1.x = t158.x JOIN (SELECT 1 AS x) t159 ON t1.x = t159.x JOIN (SELECT 1 AS x) t160 ON t1.x = t160.x JOIN (SELECT 1 AS x) t161 ON t1.x = t161.x JOIN (SELECT 1 AS x) t162 ON t1.x = t162.x JOIN (SELECT 1 AS x) t163 ON t1.x = t163.x JOIN (SELECT 1 AS x) t164 ON t1.x = t164.x JOIN (SELECT 1 AS x) t165 ON t1.x = t165.x JOIN (SELECT 1 AS x) t166 ON t1.x = t166.x JOIN (SELECT 1 AS x) t167 ON t1.x = t167.x JOIN (SELECT 1 AS x) t168 ON t1.x = t168.x JOIN (SELECT 1 AS x) t169 ON t1.x = t169.x JOIN (SELECT 1 AS x) t170 ON t1.x = t170.x JOIN (SELECT 1 AS x) t171 ON t1.x = t171.x JOIN (SELECT 1 AS x) t172 ON t1.x = t172.x JOIN (SELECT 1 AS x) t173 ON t1.x = t173.x JOIN (SELECT 1 AS x) t174 ON t1.x = t174.x JOIN (SELECT 1 AS x) t175 ON t1.x = t175.x JOIN (SELECT 1 AS x) t176 ON t1.x = t176.x JOIN (SELECT 1 AS x) t177 ON t1.x = t177.x JOIN (SELECT 1 AS x) t178 ON t1.x = t178.x JOIN (SELECT 1 AS x) t179 ON t1.x = t179.x JOIN (SELECT 1 AS x) t180 ON t1.x = t180.x JOIN (SELECT 1 AS x) t181 ON t1.x = t181.x JOIN (SELECT 1 AS x) t182 ON t1.x = t182.x JOIN (SELECT 1 AS x) t183 ON t1.x = t183.x JOIN (SELECT 1 AS x) t184 ON t1.x = t184.x JOIN (SELECT 1 AS x) t185 ON t1.x = t185.x JOIN (SELECT 1 AS x) t186 ON t1.x = t186.x JOIN (SELECT 1 AS x) t187 ON t1.x = t187.x JOIN (SELECT 1 AS x) t188 ON t1.x = t188.x JOIN (SELECT 1 AS x) t189 ON t1.x = t189.x JOIN (SELECT 1 AS x) t190 ON t1.x = t190.x JOIN (SELECT 1 AS x) t191 ON t1.x = t191.x JOIN (SELECT 1 AS x) t192 ON t1.x = t192.x JOIN (SELECT 1 AS x) t193 ON t1.x = t193.x JOIN (SELECT 1 AS x) t194 ON t1.x = t194.x JOIN (SELECT 1 AS x) t195 ON t1.x = t195.x JOIN (SELECT 1 AS x) t196 ON t1.x = t196.x JOIN (SELECT 1 AS x) t197 ON t1.x = t197.x JOIN (SELECT 1 AS x) t198 ON t1.x = t198.x JOIN (SELECT 1 AS x) t199 ON t1.x = t199.x JOIN (SELECT 1 AS x) t200 ON t1.x = t200.x JOIN (SELECT 1 AS x) t201 ON t1.x = t201.x JOIN (SELECT 1 AS x) t202 ON t1.x = t202.x JOIN (SELECT 1 AS x) t203 ON t1.x = t203.x JOIN (SELECT 1 AS x) t204 ON t1.x = t204.x JOIN (SELECT 1 AS x) t205 ON t1.x = t205.x JOIN (SELECT 1 AS x) t206 ON t1.x = t206.x JOIN (SELECT 1 AS x) t207 ON t1.x = t207.x JOIN (SELECT 1 AS x) t208 ON t1.x = t208.x JOIN (SELECT 1 AS x) t209 ON t1.x = t209.x JOIN (SELECT 1 AS x) t210 ON t1.x = t210.x JOIN (SELECT 1 AS x) t211 ON t1.x = t211.x JOIN (SELECT 1 AS x) t212 ON t1.x = t212.x JOIN (SELECT 1 AS x) t213 ON t1.x = t213.x JOIN (SELECT 1 AS x) t214 ON t1.x = t214.x JOIN (SELECT 1 AS x) t215 ON t1.x = t215.x JOIN (SELECT 1 AS x) t216 ON t1.x = t216.x JOIN (SELECT 1 AS x) t217 ON t1.x = t217.x JOIN (SELECT 1 AS x) t218 ON t1.x = t218.x JOIN (SELECT 1 AS x) t219 ON t1.x = t219.x JOIN (SELECT 1 AS x) t220 ON t1.x = t220.x JOIN (SELECT 1 AS x) t221 ON t1.x = t221.x JOIN (SELECT 1 AS x) t222 ON t1.x = t222.x 
JOIN (SELECT 1 AS x) t223 ON t1.x = t223.x JOIN (SELECT 1 AS x) t224 ON t1.x = t224.x JOIN (SELECT 1 AS x) t225 ON t1.x = t225.x JOIN (SELECT 1 AS x) t226 ON t1.x = t226.x JOIN (SELECT 1 AS x) t227 ON t1.x = t227.x JOIN (SELECT 1 AS x) t228 ON t1.x = t228.x JOIN (SELECT 1 AS x) t229 ON t1.x = t229.x JOIN (SELECT 1 AS x) t230 ON t1.x = t230.x JOIN (SELECT 1 AS x) t231 ON t1.x = t231.x JOIN (SELECT 1 AS x) t232 ON t1.x = t232.x JOIN (SELECT 1 AS x) t233 ON t1.x = t233.x JOIN (SELECT 1 AS x) t234 ON t1.x = t234.x JOIN (SELECT 1 AS x) t235 ON t1.x = t235.x JOIN (SELECT 1 AS x) t236 ON t1.x = t236.x JOIN (SELECT 1 AS x) t237 ON t1.x = t237.x JOIN (SELECT 1 AS x) t238 ON t1.x = t238.x JOIN (SELECT 1 AS x) t239 ON t1.x = t239.x JOIN (SELECT 1 AS x) t240 ON t1.x = t240.x JOIN (SELECT 1 AS x) t241 ON t1.x = t241.x JOIN (SELECT 1 AS x) t242 ON t1.x = t242.x JOIN (SELECT 1 AS x) t243 ON t1.x = t243.x JOIN (SELECT 1 AS x) t244 ON t1.x = t244.x JOIN (SELECT 1 AS x) t245 ON t1.x = t245.x JOIN (SELECT 1 AS x) t246 ON t1.x = t246.x JOIN (SELECT 1 AS x) t247 ON t1.x = t247.x JOIN (SELECT 1 AS x) t248 ON t1.x = t248.x JOIN (SELECT 1 AS x) t249 ON t1.x = t249.x JOIN (SELECT 1 AS x) t250 ON t1.x = t250.x JOIN (SELECT 1 AS x) t251 ON t1.x = t251.x JOIN (SELECT 1 AS x) t252 ON t1.x = t252.x JOIN (SELECT 1 AS x) t253 ON t1.x = t253.x JOIN (SELECT 1 AS x) t254 ON t1.x = t254.x JOIN (SELECT 1 AS x) t255 ON t1.x = t255.x JOIN (SELECT 1 AS x) t256 ON t1.x = t256.x JOIN (SELECT 1 AS x) t257 ON t1.x = t257.x JOIN (SELECT 1 AS x) t258 ON t1.x = t258.x JOIN (SELECT 1 AS x) t259 ON t1.x = t259.x JOIN (SELECT 1 AS x) t260 ON t1.x = t260.x JOIN (SELECT 1 AS x) t261 ON t1.x = t261.x JOIN (SELECT 1 AS x) t262 ON t1.x = t262.x JOIN (SELECT 1 AS x) t263 ON t1.x = t263.x JOIN (SELECT 1 AS x) t264 ON t1.x = t264.x JOIN (SELECT 1 AS x) t265 ON t1.x = t265.x JOIN (SELECT 1 AS x) t266 ON t1.x = t266.x JOIN (SELECT 1 AS x) t267 ON t1.x = t267.x JOIN (SELECT 1 AS x) t268 ON t1.x = t268.x JOIN (SELECT 1 AS x) t269 ON t1.x = t269.x JOIN (SELECT 1 AS x) t270 ON t1.x = t270.x JOIN (SELECT 1 AS x) t271 ON t1.x = t271.x JOIN (SELECT 1 AS x) t272 ON t1.x = t272.x JOIN (SELECT 1 AS x) t273 ON t1.x = t273.x JOIN (SELECT 1 AS x) t274 ON t1.x = t274.x JOIN (SELECT 1 AS x) t275 ON t1.x = t275.x JOIN (SELECT 1 AS x) t276 ON t1.x = t276.x JOIN (SELECT 1 AS x) t277 ON t1.x = t277.x JOIN (SELECT 1 AS x) t278 ON t1.x = t278.x JOIN (SELECT 1 AS x) t279 ON t1.x = t279.x JOIN (SELECT 1 AS x) t280 ON t1.x = t280.x JOIN (SELECT 1 AS x) t281 ON t1.x = t281.x JOIN (SELECT 1 AS x) t282 ON t1.x = t282.x JOIN (SELECT 1 AS x) t283 ON t1.x = t283.x JOIN (SELECT 1 AS x) t284 ON t1.x = t284.x JOIN (SELECT 1 AS x) t285 ON t1.x = t285.x JOIN (SELECT 1 AS x) t286 ON t1.x = t286.x JOIN (SELECT 1 AS x) t287 ON t1.x = t287.x JOIN (SELECT 1 AS x) t288 ON t1.x = t288.x JOIN (SELECT 1 AS x) t289 ON t1.x = t289.x JOIN (SELECT 1 AS x) t290 ON t1.x = t290.x JOIN (SELECT 1 AS x) t291 ON t1.x = t291.x JOIN (SELECT 1 AS x) t292 ON t1.x = t292.x JOIN (SELECT 1 AS x) t293 ON t1.x = t293.x JOIN (SELECT 1 AS x) t294 ON t1.x = t294.x JOIN (SELECT 1 AS x) t295 ON t1.x = t295.x JOIN (SELECT 1 AS x) t296 ON t1.x = t296.x JOIN (SELECT 1 AS x) t297 ON t1.x = t297.x JOIN (SELECT 1 AS x) t298 ON t1.x = t298.x JOIN (SELECT 1 AS x) t299 ON t1.x = t299.x JOIN (SELECT 1 AS x) t300 ON t1.x = t300.x JOIN (SELECT 1 AS x) t301 ON t1.x = t301.x JOIN (SELECT 1 AS x) t302 ON t1.x = t302.x JOIN (SELECT 1 AS x) t303 ON t1.x = t303.x JOIN (SELECT 1 AS x) t304 ON t1.x = t304.x JOIN (SELECT 1 AS x) t305 ON 
t1.x = t305.x JOIN (SELECT 1 AS x) t306 ON t1.x = t306.x JOIN (SELECT 1 AS x) t307 ON t1.x = t307.x JOIN (SELECT 1 AS x) t308 ON t1.x = t308.x JOIN (SELECT 1 AS x) t309 ON t1.x = t309.x JOIN (SELECT 1 AS x) t310 ON t1.x = t310.x JOIN (SELECT 1 AS x) t311 ON t1.x = t311.x JOIN (SELECT 1 AS x) t312 ON t1.x = t312.x JOIN (SELECT 1 AS x) t313 ON t1.x = t313.x JOIN (SELECT 1 AS x) t314 ON t1.x = t314.x JOIN (SELECT 1 AS x) t315 ON t1.x = t315.x JOIN (SELECT 1 AS x) t316 ON t1.x = t316.x JOIN (SELECT 1 AS x) t317 ON t1.x = t317.x JOIN (SELECT 1 AS x) t318 ON t1.x = t318.x JOIN (SELECT 1 AS x) t319 ON t1.x = t319.x JOIN (SELECT 1 AS x) t320 ON t1.x = t320.x JOIN (SELECT 1 AS x) t321 ON t1.x = t321.x JOIN (SELECT 1 AS x) t322 ON t1.x = t322.x JOIN (SELECT 1 AS x) t323 ON t1.x = t323.x JOIN (SELECT 1 AS x) t324 ON t1.x = t324.x JOIN (SELECT 1 AS x) t325 ON t1.x = t325.x JOIN (SELECT 1 AS x) t326 ON t1.x = t326.x JOIN (SELECT 1 AS x) t327 ON t1.x = t327.x JOIN (SELECT 1 AS x) t328 ON t1.x = t328.x JOIN (SELECT 1 AS x) t329 ON t1.x = t329.x JOIN (SELECT 1 AS x) t330 ON t1.x = t330.x JOIN (SELECT 1 AS x) t331 ON t1.x = t331.x JOIN (SELECT 1 AS x) t332 ON t1.x = t332.x JOIN (SELECT 1 AS x) t333 ON t1.x = t333.x JOIN (SELECT 1 AS x) t334 ON t1.x = t334.x JOIN (SELECT 1 AS x) t335 ON t1.x = t335.x JOIN (SELECT 1 AS x) t336 ON t1.x = t336.x JOIN (SELECT 1 AS x) t337 ON t1.x = t337.x JOIN (SELECT 1 AS x) t338 ON t1.x = t338.x JOIN (SELECT 1 AS x) t339 ON t1.x = t339.x JOIN (SELECT 1 AS x) t340 ON t1.x = t340.x JOIN (SELECT 1 AS x) t341 ON t1.x = t341.x JOIN (SELECT 1 AS x) t342 ON t1.x = t342.x JOIN (SELECT 1 AS x) t343 ON t1.x = t343.x JOIN (SELECT 1 AS x) t344 ON t1.x = t344.x JOIN (SELECT 1 AS x) t345 ON t1.x = t345.x JOIN (SELECT 1 AS x) t346 ON t1.x = t346.x JOIN (SELECT 1 AS x) t347 ON t1.x = t347.x JOIN (SELECT 1 AS x) t348 ON t1.x = t348.x JOIN (SELECT 1 AS x) t349 ON t1.x = t349.x JOIN (SELECT 1 AS x) t350 ON t1.x = t350.x JOIN (SELECT 1 AS x) t351 ON t1.x = t351.x JOIN (SELECT 1 AS x) t352 ON t1.x = t352.x JOIN (SELECT 1 AS x) t353 ON t1.x = t353.x JOIN (SELECT 1 AS x) t354 ON t1.x = t354.x JOIN (SELECT 1 AS x) t355 ON t1.x = t355.x JOIN (SELECT 1 AS x) t356 ON t1.x = t356.x JOIN (SELECT 1 AS x) t357 ON t1.x = t357.x JOIN (SELECT 1 AS x) t358 ON t1.x = t358.x JOIN (SELECT 1 AS x) t359 ON t1.x = t359.x JOIN (SELECT 1 AS x) t360 ON t1.x = t360.x JOIN (SELECT 1 AS x) t361 ON t1.x = t361.x JOIN (SELECT 1 AS x) t362 ON t1.x = t362.x JOIN (SELECT 1 AS x) t363 ON t1.x = t363.x JOIN (SELECT 1 AS x) t364 ON t1.x = t364.x JOIN (SELECT 1 AS x) t365 ON t1.x = t365.x JOIN (SELECT 1 AS x) t366 ON t1.x = t366.x JOIN (SELECT 1 AS x) t367 ON t1.x = t367.x JOIN (SELECT 1 AS x) t368 ON t1.x = t368.x JOIN (SELECT 1 AS x) t369 ON t1.x = t369.x JOIN (SELECT 1 AS x) t370 ON t1.x = t370.x JOIN (SELECT 1 AS x) t371 ON t1.x = t371.x JOIN (SELECT 1 AS x) t372 ON t1.x = t372.x JOIN (SELECT 1 AS x) t373 ON t1.x = t373.x JOIN (SELECT 1 AS x) t374 ON t1.x = t374.x JOIN (SELECT 1 AS x) t375 ON t1.x = t375.x JOIN (SELECT 1 AS x) t376 ON t1.x = t376.x JOIN (SELECT 1 AS x) t377 ON t1.x = t377.x JOIN (SELECT 1 AS x) t378 ON t1.x = t378.x JOIN (SELECT 1 AS x) t379 ON t1.x = t379.x JOIN (SELECT 1 AS x) t380 ON t1.x = t380.x JOIN (SELECT 1 AS x) t381 ON t1.x = t381.x JOIN (SELECT 1 AS x) t382 ON t1.x = t382.x JOIN (SELECT 1 AS x) t383 ON t1.x = t383.x JOIN (SELECT 1 AS x) t384 ON t1.x = t384.x JOIN (SELECT 1 AS x) t385 ON t1.x = t385.x JOIN (SELECT 1 AS x) t386 ON t1.x = t386.x JOIN (SELECT 1 AS x) t387 ON t1.x = t387.x JOIN (SELECT 1 
AS x) t388 ON t1.x = t388.x JOIN (SELECT 1 AS x) t389 ON t1.x = t389.x JOIN (SELECT 1 AS x) t390 ON t1.x = t390.x JOIN (SELECT 1 AS x) t391 ON t1.x = t391.x JOIN (SELECT 1 AS x) t392 ON t1.x = t392.x JOIN (SELECT 1 AS x) t393 ON t1.x = t393.x JOIN (SELECT 1 AS x) t394 ON t1.x = t394.x JOIN (SELECT 1 AS x) t395 ON t1.x = t395.x JOIN (SELECT 1 AS x) t396 ON t1.x = t396.x JOIN (SELECT 1 AS x) t397 ON t1.x = t397.x JOIN (SELECT 1 AS x) t398 ON t1.x = t398.x JOIN (SELECT 1 AS x) t399 ON t1.x = t399.x JOIN (SELECT 1 AS x) t400 ON t1.x = t400.x JOIN (SELECT 1 AS x) t401 ON t1.x = t401.x JOIN (SELECT 1 AS x) t402 ON t1.x = t402.x JOIN (SELECT 1 AS x) t403 ON t1.x = t403.x JOIN (SELECT 1 AS x) t404 ON t1.x = t404.x JOIN (SELECT 1 AS x) t405 ON t1.x = t405.x JOIN (SELECT 1 AS x) t406 ON t1.x = t406.x JOIN (SELECT 1 AS x) t407 ON t1.x = t407.x JOIN (SELECT 1 AS x) t408 ON t1.x = t408.x JOIN (SELECT 1 AS x) t409 ON t1.x = t409.x JOIN (SELECT 1 AS x) t410 ON t1.x = t410.x JOIN (SELECT 1 AS x) t411 ON t1.x = t411.x JOIN (SELECT 1 AS x) t412 ON t1.x = t412.x JOIN (SELECT 1 AS x) t413 ON t1.x = t413.x JOIN (SELECT 1 AS x) t414 ON t1.x = t414.x JOIN (SELECT 1 AS x) t415 ON t1.x = t415.x JOIN (SELECT 1 AS x) t416 ON t1.x = t416.x JOIN (SELECT 1 AS x) t417 ON t1.x = t417.x JOIN (SELECT 1 AS x) t418 ON t1.x = t418.x JOIN (SELECT 1 AS x) t419 ON t1.x = t419.x JOIN (SELECT 1 AS x) t420 ON t1.x = t420.x JOIN (SELECT 1 AS x) t421 ON t1.x = t421.x JOIN (SELECT 1 AS x) t422 ON t1.x = t422.x JOIN (SELECT 1 AS x) t423 ON t1.x = t423.x JOIN (SELECT 1 AS x) t424 ON t1.x = t424.x JOIN (SELECT 1 AS x) t425 ON t1.x = t425.x JOIN (SELECT 1 AS x) t426 ON t1.x = t426.x JOIN (SELECT 1 AS x) t427 ON t1.x = t427.x JOIN (SELECT 1 AS x) t428 ON t1.x = t428.x JOIN (SELECT 1 AS x) t429 ON t1.x = t429.x JOIN (SELECT 1 AS x) t430 ON t1.x = t430.x JOIN (SELECT 1 AS x) t431 ON t1.x = t431.x JOIN (SELECT 1 AS x) t432 ON t1.x = t432.x JOIN (SELECT 1 AS x) t433 ON t1.x = t433.x JOIN (SELECT 1 AS x) t434 ON t1.x = t434.x JOIN (SELECT 1 AS x) t435 ON t1.x = t435.x JOIN (SELECT 1 AS x) t436 ON t1.x = t436.x JOIN (SELECT 1 AS x) t437 ON t1.x = t437.x JOIN (SELECT 1 AS x) t438 ON t1.x = t438.x JOIN (SELECT 1 AS x) t439 ON t1.x = t439.x JOIN (SELECT 1 AS x) t440 ON t1.x = t440.x JOIN (SELECT 1 AS x) t441 ON t1.x = t441.x JOIN (SELECT 1 AS x) t442 ON t1.x = t442.x JOIN (SELECT 1 AS x) t443 ON t1.x = t443.x JOIN (SELECT 1 AS x) t444 ON t1.x = t444.x JOIN (SELECT 1 AS x) t445 ON t1.x = t445.x JOIN (SELECT 1 AS x) t446 ON t1.x = t446.x JOIN (SELECT 1 AS x) t447 ON t1.x = t447.x JOIN (SELECT 1 AS x) t448 ON t1.x = t448.x JOIN (SELECT 1 AS x) t449 ON t1.x = t449.x JOIN (SELECT 1 AS x) t450 ON t1.x = t450.x JOIN (SELECT 1 AS x) t451 ON t1.x = t451.x JOIN (SELECT 1 AS x) t452 ON t1.x = t452.x JOIN (SELECT 1 AS x) t453 ON t1.x = t453.x JOIN (SELECT 1 AS x) t454 ON t1.x = t454.x JOIN (SELECT 1 AS x) t455 ON t1.x = t455.x JOIN (SELECT 1 AS x) t456 ON t1.x = t456.x JOIN (SELECT 1 AS x) t457 ON t1.x = t457.x JOIN (SELECT 1 AS x) t458 ON t1.x = t458.x JOIN (SELECT 1 AS x) t459 ON t1.x = t459.x JOIN (SELECT 1 AS x) t460 ON t1.x = t460.x JOIN (SELECT 1 AS x) t461 ON t1.x = t461.x JOIN (SELECT 1 AS x) t462 ON t1.x = t462.x JOIN (SELECT 1 AS x) t463 ON t1.x = t463.x JOIN (SELECT 1 AS x) t464 ON t1.x = t464.x JOIN (SELECT 1 AS x) t465 ON t1.x = t465.x JOIN (SELECT 1 AS x) t466 ON t1.x = t466.x JOIN (SELECT 1 AS x) t467 ON t1.x = t467.x JOIN (SELECT 1 AS x) t468 ON t1.x = t468.x JOIN (SELECT 1 AS x) t469 ON t1.x = t469.x JOIN (SELECT 1 AS x) t470 ON t1.x = t470.x 
JOIN (SELECT 1 AS x) t471 ON t1.x = t471.x JOIN (SELECT 1 AS x) t472 ON t1.x = t472.x JOIN (SELECT 1 AS x) t473 ON t1.x = t473.x JOIN (SELECT 1 AS x) t474 ON t1.x = t474.x JOIN (SELECT 1 AS x) t475 ON t1.x = t475.x JOIN (SELECT 1 AS x) t476 ON t1.x = t476.x JOIN (SELECT 1 AS x) t477 ON t1.x = t477.x JOIN (SELECT 1 AS x) t478 ON t1.x = t478.x JOIN (SELECT 1 AS x) t479 ON t1.x = t479.x JOIN (SELECT 1 AS x) t480 ON t1.x = t480.x JOIN (SELECT 1 AS x) t481 ON t1.x = t481.x JOIN (SELECT 1 AS x) t482 ON t1.x = t482.x JOIN (SELECT 1 AS x) t483 ON t1.x = t483.x JOIN (SELECT 1 AS x) t484 ON t1.x = t484.x JOIN (SELECT 1 AS x) t485 ON t1.x = t485.x JOIN (SELECT 1 AS x) t486 ON t1.x = t486.x JOIN (SELECT 1 AS x) t487 ON t1.x = t487.x JOIN (SELECT 1 AS x) t488 ON t1.x = t488.x JOIN (SELECT 1 AS x) t489 ON t1.x = t489.x JOIN (SELECT 1 AS x) t490 ON t1.x = t490.x JOIN (SELECT 1 AS x) t491 ON t1.x = t491.x JOIN (SELECT 1 AS x) t492 ON t1.x = t492.x JOIN (SELECT 1 AS x) t493 ON t1.x = t493.x JOIN (SELECT 1 AS x) t494 ON t1.x = t494.x JOIN (SELECT 1 AS x) t495 ON t1.x = t495.x JOIN (SELECT 1 AS x) t496 ON t1.x = t496.x JOIN (SELECT 1 AS x) t497 ON t1.x = t497.x JOIN (SELECT 1 AS x) t498 ON t1.x = t498.x JOIN (SELECT 1 AS x) t499 ON t1.x = t499.x JOIN (SELECT 1 AS x) t500 ON t1.x = t500.x JOIN (SELECT 1 AS x) t501 ON t1.x = t501.x JOIN (SELECT 1 AS x) t502 ON t1.x = t502.x JOIN (SELECT 1 AS x) t503 ON t1.x = t503.x JOIN (SELECT 1 AS x) t504 ON t1.x = t504.x JOIN (SELECT 1 AS x) t505 ON t1.x = t505.x JOIN (SELECT 1 AS x) t506 ON t1.x = t506.x JOIN (SELECT 1 AS x) t507 ON t1.x = t507.x JOIN (SELECT 1 AS x) t508 ON t1.x = t508.x JOIN (SELECT 1 AS x) t509 ON t1.x = t509.x JOIN (SELECT 1 AS x) t510 ON t1.x = t510.x JOIN (SELECT 1 AS x) t511 ON t1.x = t511.x JOIN (SELECT 1 AS x) t512 ON t1.x = t512.x JOIN (SELECT 1 AS x) t513 ON t1.x = t513.x JOIN (SELECT 1 AS x) t514 ON t1.x = t514.x JOIN (SELECT 1 AS x) t515 ON t1.x = t515.x JOIN (SELECT 1 AS x) t516 ON t1.x = t516.x JOIN (SELECT 1 AS x) t517 ON t1.x = t517.x JOIN (SELECT 1 AS x) t518 ON t1.x = t518.x JOIN (SELECT 1 AS x) t519 ON t1.x = t519.x JOIN (SELECT 1 AS x) t520 ON t1.x = t520.x JOIN (SELECT 1 AS x) t521 ON t1.x = t521.x JOIN (SELECT 1 AS x) t522 ON t1.x = t522.x JOIN (SELECT 1 AS x) t523 ON t1.x = t523.x JOIN (SELECT 1 AS x) t524 ON t1.x = t524.x JOIN (SELECT 1 AS x) t525 ON t1.x = t525.x JOIN (SELECT 1 AS x) t526 ON t1.x = t526.x JOIN (SELECT 1 AS x) t527 ON t1.x = t527.x JOIN (SELECT 1 AS x) t528 ON t1.x = t528.x JOIN (SELECT 1 AS x) t529 ON t1.x = t529.x JOIN (SELECT 1 AS x) t530 ON t1.x = t530.x JOIN (SELECT 1 AS x) t531 ON t1.x = t531.x JOIN (SELECT 1 AS x) t532 ON t1.x = t532.x JOIN (SELECT 1 AS x) t533 ON t1.x = t533.x JOIN (SELECT 1 AS x) t534 ON t1.x = t534.x JOIN (SELECT 1 AS x) t535 ON t1.x = t535.x JOIN (SELECT 1 AS x) t536 ON t1.x = t536.x JOIN (SELECT 1 AS x) t537 ON t1.x = t537.x JOIN (SELECT 1 AS x) t538 ON t1.x = t538.x JOIN (SELECT 1 AS x) t539 ON t1.x = t539.x JOIN (SELECT 1 AS x) t540 ON t1.x = t540.x JOIN (SELECT 1 AS x) t541 ON t1.x = t541.x JOIN (SELECT 1 AS x) t542 ON t1.x = t542.x JOIN (SELECT 1 AS x) t543 ON t1.x = t543.x JOIN (SELECT 1 AS x) t544 ON t1.x = t544.x JOIN (SELECT 1 AS x) t545 ON t1.x = t545.x JOIN (SELECT 1 AS x) t546 ON t1.x = t546.x JOIN (SELECT 1 AS x) t547 ON t1.x = t547.x JOIN (SELECT 1 AS x) t548 ON t1.x = t548.x JOIN (SELECT 1 AS x) t549 ON t1.x = t549.x JOIN (SELECT 1 AS x) t550 ON t1.x = t550.x JOIN (SELECT 1 AS x) t551 ON t1.x = t551.x JOIN (SELECT 1 AS x) t552 ON t1.x = t552.x JOIN (SELECT 1 AS x) t553 ON 
t1.x = t553.x JOIN (SELECT 1 AS x) t554 ON t1.x = t554.x JOIN (SELECT 1 AS x) t555 ON t1.x = t555.x JOIN (SELECT 1 AS x) t556 ON t1.x = t556.x JOIN (SELECT 1 AS x) t557 ON t1.x = t557.x JOIN (SELECT 1 AS x) t558 ON t1.x = t558.x JOIN (SELECT 1 AS x) t559 ON t1.x = t559.x JOIN (SELECT 1 AS x) t560 ON t1.x = t560.x JOIN (SELECT 1 AS x) t561 ON t1.x = t561.x JOIN (SELECT 1 AS x) t562 ON t1.x = t562.x JOIN (SELECT 1 AS x) t563 ON t1.x = t563.x JOIN (SELECT 1 AS x) t564 ON t1.x = t564.x JOIN (SELECT 1 AS x) t565 ON t1.x = t565.x JOIN (SELECT 1 AS x) t566 ON t1.x = t566.x JOIN (SELECT 1 AS x) t567 ON t1.x = t567.x JOIN (SELECT 1 AS x) t568 ON t1.x = t568.x JOIN (SELECT 1 AS x) t569 ON t1.x = t569.x JOIN (SELECT 1 AS x) t570 ON t1.x = t570.x JOIN (SELECT 1 AS x) t571 ON t1.x = t571.x JOIN (SELECT 1 AS x) t572 ON t1.x = t572.x JOIN (SELECT 1 AS x) t573 ON t1.x = t573.x JOIN (SELECT 1 AS x) t574 ON t1.x = t574.x JOIN (SELECT 1 AS x) t575 ON t1.x = t575.x JOIN (SELECT 1 AS x) t576 ON t1.x = t576.x JOIN (SELECT 1 AS x) t577 ON t1.x = t577.x JOIN (SELECT 1 AS x) t578 ON t1.x = t578.x JOIN (SELECT 1 AS x) t579 ON t1.x = t579.x JOIN (SELECT 1 AS x) t580 ON t1.x = t580.x JOIN (SELECT 1 AS x) t581 ON t1.x = t581.x JOIN (SELECT 1 AS x) t582 ON t1.x = t582.x JOIN (SELECT 1 AS x) t583 ON t1.x = t583.x JOIN (SELECT 1 AS x) t584 ON t1.x = t584.x JOIN (SELECT 1 AS x) t585 ON t1.x = t585.x JOIN (SELECT 1 AS x) t586 ON t1.x = t586.x JOIN (SELECT 1 AS x) t587 ON t1.x = t587.x JOIN (SELECT 1 AS x) t588 ON t1.x = t588.x JOIN (SELECT 1 AS x) t589 ON t1.x = t589.x JOIN (SELECT 1 AS x) t590 ON t1.x = t590.x JOIN (SELECT 1 AS x) t591 ON t1.x = t591.x JOIN (SELECT 1 AS x) t592 ON t1.x = t592.x JOIN (SELECT 1 AS x) t593 ON t1.x = t593.x JOIN (SELECT 1 AS x) t594 ON t1.x = t594.x JOIN (SELECT 1 AS x) t595 ON t1.x = t595.x JOIN (SELECT 1 AS x) t596 ON t1.x = t596.x JOIN (SELECT 1 AS x) t597 ON t1.x = t597.x JOIN (SELECT 1 AS x) t598 ON t1.x = t598.x JOIN (SELECT 1 AS x) t599 ON t1.x = t599.x JOIN (SELECT 1 AS x) t600 ON t1.x = t600.x JOIN (SELECT 1 AS x) t601 ON t1.x = t601.x JOIN (SELECT 1 AS x) t602 ON t1.x = t602.x JOIN (SELECT 1 AS x) t603 ON t1.x = t603.x JOIN (SELECT 1 AS x) t604 ON t1.x = t604.x JOIN (SELECT 1 AS x) t605 ON t1.x = t605.x JOIN (SELECT 1 AS x) t606 ON t1.x = t606.x JOIN (SELECT 1 AS x) t607 ON t1.x = t607.x JOIN (SELECT 1 AS x) t608 ON t1.x = t608.x JOIN (SELECT 1 AS x) t609 ON t1.x = t609.x JOIN (SELECT 1 AS x) t610 ON t1.x = t610.x JOIN (SELECT 1 AS x) t611 ON t1.x = t611.x JOIN (SELECT 1 AS x) t612 ON t1.x = t612.x JOIN (SELECT 1 AS x) t613 ON t1.x = t613.x JOIN (SELECT 1 AS x) t614 ON t1.x = t614.x JOIN (SELECT 1 AS x) t615 ON t1.x = t615.x JOIN (SELECT 1 AS x) t616 ON t1.x = t616.x JOIN (SELECT 1 AS x) t617 ON t1.x = t617.x JOIN (SELECT 1 AS x) t618 ON t1.x = t618.x JOIN (SELECT 1 AS x) t619 ON t1.x = t619.x JOIN (SELECT 1 AS x) t620 ON t1.x = t620.x JOIN (SELECT 1 AS x) t621 ON t1.x = t621.x JOIN (SELECT 1 AS x) t622 ON t1.x = t622.x JOIN (SELECT 1 AS x) t623 ON t1.x = t623.x JOIN (SELECT 1 AS x) t624 ON t1.x = t624.x JOIN (SELECT 1 AS x) t625 ON t1.x = t625.x JOIN (SELECT 1 AS x) t626 ON t1.x = t626.x JOIN (SELECT 1 AS x) t627 ON t1.x = t627.x JOIN (SELECT 1 AS x) t628 ON t1.x = t628.x JOIN (SELECT 1 AS x) t629 ON t1.x = t629.x JOIN (SELECT 1 AS x) t630 ON t1.x = t630.x JOIN (SELECT 1 AS x) t631 ON t1.x = t631.x JOIN (SELECT 1 AS x) t632 ON t1.x = t632.x JOIN (SELECT 1 AS x) t633 ON t1.x = t633.x JOIN (SELECT 1 AS x) t634 ON t1.x = t634.x JOIN (SELECT 1 AS x) t635 ON t1.x = t635.x JOIN (SELECT 1 
AS x) t636 ON t1.x = t636.x JOIN (SELECT 1 AS x) t637 ON t1.x = t637.x JOIN (SELECT 1 AS x) t638 ON t1.x = t638.x JOIN (SELECT 1 AS x) t639 ON t1.x = t639.x JOIN (SELECT 1 AS x) t640 ON t1.x = t640.x JOIN (SELECT 1 AS x) t641 ON t1.x = t641.x JOIN (SELECT 1 AS x) t642 ON t1.x = t642.x JOIN (SELECT 1 AS x) t643 ON t1.x = t643.x JOIN (SELECT 1 AS x) t644 ON t1.x = t644.x JOIN (SELECT 1 AS x) t645 ON t1.x = t645.x JOIN (SELECT 1 AS x) t646 ON t1.x = t646.x JOIN (SELECT 1 AS x) t647 ON t1.x = t647.x JOIN (SELECT 1 AS x) t648 ON t1.x = t648.x JOIN (SELECT 1 AS x) t649 ON t1.x = t649.x JOIN (SELECT 1 AS x) t650 ON t1.x = t650.x JOIN (SELECT 1 AS x) t651 ON t1.x = t651.x JOIN (SELECT 1 AS x) t652 ON t1.x = t652.x JOIN (SELECT 1 AS x) t653 ON t1.x = t653.x JOIN (SELECT 1 AS x) t654 ON t1.x = t654.x JOIN (SELECT 1 AS x) t655 ON t1.x = t655.x JOIN (SELECT 1 AS x) t656 ON t1.x = t656.x JOIN (SELECT 1 AS x) t657 ON t1.x = t657.x JOIN (SELECT 1 AS x) t658 ON t1.x = t658.x JOIN (SELECT 1 AS x) t659 ON t1.x = t659.x JOIN (SELECT 1 AS x) t660 ON t1.x = t660.x JOIN (SELECT 1 AS x) t661 ON t1.x = t661.x JOIN (SELECT 1 AS x) t662 ON t1.x = t662.x JOIN (SELECT 1 AS x) t663 ON t1.x = t663.x JOIN (SELECT 1 AS x) t664 ON t1.x = t664.x JOIN (SELECT 1 AS x) t665 ON t1.x = t665.x JOIN (SELECT 1 AS x) t666 ON t1.x = t666.x settings join_algorithm='hash' diff --git a/tests/performance/string_join.xml b/tests/performance/string_join.xml index a7d6837cf6c..b3ecced4600 100644 --- a/tests/performance/string_join.xml +++ b/tests/performance/string_join.xml @@ -9,7 +9,8 @@ 1 - SELECT 1 FROM hits_10m_words AS l ANY LEFT JOIN hits_10m_words AS r USING (word) FORMAT Null + SELECT 1 FROM hits_10m_words AS l ANY LEFT JOIN hits_10m_words AS r USING (word) FORMAT Null settings join_algorithm='hash' + SELECT 1 FROM hits_10m_words AS l ANY LEFT JOIN hits_10m_words AS r USING (word) FORMAT Null settings join_algorithm='parallel_hash' SELECT 1 FROM strings AS l ANY LEFT JOIN strings AS r USING (short) FORMAT Null SELECT 1 FROM strings AS l ANY LEFT JOIN strings AS r USING (long) FORMAT Null
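The performance queries in this patch pin the algorithm per query so that both code paths stay benchmarked. A rough sketch of the same knob outside the test harness follows; the table and column names (t_left, t_right, k) are hypothetical, not from the patch:

SET join_algorithm = 'parallel_hash';
-- build the right-side hash table split into buckets filled concurrently
SELECT count() FROM t_left AS l INNER JOIN t_right AS r ON l.k = r.k;

With join_algorithm = 'hash' the right side is built into a single hash table; 'parallel_hash' partitions the build across threads, and the squashing of small blocks added above reduces per-block overhead on both paths.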
factory.registerAlias("anyLastRespectNulls", "anyLast_respect_nulls", AggregateFunctionFactory::Case::Sensitive); /// Must happen after registering any and anyLast factory.registerNullsActionTransformation("any", "any_respect_nulls"); From 44130d67650334d41f5ef5bd1c0967314f4738fb Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Sun, 3 Nov 2024 08:59:56 -0800 Subject: [PATCH 090/267] Add small note in docs for the alias --- docs/en/sql-reference/aggregate-functions/reference/anylast.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/en/sql-reference/aggregate-functions/reference/anylast.md b/docs/en/sql-reference/aggregate-functions/reference/anylast.md index 202d2e9fb10..f5b75e63399 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/anylast.md +++ b/docs/en/sql-reference/aggregate-functions/reference/anylast.md @@ -18,6 +18,8 @@ anyLast(column) [RESPECT NULLS] :::note Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the first value passed, regardless of whether it is `NULL` or not. + +Alias: anyLastRepectNulls ::: **Returned value** From 2892aa11e55515000b90c2e82922838cda532596 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sun, 3 Nov 2024 16:42:07 +0100 Subject: [PATCH 091/267] squash follow up --- src/Processors/QueryPlan/JoinStep.cpp | 16 +++++++++++----- src/Processors/Transforms/SquashingTransform.cpp | 2 +- src/QueryPipeline/QueryPipelineBuilder.cpp | 4 ++-- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/Processors/QueryPlan/JoinStep.cpp b/src/Processors/QueryPlan/JoinStep.cpp index 018b52a5c68..55cc1020095 100644 --- a/src/Processors/QueryPlan/JoinStep.cpp +++ b/src/Processors/QueryPlan/JoinStep.cpp @@ -1,9 +1,10 @@ -#include -#include -#include +#include #include #include -#include +#include +#include +#include +#include #include #include @@ -63,7 +64,7 @@ QueryPipelineBuilderPtr JoinStep::updatePipeline(QueryPipelineBuilders pipelines return joined_pipeline; } - return QueryPipelineBuilder::joinPipelinesRightLeft( + auto ppl = QueryPipelineBuilder::joinPipelinesRightLeft( std::move(pipelines[0]), std::move(pipelines[1]), join, @@ -72,6 +73,11 @@ QueryPipelineBuilderPtr JoinStep::updatePipeline(QueryPipelineBuilders pipelines max_streams, keep_left_read_in_order, &processors); + + ppl->addSimpleTransform([&](const Block & header) + { return std::make_shared(header, max_block_size / 2, 1_MiB / 2); }); + + return ppl; } bool JoinStep::allowPushDownToRight() const diff --git a/src/Processors/Transforms/SquashingTransform.cpp b/src/Processors/Transforms/SquashingTransform.cpp index 490a57d4e23..0749a305d0e 100644 --- a/src/Processors/Transforms/SquashingTransform.cpp +++ b/src/Processors/Transforms/SquashingTransform.cpp @@ -78,7 +78,7 @@ Chunk SimpleSquashingChunksTransform::generate() bool SimpleSquashingChunksTransform::canGenerate() { - return !squashed_chunk.empty(); + return squashed_chunk.hasRows(); } Chunk SimpleSquashingChunksTransform::getRemaining() diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 463d18ed7a2..157577d733d 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -442,7 +442,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe Processors processors; for (auto & outport : outports) { - auto squashing = std::make_shared(right->getHeader(), max_block_size / 2, 0); + auto squashing = std::make_shared(right->getHeader(), 
max_block_size / 2, 1_MiB / 2); connect(*outport, squashing->getInputs().front()); processors.emplace_back(squashing); auto adding_joined = std::make_shared<FillingRightJoinSideTransform>(right->getHeader(), join); @@ -501,7 +501,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe Block left_header = left->getHeader(); for (size_t i = 0; i < num_streams; ++i) { - auto squashing = std::make_shared<SimpleSquashingChunksTransform>(left->getHeader(), max_block_size / 2, 0); + auto squashing = std::make_shared<SimpleSquashingChunksTransform>(left->getHeader(), max_block_size / 2, 1_MiB / 2); connect(**lit, squashing->getInputs().front()); auto joining = std::make_shared<JoiningTransform>( From c6384291e1c96a795020ceca425b1fdff5efe3fe Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 4 Nov 2024 14:31:05 +0100 Subject: [PATCH 092/267] small fix --- src/Interpreters/HashJoin/ScatteredBlock.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index 729377f6758..d94497e304b 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -302,8 +302,6 @@ struct ScatteredBlock : private boost::noncopyable /// Cut first `num_rows` rows from `block` in place and returns block with remaining rows ScatteredBlock cut(size_t num_rows) { - SCOPE_EXIT(filterBySelector()); - if (num_rows >= rows()) return ScatteredBlock{Block{}}; From 935a29485c60038b14e4e8c87c8e021fc05f7928 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 4 Nov 2024 14:32:54 +0000 Subject: [PATCH 093/267] Fix logs --- src/Planner/findParallelReplicasQuery.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 5db67d7c793..314a7f06137 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -273,7 +273,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// We don't have any subquery and storage can process parallel replicas by itself.
if (stack.top() == query_tree_node.get()) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); return nullptr; // return query_node; } @@ -427,10 +427,10 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr return nullptr; const auto * res = findTableForParallelReplicas(query_tree_node.get()); - if (res) - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); - else - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); + // if (res) + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); + // else + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); return res; } From a6b55563c73ff10b42569d17ee24457ffff91e4e Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 4 Nov 2024 18:32:05 +0000 Subject: [PATCH 094/267] Fix FULL joins --- src/Planner/PlannerJoinTree.cpp | 37 +++++----- .../03254_pr_join_on_dups.reference | 72 +++++++++++++++++++ .../0_stateless/03254_pr_join_on_dups.sql | 12 ++++ 3 files changed, 103 insertions(+), 18 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index d79aa626d5e..c2acbd661c8 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -659,7 +659,7 @@ std::unique_ptr createComputeAliasColumnsStep( } JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression, - [[maybe_unused]] const QueryNode & parent_query_node, + const QueryTreeNodePtr & parent_join_tree, const SelectQueryInfo & select_query_info, const SelectQueryOptions & select_query_options, PlannerContextPtr & planner_context, @@ -958,17 +958,22 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres return true; }; - LOG_DEBUG( - getLogger(__PRETTY_FUNCTION__), - "parallel_replicas_node={} parent_query_node={}", - UInt64(planner_context->getGlobalPlannerContext()->parallel_replicas_node), - UInt64(&parent_query_node)); - - // const JoinNode * table_join_node = parent_query_node.getJoinTree()->as<JoinNode>(); - /// query_plan can be empty if there is nothing to read if (query_plan.isInitialized() && parallel_replicas_enabled_for_storage(storage, settings)) { + const bool allow_parallel_replicas_for_table_expression = [](const QueryTreeNodePtr & join_tree_node) + { + const JoinNode * join_node = join_tree_node->as<JoinNode>(); + if (!join_node) + return true; + + const auto join_kind = join_node->getKind(); + if (join_kind == JoinKind::Left || join_kind == JoinKind::Right || join_kind == JoinKind::Inner) + return true; + + return false; + }(parent_join_tree); + if (query_context->canUseParallelReplicasCustomKey() && query_context->getClientInfo().distributed_depth == 0) { if (auto cluster = query_context->getClusterForParallelReplicas(); @@ -991,11 +996,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if ( - ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) - // && (!table_join_node - // || (table_join_node && planner_context->getGlobalPlannerContext()->parallel_replicas_node - // && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node))) + else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context) && allow_parallel_replicas_for_table_expression) { //
(1) find read step QueryPlan::Node * node = query_plan.getRootNode(); @@ -1828,8 +1829,8 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { - const QueryNode & parent_query_node = query_node->as<QueryNode &>(); - auto table_expressions_stack = buildTableExpressionsStack(query_node->as<QueryNode &>().getJoinTree()); + const QueryTreeNodePtr & join_tree_node = query_node->as<QueryNode &>().getJoinTree(); + auto table_expressions_stack = buildTableExpressionsStack(join_tree_node); size_t table_expressions_stack_size = table_expressions_stack.size(); bool is_single_table_expression = table_expressions_stack_size == 1; @@ -1866,7 +1867,7 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, auto left_table_expression = table_expressions_stack.front(); auto left_table_expression_query_plan = buildQueryPlanForTableExpression( left_table_expression, - parent_query_node, + join_tree_node, select_query_info, select_query_options, planner_context, @@ -1941,7 +1942,7 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, bool is_remote = planner_context->getTableExpressionDataOrThrow(table_expression).isRemote(); query_plans_stack.push_back(buildQueryPlanForTableExpression( table_expression, - parent_query_node, + join_tree_node, select_query_info, select_query_options, planner_context, diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.reference b/tests/queries/0_stateless/03254_pr_join_on_dups.reference index 95cb0d8cae2..58602bafb5d 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.reference +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.reference @@ -88,6 +88,34 @@ right subs 4 l5 \N 4 r6 nr6 4 l6 \N 4 r6 nr6 9 l9 \N 9 r9 nr9 +full +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +full subs +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 self inner 1 l1 1 1 l1 1 2 l2 2 2 l2 2 @@ -199,3 +227,47 @@ self right nullable vs not nullable 3 l4 4 2 l3 3 4 l5 \N 3 l4 4 4 l6 \N 3 l4 4 +self full +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self full nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 0 \N +4 l6 \N 0 \N +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self full nullable vs not nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 5f2f209d0b0..222f7693090 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -30,6 +30,11 @@ select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, select 'right subs'; select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by
s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'full'; +select X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'full subs'; +select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; + select 'self inner'; select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self inner nullable'; @@ -55,5 +60,12 @@ select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order select 'self right nullable vs not nullable'; select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable'; +select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable vs not nullable'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; + drop table X sync; drop table Y sync; From 8c5ab63345f385a75caa943f4b50169a13e3b470 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 4 Nov 2024 22:37:57 +0000 Subject: [PATCH 095/267] Cleanup --- src/Planner/Planner.cpp | 3 +-- src/Planner/Planner.h | 1 - src/Planner/PlannerJoinTree.cpp | 6 ------ src/Planner/findParallelReplicasQuery.cpp | 22 +--------------------- 4 files changed, 2 insertions(+), 30 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 4b5a2b903c0..17277dfe8cd 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1263,7 +1263,6 @@ Planner::Planner(const QueryTreeNodePtr & query_tree_, findQueryForParallelReplicas(query_tree, select_query_options), findTableForParallelReplicas(query_tree, select_query_options), collectFiltersForAnalysis(query_tree, select_query_options)))) - // , root_planner(true) { } @@ -1538,7 +1537,7 @@ void Planner::buildPlanForQueryNode() JoinTreeQueryPlan join_tree_query_plan; if (planner_context->getMutableQueryContext()->canUseTaskBasedParallelReplicas() - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node /* && !root_planner*/) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node) { join_tree_query_plan = buildQueryPlanForParallelReplicas(query_node, planner_context, select_query_info.storage_limits); } diff --git a/src/Planner/Planner.h b/src/Planner/Planner.h index 8d771c343c3..ae78f05cbd4 100644 --- a/src/Planner/Planner.h +++ b/src/Planner/Planner.h @@ -82,7 +82,6 @@ private: StorageLimitsList storage_limits; std::set used_row_policies; QueryNodeToPlanStepMapping query_node_to_plan_step_mapping; - // bool root_planner = false; }; } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index c2acbd661c8..c1b8f999f22 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -669,12 +669,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres auto query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); - LOG_DEBUG( - getLogger(__PRETTY_FUNCTION__), - "pr_enabled={} table_expression:\n{}", - settings[Setting::allow_experimental_parallel_reading_from_replicas].toString(), - 
table_expression->dumpTree()); - auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression); QueryProcessingStage::Enum from_stage = QueryProcessingStage::Enum::FetchColumns; diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 314a7f06137..bda96f0c31f 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -265,18 +265,11 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr auto stack = getSupportingParallelReplicasQuery(query_tree_node.get()); /// Empty stack means that storage does not support parallel replicas. if (stack.empty()) - { - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); return nullptr; - } /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) - { - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); return nullptr; - // return query_node; - } /// This is needed to avoid infinite recursion. auto mutable_context = Context::createCopy(context); @@ -310,17 +303,11 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr new_stack.pop(); } } - // if (!res) - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); - // else - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); return res; } static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * query_tree_node) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); - std::stack join_nodes; while (query_tree_node || !join_nodes.empty()) { @@ -426,12 +413,7 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr if (!context->canUseParallelReplicasOnFollower()) return nullptr; - const auto * res = findTableForParallelReplicas(query_tree_node.get()); - // if (res) - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); - // else - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); - return res; + return findTableForParallelReplicas(query_tree_node.get()); } JoinTreeQueryPlan buildQueryPlanForParallelReplicas( @@ -439,8 +421,6 @@ JoinTreeQueryPlan buildQueryPlanForParallelReplicas( const PlannerContextPtr & planner_context, std::shared_ptr storage_limits) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); - auto processed_stage = QueryProcessingStage::WithMergeableState; auto context = planner_context->getQueryContext(); From e198b205092dcb0bec14b8a3a08763cc68a4a1b9 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 29 Oct 2024 21:09:03 +0000 Subject: [PATCH 096/267] CI: Stateless Tests with praktika --- .github/workflows/pr.yaml | 168 ++++++++++++++++++-- ci/__init__.py | 0 ci/docker/stateless-test/Dockerfile | 107 +++++++++++++ ci/docker/stateless-test/requirements.txt | 5 + ci/jobs/__init__.py | 0 ci/jobs/build_clickhouse.py | 65 ++++++-- ci/jobs/fast_test.py | 117 +------------- ci/jobs/functional_stateless_tests.py | 119 +++++++++++++- ci/jobs/scripts/__init__.py | 0 ci/jobs/scripts/clickhouse_proc.py | 144 +++++++++++++++++ ci/jobs/scripts/functional_tests_results.py | 3 + ci/praktika/_settings.py | 3 + ci/praktika/hook_html.py | 110 ++++++++++--- ci/praktika/job.py | 46 +++++- ci/praktika/json.html | 156 +++++++++++------- ci/praktika/mangle.py | 1 - ci/praktika/native_jobs.py | 5 +- 
ci/praktika/param.py | 8 + ci/praktika/result.py | 19 ++- ci/praktika/runner.py | 20 ++- ci/praktika/s3.py | 2 +- ci/praktika/workflow.py | 1 + ci/praktika/yaml_generator.py | 3 + ci/settings/definitions.py | 38 +++-- ci/workflows/pull_request.py | 53 ++++-- tests/clickhouse-test | 11 +- tests/config/config.d/ssl_certs.xml | 4 +- tests/config/install.sh | 24 ++- tests/docker_scripts/setup_minio.sh | 28 ++-- 29 files changed, 955 insertions(+), 305 deletions(-) create mode 100644 ci/__init__.py create mode 100644 ci/docker/stateless-test/Dockerfile create mode 100644 ci/docker/stateless-test/requirements.txt create mode 100644 ci/jobs/__init__.py create mode 100644 ci/jobs/scripts/__init__.py create mode 100644 ci/jobs/scripts/clickhouse_proc.py create mode 100644 ci/praktika/param.py diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 34c794f6088..0c3f74aeac8 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -30,6 +30,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_request.head.sha }} - name: Prepare env script run: | @@ -68,6 +71,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_request.head.sha }} - name: Prepare env script run: | @@ -106,6 +112,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_request.head.sha }} - name: Prepare env script run: | @@ -144,6 +153,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_request.head.sha }} - name: Prepare env script run: | @@ -172,16 +184,19 @@ jobs: python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - build_amd64_debug: + build_amd_debug: runs-on: [builder] needs: [config_workflow, docker_builds] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgYW1kNjQgZGVidWc=') }} - name: "Build amd64 debug" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} + name: "Build (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_request.head.sha }} - name: Prepare env script run: | @@ -205,21 +220,24 @@ jobs: .
/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_debug)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_debug)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug: + build_amd_release: runs-on: [builder] - needs: [config_workflow, docker_builds, build_amd64_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKQ==') }} - name: "Stateless tests (amd, debug)" + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} + name: "Build (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_request.head.sha }} - name: Prepare env script run: | @@ -243,14 +261,137 @@ jobs: . /tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug_parallel_1_2: + runs-on: [builder] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMS8yKQ==') }} + name: "Stateless tests (amd, debug) (parallel 1/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_request.head.sha }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + .
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug_parallel_2_2: + runs-on: [builder] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMi8yKQ==') }} + name: "Stateless tests (amd, debug) (parallel 2/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_request.head.sha }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug_non_parallel: + runs-on: [style-checker] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAobm9uLXBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd, debug) (non-parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_request.head.sha }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + .
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi finish_workflow: runs-on: [ci_services] - needs: [config_workflow, docker_builds, style_check, fast_test, build_amd64_debug, stateless_tests_amd_debug] + needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, stateless_tests_amd_debug_parallel_1_2, stateless_tests_amd_debug_parallel_2_2, stateless_tests_amd_debug_non_parallel] if: ${{ !cancelled() }} name: "Finish Workflow" outputs: @@ -258,6 +399,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_request.head.sha }} - name: Prepare env script run: | diff --git a/ci/__init__.py b/ci/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile new file mode 100644 index 00000000000..4abd8204f1d --- /dev/null +++ b/ci/docker/stateless-test/Dockerfile @@ -0,0 +1,107 @@ +# docker build -t clickhouse/stateless-test . +FROM ubuntu:22.04 + +# ARG for quick switch to a given ubuntu mirror +ARG apt_archive="http://archive.ubuntu.com" +RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list + +ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz" + +# moreutils - provides ts for FT +# expect, bzip2 - required by FT +# bsdmainutils - provides hexdump for FT + +# golang version 1.13 on Ubuntu 20 is enough for tests +RUN apt-get update -y \ + && env DEBIAN_FRONTEND=noninteractive \ + apt-get install --yes --no-install-recommends \ + awscli \ + brotli \ + lz4 \ + expect \ + moreutils \ + bzip2 \ + bsdmainutils \ + golang \ + lsof \ + mysql-client=8.0* \ + ncdu \ + netcat-openbsd \ + nodejs \ + npm \ + odbcinst \ + openjdk-11-jre-headless \ + openssl \ + postgresql-client \ + python3 \ + python3-pip \ + qemu-user-static \ + sqlite3 \ + sudo \ + tree \ + unixodbc \ + rustc \ + cargo \ + zstd \ + file \ + jq \ + pv \ + zip \ + unzip \ + p7zip-full \ + curl \ + wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* + +ARG PROTOC_VERSION=25.1 +RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip \ + && unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d /usr/local \ + && rm protoc-${PROTOC_VERSION}-linux-x86_64.zip + +COPY requirements.txt / +RUN pip3 install --no-cache-dir -r /requirements.txt + +RUN mkdir -p /tmp/clickhouse-odbc-tmp \ + && cd /tmp/clickhouse-odbc-tmp \ + && curl -L ${odbc_driver_url} | tar --strip-components=1 -xz clickhouse-odbc-1.1.6-Linux \ + && mkdir /usr/local/lib64 -p \ + && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib64/ \ + && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \ + && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \ + && sed -i 's"=libclickhouseodbc"=/usr/local/lib64/libclickhouseodbc"' /etc/odbcinst.ini \ + && rm -rf /tmp/clickhouse-odbc-tmp + +ENV TZ=Europe/Amsterdam +RUN ln -snf
/usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +ENV NUM_TRIES=1 + +# Unrelated to vars in setup_minio.sh, but should be the same there +# to have the same binaries for local running scenario +ARG MINIO_SERVER_VERSION=2024-08-03T04-33-23Z +ARG MINIO_CLIENT_VERSION=2024-07-31T15-58-33Z +ARG TARGETARCH + +# Download Minio-related binaries +RUN arch=${TARGETARCH:-amd64} \ + && curl -L "https://dl.min.io/server/minio/release/linux-${arch}/archive/minio.RELEASE.${MINIO_SERVER_VERSION}" -o /minio \ + && curl -L "https://dl.min.io/client/mc/release/linux-${arch}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -o /mc \ + && chmod +x /mc /minio + +ENV MINIO_ROOT_USER="clickhouse" +ENV MINIO_ROOT_PASSWORD="clickhouse" + +# for minio to work without root +RUN chmod 777 /home +ENV HOME="/home" +ENV TEMP_DIR="/tmp/praktika" +ENV PATH="/wd/tests:/tmp/praktika/input:$PATH" + +RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \ + && tar -xvf hadoop-3.3.1.tar.gz \ + && rm -rf hadoop-3.3.1.tar.gz + + +RUN npm install -g azurite@3.30.0 \ + && npm install -g tslib && npm install -g node diff --git a/ci/docker/stateless-test/requirements.txt b/ci/docker/stateless-test/requirements.txt new file mode 100644 index 00000000000..d556d23485f --- /dev/null +++ b/ci/docker/stateless-test/requirements.txt @@ -0,0 +1,5 @@ +Jinja2==3.1.3 +numpy==1.26.4 +requests==2.32.3 +pandas==1.5.3 +scipy==1.12.0 diff --git a/ci/jobs/__init__.py b/ci/jobs/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index cfa358b4059..3db88938f23 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -1,5 +1,6 @@ import argparse +from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils @@ -14,7 +15,9 @@ class JobStages(metaclass=MetaClasses.WithIter): def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") parser.add_argument( - "BUILD_TYPE", help="Type: __" + "--build-type", + help="Type: __", + default=None, ) parser.add_argument( "--param", @@ -24,6 +27,18 @@ def parse_args(): return parser.parse_args() +CMAKE_CMD = """cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA \ +-DCMAKE_BUILD_TYPE={BUILD_TYPE} \ +-DSANITIZE={SANITIZER} \ +-DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \ +-DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \ +-DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \ +{AUX_DEFS} \ +-DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 \ +-DCOMPILER_CACHE={CACHE_TYPE} \ +-DENABLE_BUILD_PROFILING=1 {DIR}""" + + def main(): args = parse_args() @@ -42,20 +57,45 @@ def main(): cmake_build_type = "Release" sanitizer = "" - if "debug" in args.BUILD_TYPE.lower(): - print("Build type set: debug") - cmake_build_type = "Debug" + if args.build_type and get_param(): + assert ( + False + ), "Build type must be provided via job parameter (CI case) or via --build-type input argument, not both" - if "asan" in args.BUILD_TYPE.lower(): - print("Sanitizer set: address") - sanitizer = "address" + build_type = args.build_type or get_param() + assert ( + build_type + ), "build_type must be provided either as input argument or as a parameter of parametrized job in CI" + build_type = 
build_type.lower() # if Environment.is_local_run(): # build_cache_type = "disabled" # else: - build_cache_type = "sccache" + CACHE_TYPE = "sccache" + + if "debug" in build_type: + print("Build type set: debug") + BUILD_TYPE = "Debug" + AUX_DEFS = " -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " + elif "release" in build_type: + print("Build type set: release") + BUILD_TYPE = "None" + AUX_DEFS = " -DENABLE_TESTS=1 " + + if "asan" in build_type: + print("Sanitizer set: address") + SANITIZER = "address" + else: + SANITIZER = "" + + cmake_cmd = CMAKE_CMD.format( + BUILD_TYPE=BUILD_TYPE, + CACHE_TYPE=CACHE_TYPE, + SANITIZER=SANITIZER, + AUX_DEFS=AUX_DEFS, + DIR=Utils.cwd(), + ) - current_directory = Utils.cwd() build_dir = f"{Settings.TEMP_DIR}/build" res = True @@ -75,12 +115,7 @@ def main(): results.append( Result.create_from_command_execution( name="Cmake configuration", - command=f"cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE={cmake_build_type} \ - -DSANITIZE={sanitizer} -DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \ - -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \ - -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \ - -DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 -DCOMPILER_CACHE={build_cache_type} -DENABLE_TESTS=1 \ - -DENABLE_BUILD_PROFILING=1 {current_directory}", + command=cmake_cmd, workdir=build_dir, with_log=True, ) diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index dc5e1c975a6..cb7d925fead 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -1,120 +1,13 @@ import argparse -import threading -from pathlib import Path from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils +from ci.jobs.scripts.clickhouse_proc import ClickHouseProc from ci.jobs.scripts.functional_tests_results import FTResultsProcessor -class ClickHouseProc: - def __init__(self): - self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server" - self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid" - self.config_file = f"{self.ch_config_dir}/config.xml" - self.user_files_path = f"{self.ch_config_dir}/user_files" - self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" - self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination" - self.proc = None - self.pid = 0 - nproc = int(Utils.cpu_count() / 2) - self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \ - | tee -a \"{self.test_output_file}\"" - # TODO: store info in case of failure - self.info = "" - self.info_file = "" - - Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir) - Utils.set_env("CLICKHOUSE_CONFIG", self.config_file) - Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path) - Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas") - - def start(self): - print("Starting ClickHouse server") - Shell.check(f"rm {self.pid_file}") - - def run_clickhouse(): - 
self.proc = Shell.run_async( - self.command, verbose=True, suppress_output=True - ) - - thread = threading.Thread(target=run_clickhouse) - thread.daemon = True # Allow program to exit even if thread is still running - thread.start() - - # self.proc = Shell.run_async(self.command, verbose=True) - - started = False - try: - for _ in range(5): - pid = Shell.get_output(f"cat {self.pid_file}").strip() - if not pid: - Utils.sleep(1) - continue - started = True - print(f"Got pid from fs [{pid}]") - _ = int(pid) - break - except Exception: - pass - - if not started: - stdout = self.proc.stdout.read().strip() if self.proc.stdout else "" - stderr = self.proc.stderr.read().strip() if self.proc.stderr else "" - Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr) - return False - - print(f"ClickHouse server started successfully, pid [{pid}]") - return True - - def wait_ready(self): - res, out, err = 0, "", "" - attempts = 30 - delay = 2 - for attempt in range(attempts): - res, out, err = Shell.get_res_stdout_stderr( - 'clickhouse-client --query "select 1"', verbose=True - ) - if out.strip() == "1": - print("Server ready") - break - else: - print(f"Server not ready, wait") - Utils.sleep(delay) - else: - Utils.print_formatted_error( - f"Server not ready after [{attempts*delay}s]", out, err - ) - return False - return True - - def run_fast_test(self): - if Path(self.test_output_file).exists(): - Path(self.test_output_file).unlink() - exit_code = Shell.run(self.fast_test_command) - return exit_code == 0 - - def terminate(self): - print("Terminate ClickHouse process") - timeout = 10 - if self.proc: - Utils.terminate_process_group(self.proc.pid) - - self.proc.terminate() - try: - self.proc.wait(timeout=10) - print(f"Process {self.proc.pid} terminated gracefully.") - except Exception: - print( - f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..." 
- ) - Utils.terminate_process_group(self.proc.pid, force=True) - self.proc.wait() # Wait for the process to be fully killed - print(f"Process {self.proc} was killed.") - - def clone_submodules(): submodules_to_update = [ "contrib/sysroot", @@ -240,7 +133,7 @@ def main(): Shell.check(f"rm -rf {build_dir} && mkdir -p {build_dir}") results.append( Result.create_from_command_execution( - name="Checkout Submodules for Minimal Build", + name="Checkout Submodules", command=clone_submodules, ) ) @@ -295,8 +188,8 @@ def main(): if res and JobStages.CONFIG in stages: commands = [ f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server", - f"cp {current_directory}/programs/server/config.xml {current_directory}/programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", - f"{current_directory}/tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client", + f"cp ./programs/server/config.xml ./programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", + f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --fast-test", # f"cp -a {current_directory}/programs/server/config.d/log_to_console.xml {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/", f"rm -f {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/secure_ports.xml", update_path_ch_config, @@ -310,7 +203,7 @@ def main(): ) res = results[-1].is_ok() - CH = ClickHouseProc() + CH = ClickHouseProc(fast_test=True) if res and JobStages.TEST in stages: stop_watch_ = Utils.Stopwatch() step_name = "Start ClickHouse Server" diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py index dfdd5821a19..d77522ed73a 100644 --- a/ci/jobs/functional_stateless_tests.py +++ b/ci/jobs/functional_stateless_tests.py @@ -1,31 +1,78 @@ import argparse +import os +from pathlib import Path +from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils +from ci.jobs.scripts.clickhouse_proc import ClickHouseProc +from ci.jobs.scripts.functional_tests_results import FTResultsProcessor +from ci.settings.definitions import azure_secret + class JobStages(metaclass=MetaClasses.WithIter): - CHECKOUT_SUBMODULES = "checkout" - CMAKE = "cmake" - BUILD = "build" + INSTALL_CLICKHOUSE = "install" + START = "start" + TEST = "test" def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") - parser.add_argument("BUILD_TYPE", help="Type: ") + parser.add_argument( + "BUILD_TYPE", help="Type: __" + ) parser.add_argument("--param", help="Optional custom job start stage", default=None) return parser.parse_args() +def run_stateless_test( + no_parallel: bool, no_sequential: bool, batch_num: int, batch_total: int +): + assert not (no_parallel and no_sequential) + test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" + aux = "" + nproc = int(Utils.cpu_count() / 2) + if batch_num and batch_total: + aux = f"--run-by-hash-total {batch_total} --run-by-hash-num {batch_num-1}" + stateless_test_command = f"clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \ + --no-drop-if-fail --capture-client-stacktrace --queries /repo/tests/queries --test-runs 1 --hung-check \ + {'--no-parallel' if no_parallel else ''} {'--no-sequential' if no_sequential else ''} \ + --print-time --jobs {nproc}
--report-coverage --report-logs-stats {aux} \ + --queries ./tests/queries -- '' | ts '%Y-%m-%d %H:%M:%S' \ + | tee -a \"{test_output_file}\"" + if Path(test_output_file).exists(): + Path(test_output_file).unlink() + Shell.run(stateless_test_command, verbose=True) + + def main(): args = parse_args() + params = get_param().split(" ") + parallel_or_sequential = None + no_parallel = False + no_sequential = False + if params: + parallel_or_sequential = params[0] + if len(params) > 1: + batch_num, total_batches = map(int, params[1].split("/")) + else: + batch_num, total_batches = 0, 0 + if parallel_or_sequential: + no_parallel = parallel_or_sequential == "non-parallel" + no_sequential = parallel_or_sequential == "parallel" + + os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( + f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", + verbose=True, + ) stop_watch = Utils.Stopwatch() stages = list(JobStages) - stage = args.param or JobStages.CHECKOUT_SUBMODULES + stage = args.param or JobStages.INSTALL_CLICKHOUSE if stage: assert stage in JobStages, f"--param must be one of [{list(JobStages)}]" print(f"Job will start from stage [{stage}]") @@ -36,9 +83,65 @@ def main(): res = True results = [] - if res and JobStages.CHECKOUT_SUBMODULES in stages: - info = Shell.get_output(f"ls -l {Settings.INPUT_DIR}") - results.append(Result(name="TEST", status=Result.Status.SUCCESS, info=info)) + Utils.add_to_PATH(f"{Settings.INPUT_DIR}:tests") + + if res and JobStages.INSTALL_CLICKHOUSE in stages: + commands = [ + f"chmod +x {Settings.INPUT_DIR}/clickhouse", + f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-server", + f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-client", + f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server", + f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", + f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage", + # update_path_ch_config, + f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml", + f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml", + f"clickhouse-server --version", + ] + results.append( + Result.create_from_command_execution( + name="Install ClickHouse", command=commands, with_log=True + ) + ) + res = results[-1].is_ok() + + CH = ClickHouseProc() + if res and JobStages.START in stages: + stop_watch_ = Utils.Stopwatch() + step_name = "Start ClickHouse Server" + print(step_name) + res = res and CH.start_minio() + res = res and CH.start() + res = res and CH.wait_ready() + results.append( + Result.create_from( + name=step_name, + status=res, + stopwatch=stop_watch_, + files=( + [ + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log", + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log", + ] + if not res + else [] + ), + ) + ) + res = results[-1].is_ok() + + if res and JobStages.TEST in stages: + stop_watch_ = Utils.Stopwatch() + step_name = "Tests" + print(step_name) + run_stateless_test( + no_parallel=no_parallel, + no_sequential=no_sequential, + batch_num=batch_num, + batch_total=total_batches, + ) + results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run()) + 
results[-1].set_timing(stopwatch=stop_watch_) res = results[-1].is_ok() Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/scripts/__init__.py b/ci/jobs/scripts/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py new file mode 100644 index 00000000000..cc822eab693 --- /dev/null +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -0,0 +1,144 @@ +import threading +import time +from pathlib import Path + +from praktika.settings import Settings +from praktika.utils import Shell, Utils + + +class ClickHouseProc: + BACKUPS_XML = """ + + + local + {CH_RUNTIME_DIR}/var/lib/clickhouse/disks/backups/ + + +""" + + def __init__(self, fast_test=False): + self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server" + self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid" + self.config_file = f"{self.ch_config_dir}/config.xml" + self.user_files_path = f"{self.ch_config_dir}/user_files" + self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" + self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination" + self.proc = None + self.pid = 0 + nproc = int(Utils.cpu_count() / 2) + self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \ + | tee -a \"{self.test_output_file}\"" + # TODO: store info in case of failure + self.info = "" + self.info_file = "" + + self.minio_cmd = f"tests/docker_scripts/setup_minio.sh stateless 2>&1 > {Settings.OUTPUT_DIR}/minio.log" + + Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir) + Utils.set_env("CLICKHOUSE_CONFIG", self.config_file) + Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path) + Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas") + + if not fast_test: + with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file: + file.write(self.BACKUPS_XML) + + self.minio_proc = None + + def start_minio(self): + print("Starting minio") + + def run_minio(): + self.minio_proc = Shell.run_async( + self.minio_cmd, verbose=True, suppress_output=True + ) + + thread = threading.Thread(target=run_minio) + thread.daemon = True # Allow program to exit even if thread is still running + thread.start() + time.sleep(5) + return thread.is_alive() + + def start(self): + print("Starting ClickHouse server") + Shell.check(f"rm {self.pid_file}") + + def run_clickhouse(): + self.proc = Shell.run_async( + self.command, verbose=True, suppress_output=False + ) + + thread = threading.Thread(target=run_clickhouse) + thread.daemon = True # Allow program to exit even if thread is still running + thread.start() + + started = False + try: + for _ in range(5): + pid = Shell.get_output(f"cat {self.pid_file}").strip() + if not pid: + Utils.sleep(1) + continue + started = True + print(f"Got pid from fs [{pid}]") + _ = int(pid) + break + except Exception: + pass + + if not started: + stdout = self.proc.stdout.read().strip() if self.proc.stdout else "" + stderr = self.proc.stderr.read().strip() if self.proc.stderr else "" + 
Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr) + return False + + print(f"ClickHouse server started successfully, pid [{pid}]") + return True + + def wait_ready(self): + res, out, err = 0, "", "" + attempts = 30 + delay = 2 + for attempt in range(attempts): + res, out, err = Shell.get_res_stdout_stderr( + 'clickhouse-client --query "select 1"', verbose=True + ) + if out.strip() == "1": + print("Server ready") + break + else: + print(f"Server not ready, wait") + Utils.sleep(delay) + else: + Utils.print_formatted_error( + f"Server not ready after [{attempts*delay}s]", out, err + ) + return False + return True + + def run_fast_test(self): + if Path(self.test_output_file).exists(): + Path(self.test_output_file).unlink() + exit_code = Shell.run(self.fast_test_command) + return exit_code == 0 + + def terminate(self): + print("Terminate ClickHouse process") + timeout = 10 + if self.proc: + Utils.terminate_process_group(self.proc.pid) + + self.proc.terminate() + try: + self.proc.wait(timeout=10) + print(f"Process {self.proc.pid} terminated gracefully.") + except Exception: + print( + f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..." + ) + Utils.terminate_process_group(self.proc.pid, force=True) + self.proc.wait() # Wait for the process to be fully killed + print(f"Process {self.proc} was killed.") + + if self.minio_proc: + Utils.terminate_process_group(self.minio_proc.pid) diff --git a/ci/jobs/scripts/functional_tests_results.py b/ci/jobs/scripts/functional_tests_results.py index aba3e4f7f5b..06989fb0a44 100755 --- a/ci/jobs/scripts/functional_tests_results.py +++ b/ci/jobs/scripts/functional_tests_results.py @@ -232,6 +232,8 @@ class FTResultsProcessor: else: pass + info = f"Total: {s.total - s.skipped}, Failed: {s.failed}" + # TODO: !!! 
# def test_result_comparator(item): # # sort by status then by check name @@ -253,6 +255,7 @@ class FTResultsProcessor: results=test_results, status=state, files=[self.tests_output_file], + info=info, with_info_from_results=False, ) diff --git a/ci/praktika/_settings.py b/ci/praktika/_settings.py index 1777257f484..17da1519e37 100644 --- a/ci/praktika/_settings.py +++ b/ci/praktika/_settings.py @@ -80,6 +80,8 @@ class _Settings: CI_DB_TABLE_NAME = "" CI_DB_INSERT_TIMEOUT_SEC = 5 + DISABLE_MERGE_COMMIT = True + _USER_DEFINED_SETTINGS = [ "S3_ARTIFACT_PATH", @@ -112,6 +114,7 @@ _USER_DEFINED_SETTINGS = [ "SECRET_GH_APP_PEM_KEY", "SECRET_GH_APP_ID", "MAIN_BRANCH", + "DISABLE_MERGE_COMMIT", ] diff --git a/ci/praktika/hook_html.py b/ci/praktika/hook_html.py index f4bd4435511..cea84192d0d 100644 --- a/ci/praktika/hook_html.py +++ b/ci/praktika/hook_html.py @@ -11,50 +11,112 @@ from praktika.result import Result, ResultInfo from praktika.runtime import RunConfig from praktika.s3 import S3 from praktika.settings import Settings -from praktika.utils import Shell, Utils +from praktika.utils import Utils @dataclasses.dataclass class GitCommit: - date: str - message: str + # date: str + # message: str sha: str @staticmethod - def from_json(json_data: str) -> List["GitCommit"]: + def from_json(file) -> List["GitCommit"]: commits = [] + json_data = None try: - data = json.loads(json_data) - + with open(file, "r", encoding="utf-8") as f: + json_data = json.load(f) commits = [ GitCommit( - message=commit["messageHeadline"], - sha=commit["oid"], - date=commit["committedDate"], + # message=commit["messageHeadline"], + sha=commit["sha"], + # date=commit["committedDate"], ) - for commit in data.get("commits", []) + for commit in json_data ] except Exception as e: print( - f"ERROR: Failed to deserialize commit's data: [{json_data}], ex: [{e}]" + f"ERROR: Failed to deserialize commit's data [{json_data}], ex: [{e}]" ) return commits + @classmethod + def update_s3_data(cls): + env = _Environment.get() + sha = env.SHA + if not sha: + print("WARNING: Failed to retrieve commit sha") + return + commits = cls.pull_from_s3() + for commit in commits: + if sha == commit.sha: + print( + f"INFO: Sha already present in commits data [{sha}] - skip data update" + ) + return + commits.append(GitCommit(sha=sha)) + cls.push_to_s3(commits) + return + + @classmethod + def dump(cls, commits): + commits_ = [] + for commit in commits: + commits_.append(dataclasses.asdict(commit)) + with open(cls.file_name(), "w", encoding="utf8") as f: + json.dump(commits_, f) + + @classmethod + def pull_from_s3(cls): + local_path = Path(cls.file_name()) + file_name = local_path.name + env = _Environment.get() + s3_path = f"{Settings.HTML_S3_PATH}/{cls.get_s3_prefix(pr_number=env.PR_NUMBER, branch=env.BRANCH)}/{file_name}" + if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path): + print(f"WARNING: failed to cp file [{s3_path}] from s3") + return [] + return cls.from_json(local_path) + + @classmethod + def push_to_s3(cls, commits): + print(f"INFO: push commits data to s3, commits num [{len(commits)}]") + cls.dump(commits) + local_path = Path(cls.file_name()) + file_name = local_path.name + env = _Environment.get() + s3_path = f"{Settings.HTML_S3_PATH}/{cls.get_s3_prefix(pr_number=env.PR_NUMBER, branch=env.BRANCH)}/{file_name}" + if not S3.copy_file_to_s3(s3_path=s3_path, local_path=local_path, text=True): + print(f"WARNING: failed to cp file [{local_path}] to s3") + + @classmethod + def get_s3_prefix(cls, pr_number, branch): + prefix = 
"" + assert pr_number or branch + if pr_number and pr_number > 0: + prefix += f"{pr_number}" + else: + prefix += f"{branch}" + return prefix + + @classmethod + def file_name(cls): + return f"{Settings.TEMP_DIR}/commits.json" + + # def _get_pr_commits(pr_number): + # res = [] + # if not pr_number: + # return res + # output = Shell.get_output(f"gh pr view {pr_number} --json commits") + # if output: + # res = GitCommit.from_json(output) + # return res + class HtmlRunnerHooks: @classmethod def configure(cls, _workflow): - - def _get_pr_commits(pr_number): - res = [] - if not pr_number: - return res - output = Shell.get_output(f"gh pr view {pr_number} --json commits") - if output: - res = GitCommit.from_json(output) - return res - # generate pending Results for all jobs in the workflow if _workflow.enable_cache: skip_jobs = RunConfig.from_fs(_workflow.name).cache_success @@ -106,11 +168,9 @@ class HtmlRunnerHooks: Utils.raise_with_error( "Failed to set both GH commit status and PR comment with Workflow Status, cannot proceed" ) - if env.PR_NUMBER: - commits = _get_pr_commits(env.PR_NUMBER) - # TODO: upload commits data to s3 to visualise it on a report page - print(commits) + # TODO: enable for branch, add commit number limiting + GitCommit.update_s3_data() @classmethod def pre_run(cls, _workflow, _job): diff --git a/ci/praktika/job.py b/ci/praktika/job.py index d0d4232cfa2..99eb08938b8 100644 --- a/ci/praktika/job.py +++ b/ci/praktika/job.py @@ -52,30 +52,57 @@ class Job: self, parameter: Optional[List[Any]] = None, runs_on: Optional[List[List[str]]] = None, + provides: Optional[List[List[str]]] = None, + requires: Optional[List[List[str]]] = None, timeout: Optional[List[int]] = None, ): assert ( parameter or runs_on ), "Either :parameter or :runs_on must be non empty list for parametrisation" + if runs_on: + assert isinstance(runs_on, list) and isinstance(runs_on[0], list) if not parameter: parameter = [None] * len(runs_on) if not runs_on: runs_on = [None] * len(parameter) if not timeout: timeout = [None] * len(parameter) + if not provides: + provides = [None] * len(parameter) + if not requires: + requires = [None] * len(parameter) assert ( - len(parameter) == len(runs_on) == len(timeout) - ), "Parametrization lists must be of the same size" + len(parameter) + == len(runs_on) + == len(timeout) + == len(provides) + == len(requires) + ), f"Parametrization lists must be of the same size [{len(parameter)}, {len(runs_on)}, {len(timeout)}, {len(provides)}, {len(requires)}]" res = [] - for parameter_, runs_on_, timeout_ in zip(parameter, runs_on, timeout): + for parameter_, runs_on_, timeout_, provides_, requires_ in zip( + parameter, runs_on, timeout, provides, requires + ): obj = copy.deepcopy(self) + assert ( + not obj.provides + ), "Job.Config.provides must be empty for parametrized jobs" if parameter_: obj.parameter = parameter_ if runs_on_: obj.runs_on = runs_on_ if timeout_: obj.timeout = timeout_ + if provides_: + assert ( + not obj.provides + ), "Job.Config.provides must be empty for parametrized jobs" + obj.provides = provides_ + if requires_: + assert ( + not obj.requires + ), "Job.Config.requires and parametrize(requires=...) 
are both set" + obj.requires = requires_ obj.name = obj.get_job_name_with_parameter() res.append(obj) return res @@ -84,13 +111,16 @@ class Job: name, parameter, runs_on = self.name, self.parameter, self.runs_on res = name name_params = [] - if isinstance(parameter, list) or isinstance(parameter, dict): - name_params.append(json.dumps(parameter)) - elif parameter is not None: - name_params.append(parameter) - if runs_on: + if parameter: + if isinstance(parameter, list) or isinstance(parameter, dict): + name_params.append(json.dumps(parameter)) + else: + name_params.append(parameter) + elif runs_on: assert isinstance(runs_on, list) name_params.append(json.dumps(runs_on)) + else: + assert False if name_params: name_params = [str(param) for param in name_params] res += f" ({', '.join(name_params)})" diff --git a/ci/praktika/json.html b/ci/praktika/json.html index af03ed702f8..f86a7b27ecb 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -200,10 +200,7 @@ } th.name-column, td.name-column { - max-width: 400px; /* Set the maximum width for the column */ - white-space: nowrap; /* Prevent text from wrapping */ - overflow: hidden; /* Hide the overflowed text */ - text-overflow: ellipsis; /* Show ellipsis (...) for overflowed text */ + min-width: 350px; } th.status-column, td.status-column { @@ -364,7 +361,6 @@ } function addKeyValueToStatus(key, value, options = null) { - const statusContainer = document.getElementById('status-container'); let keyValuePair = document.createElement('div'); @@ -374,27 +370,40 @@ keyElement.className = 'json-key'; keyElement.textContent = key + ':'; - let valueElement - if (value) { - valueElement = document.createElement('div'); - valueElement.className = 'json-value'; - valueElement.textContent = value; - } else if (options) { + let valueElement; + + if (options) { + // Create dropdown if options are provided valueElement = document.createElement('select'); valueElement.className = 'dropdown-value'; + + options.forEach(optionValue => { + const option = document.createElement('option'); + option.value = optionValue; + option.textContent = optionValue.slice(0, 10); + + // Set the initially selected option + if (optionValue === value) { + option.selected = true; + } + + valueElement.appendChild(option); + }); + + // Update the URL parameter when the selected value changes valueElement.addEventListener('change', (event) => { const selectedValue = event.target.value; updateUrlParameter(key, selectedValue); }); - options.forEach(optionValue => { - const option = document.createElement('option'); - option.value = optionValue; - option.textContent = optionValue; - valueElement.appendChild(option); - }); + } else { + // Create a simple text display if no options are provided + valueElement = document.createElement('div'); + valueElement.className = 'json-value'; + valueElement.textContent = value || 'N/A'; // Display 'N/A' if value is null } - keyValuePair.appendChild(keyElement) - keyValuePair.appendChild(valueElement) + + keyValuePair.appendChild(keyElement); + keyValuePair.appendChild(valueElement); statusContainer.appendChild(keyValuePair); } @@ -518,12 +527,12 @@ const columns = ['name', 'status', 'start_time', 'duration', 'info']; const columnSymbols = { - name: '📂', - status: '⏯️', + name: '🗂️', + status: '🧾', start_time: '🕒', duration: '⏳', - info: 'ℹ️', - files: '📄' + info: '📝', + files: '📎' }; function createResultsTable(results, nest_level) { @@ -532,16 +541,14 @@ const thead = document.createElement('thead'); const tbody = 
document.createElement('tbody'); - // Get the current URL parameters - const currentUrl = new URL(window.location.href); - // Create table headers based on the fixed columns const headerRow = document.createElement('tr'); columns.forEach(column => { const th = document.createElement('th'); - th.textContent = th.textContent = columnSymbols[column] || column; + th.textContent = columnSymbols[column] || column; th.style.cursor = 'pointer'; // Make headers clickable - th.addEventListener('click', () => sortTable(results, column, tbody, nest_level)); // Add click event to sort the table + th.setAttribute('data-sort-direction', 'asc'); // Default sort direction + th.addEventListener('click', () => sortTable(results, column, columnSymbols[column] || column, tbody, nest_level, columns)); // Add click event to sort the table headerRow.appendChild(th); }); thead.appendChild(headerRow); @@ -605,39 +612,33 @@ }); } - function sortTable(results, key, tbody, nest_level) { + function sortTable(results, column, key, tbody, nest_level, columns) { // Find the table header element for the given key - let th = null; - const tableHeaders = document.querySelectorAll('th'); // Select all table headers - tableHeaders.forEach(header => { - if (header.textContent.trim().toLowerCase() === key.toLowerCase()) { - th = header; - } - }); + const tableHeaders = document.querySelectorAll('th'); + let th = Array.from(tableHeaders).find(header => header.textContent === key); if (!th) { console.error(`No table header found for key: ${key}`); return; } - // Determine the current sort direction - let ascending = th.getAttribute('data-sort-direction') === 'asc' ? false : true; + const ascending = th.getAttribute('data-sort-direction') === 'asc'; + th.setAttribute('data-sort-direction', ascending ? 'desc' : 'asc'); - // Toggle the sort direction for the next click - th.setAttribute('data-sort-direction', ascending ? 'asc' : 'desc'); - - // Sort the results array by the given key results.sort((a, b) => { - if (a[key] < b[key]) return ascending ? -1 : 1; - if (a[key] > b[key]) return ascending ? 1 : -1; + if (a[column] < b[column]) return ascending ? -1 : 1; + if (a[column] > b[column]) return ascending ? 1 : -1; return 0; }); + // Clear the existing rows in tbody + tbody.innerHTML = ''; + // Re-populate the table with sorted data populateTableRows(tbody, results, columns, nest_level); } - function loadJSON(PR, sha, nameParams) { + function loadResultsJSON(PR, sha, nameParams) { const infoElement = document.getElementById('info-container'); let lastModifiedTime = null; const task = nameParams[0].toLowerCase(); @@ -753,22 +754,61 @@ } }); - if (PR) { - addKeyValueToStatus("PR", PR) - } else { - console.error("TODO") - } - addKeyValueToStatus("sha", null, [sha, 'lala']); - if (nameParams[1]) { - addKeyValueToStatus("job", nameParams[1]); - } - addKeyValueToStatus("workflow", nameParams[0]); + let path_commits_json = ''; + let commitsArray = []; - if (PR && sha && root_name) { - loadJSON(PR, sha, nameParams); + if (PR) { + addKeyValueToStatus("PR", PR); + const baseUrl = window.location.origin + window.location.pathname.replace('/json.html', ''); + path_commits_json = `${baseUrl}/${encodeURIComponent(PR)}/commits.json`; } else { - document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0'; + // Placeholder for a different path when PR is missing + console.error("PR parameter is missing. 
Setting alternate commits path."); + path_commits_json = '/path/to/alternative/commits.json'; } + + function loadCommitsArray(path) { + return fetch(path, { cache: "no-cache" }) + .then(response => { + if (!response.ok) { + console.error(`HTTP error! status: ${response.status}`) + return []; + } + return response.json(); + }) + .then(data => { + if (Array.isArray(data) && data.every(item => typeof item === 'object' && item.hasOwnProperty('sha'))) { + return data.map(item => item.sha); + } else { + throw new Error('Invalid data format: expected array of objects with a "sha" key'); + } + }) + .catch(error => { + console.error('Error loading commits JSON:', error); + return []; // Return an empty array if an error occurs + }); + } + + loadCommitsArray(path_commits_json) + .then(data => { + commitsArray = data; + }) + .finally(() => { + // Proceed with the rest of the initialization + addKeyValueToStatus("sha", sha || "latest", commitsArray.concat(["latest"])); + + if (nameParams[1]) { + addKeyValueToStatus("job", nameParams[1]); + } + addKeyValueToStatus("workflow", nameParams[0]); + + // Check if all required parameters are present to load JSON + if (PR && sha && root_name) { + loadResultsJSON(PR, sha, nameParams); + } else { + document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0'; + } + }); } window.onload = init; diff --git a/ci/praktika/mangle.py b/ci/praktika/mangle.py index 89fc52cf849..bca33f9e660 100644 --- a/ci/praktika/mangle.py +++ b/ci/praktika/mangle.py @@ -58,7 +58,6 @@ def _update_workflow_artifacts(workflow): artifact_job = {} for job in workflow.jobs: for artifact_name in job.provides: - assert artifact_name not in artifact_job artifact_job[artifact_name] = job.name for artifact in workflow.artifacts: artifact._provided_by = artifact_job[artifact.name] diff --git a/ci/praktika/native_jobs.py b/ci/praktika/native_jobs.py index f7fd4ca190b..16ffa9056e9 100644 --- a/ci/praktika/native_jobs.py +++ b/ci/praktika/native_jobs.py @@ -151,7 +151,7 @@ def _config_workflow(workflow: Workflow.Config, job_name): status = Result.Status.ERROR print("ERROR: ", info) else: - Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika --generate") + assert Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika yaml") exit_code, output, err = Shell.get_res_stdout_stderr( f"git diff-index HEAD -- {Settings.WORKFLOW_PATH_PREFIX}" ) @@ -250,6 +250,9 @@ def _config_workflow(workflow: Workflow.Config, job_name): info_lines.append(job_name + ": " + info) results.append(result_) + if workflow.enable_merge_commit: + assert False, "NOT implemented" + # config: if workflow.dockers: print("Calculate docker's digests") diff --git a/ci/praktika/param.py b/ci/praktika/param.py new file mode 100644 index 00000000000..f5727198e0d --- /dev/null +++ b/ci/praktika/param.py @@ -0,0 +1,8 @@ +from praktika._environment import _Environment + + +# TODO: find better place and/or right storage for parameter +def get_param(): + env = _Environment.get() + assert env.PARAMETER + return env.PARAMETER diff --git a/ci/praktika/result.py b/ci/praktika/result.py index 2ba8309ad60..f473cf3ed05 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -1,7 +1,6 @@ import dataclasses import datetime import sys -from collections.abc import Container from pathlib import Path from typing import Any, Dict, List, Optional @@ -68,8 +67,9 @@ class Result(MetaClasses.Serializable): if isinstance(status, bool): status = Result.Status.SUCCESS if status else Result.Status.FAILED if 
not results and not status: - print("ERROR: Either .results or .status must be provided") - raise + Utils.raise_with_error( + f"Either .results ({results}) or .status ({status}) must be provided" + ) if not name: name = _Environment.get().JOB_NAME if not name: @@ -78,10 +78,10 @@ class Result(MetaClasses.Serializable): result_status = status or Result.Status.SUCCESS infos = [] if info: - if isinstance(info, Container): - infos += info + if isinstance(info, str): + infos += [info] else: - infos.append(info) + infos += info if results and not status: for result in results: if result.status not in (Result.Status.SUCCESS, Result.Status.FAILED): @@ -112,7 +112,7 @@ class Result(MetaClasses.Serializable): return self.status not in (Result.Status.PENDING, Result.Status.RUNNING) def is_running(self): - return self.status not in (Result.Status.RUNNING,) + return self.status in (Result.Status.RUNNING,) def is_ok(self): return self.status in (Result.Status.SKIPPED, Result.Status.SUCCESS) @@ -180,6 +180,11 @@ class Result(MetaClasses.Serializable): ) return self + def set_timing(self, stopwatch: Utils.Stopwatch): + self.start_time = stopwatch.start_time + self.duration = stopwatch.duration + return self + def update_sub_result(self, result: "Result"): assert self.results, "BUG?" for i, result_ in enumerate(self.results): diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 823c7e0f36d..5db1a89ce99 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -125,15 +125,24 @@ class Runner: return 0 def _run(self, workflow, job, docker="", no_docker=False, param=None): + # re-set envs for local run + env = _Environment.get() + env.JOB_NAME = job.name + env.PARAMETER = job.parameter + env.dump() + if param: if not isinstance(param, str): Utils.raise_with_error( f"Custom param for local tests must be of type str, got [{type(param)}]" ) - env = _Environment.get() - env.dump() if job.run_in_docker and not no_docker: + job.run_in_docker, docker_settings = ( + job.run_in_docker.split("+")[0], + job.run_in_docker.split("+")[1:], + ) + from_root = "root" in docker_settings if ":" in job.run_in_docker: docker_name, docker_tag = job.run_in_docker.split(":") print( @@ -145,7 +154,7 @@ class Runner: RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker], ) docker = docker or f"{docker_name}:{docker_tag}" - cmd = f"docker run --rm --user \"$(id -u):$(id -g)\" -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" + cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" else: cmd = job.command @@ -226,7 +235,8 @@ class Runner: print(info) result.set_info(info).set_status(Result.Status.ERROR).dump() - result.set_files(files=[Settings.RUN_LOG]) + if not result.is_ok(): + result.set_files(files=[Settings.RUN_LOG]) result.update_duration().dump() if result.info and result.status != Result.Status.SUCCESS: @@ -329,7 +339,7 @@ class Runner: workflow, job, pr=pr, branch=branch, sha=sha ) - if res: + if res and (not local_run or pr or sha or branch): res = False print(f"=== Pre run script [{job.name}], workflow [{workflow.name}] ===") try: diff --git a/ci/praktika/s3.py b/ci/praktika/s3.py index 8cfb70a9076..04a08622dcd 
100644 --- a/ci/praktika/s3.py +++ b/ci/praktika/s3.py @@ -52,7 +52,7 @@ class S3: cmd += " --content-type text/plain" res = cls.run_command_with_retries(cmd) if not res: - raise + raise RuntimeError() bucket = s3_path.split("/")[0] endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket] assert endpoint diff --git a/ci/praktika/workflow.py b/ci/praktika/workflow.py index 41e8056f9ef..8c5ec12440f 100644 --- a/ci/praktika/workflow.py +++ b/ci/praktika/workflow.py @@ -31,6 +31,7 @@ class Workflow: enable_report: bool = False enable_merge_ready_status: bool = False enable_cidb: bool = False + enable_merge_commit: bool = False def is_event_pull_request(self): return self.event == Workflow.Event.PULL_REQUEST diff --git a/ci/praktika/yaml_generator.py b/ci/praktika/yaml_generator.py index fb918b4ddba..1422a835663 100644 --- a/ci/praktika/yaml_generator.py +++ b/ci/praktika/yaml_generator.py @@ -80,6 +80,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{{{ github.event.pull_request.head.sha }}}} {JOB_ADDONS} - name: Prepare env script run: | diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py index c67bdee015b..33173756924 100644 --- a/ci/settings/definitions.py +++ b/ci/settings/definitions.py @@ -8,23 +8,30 @@ class RunnerLabels: CI_SERVICES = "ci_services" CI_SERVICES_EBS = "ci_services_ebs" BUILDER = "builder" + STYLE_CHECKER = "style-checker" BASE_BRANCH = "master" +azure_secret = Secret.Config( + name="azure_connection_string", + type=Secret.Type.AWS_SSM_VAR, +) + SECRETS = [ Secret.Config( name="dockerhub_robot_password", type=Secret.Type.AWS_SSM_VAR, ), - Secret.Config( - name="woolenwolf_gh_app.clickhouse-app-id", - type=Secret.Type.AWS_SSM_SECRET, - ), - Secret.Config( - name="woolenwolf_gh_app.clickhouse-app-key", - type=Secret.Type.AWS_SSM_SECRET, - ), + azure_secret, + # Secret.Config( + # name="woolenwolf_gh_app.clickhouse-app-id", + # type=Secret.Type.AWS_SSM_SECRET, + # ), + # Secret.Config( + # name="woolenwolf_gh_app.clickhouse-app-key", + # type=Secret.Type.AWS_SSM_SECRET, + # ), ] DOCKERS = [ @@ -118,12 +125,12 @@ DOCKERS = [ # platforms=Docker.Platforms.arm_amd, # depends_on=["clickhouse/test-base"], # ), - # Docker.Config( - # name="clickhouse/stateless-test", - # path="./ci/docker/test/stateless", - # platforms=Docker.Platforms.arm_amd, - # depends_on=["clickhouse/test-base"], - # ), + Docker.Config( + name="clickhouse/stateless-test", + path="./ci/docker/stateless-test", + platforms=Docker.Platforms.arm_amd, + depends_on=[], + ), # Docker.Config( # name="clickhouse/stateful-test", # path="./ci/docker/test/stateful", @@ -230,5 +237,6 @@ DOCKERS = [ class JobNames: STYLE_CHECK = "Style Check" FAST_TEST = "Fast test" - BUILD_AMD_DEBUG = "Build amd64 debug" + BUILD = "Build" + BUILD_AMD_DEBUG = "Build (amd, debug)" STATELESS_TESTS = "Stateless tests (amd, debug)" diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index c7715b40fca..10dd77a0414 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -13,7 +13,8 @@ from ci.settings.definitions import ( class ArtifactNames: - ch_debug_binary = "clickhouse_debug_binary" + CH_AMD_DEBUG = "CH_AMD_DEBUG" + CH_AMD_RELEASE = "CH_AMD_RELEASE" style_check_job = Job.Config( @@ -37,10 +38,10 @@ fast_test_job = Job.Config( ), ) -job_build_amd_debug = Job.Config( - name=JobNames.BUILD_AMD_DEBUG, +amd_build_jobs = Job.Config( + name=JobNames.BUILD, runs_on=[RunnerLabels.BUILDER], - command="python3 
./ci/jobs/build_clickhouse.py amd_debug", + command="python3 ./ci/jobs/build_clickhouse.py", run_in_docker="clickhouse/fasttest", digest_config=Job.CacheDigestConfig( include_paths=[ @@ -56,20 +57,30 @@ job_build_amd_debug = Job.Config( "./tests/ci/version_helper.py", ], ), - provides=[ArtifactNames.ch_debug_binary], +).parametrize( + parameter=["amd_debug", "amd_release"], + provides=[[ArtifactNames.CH_AMD_DEBUG], [ArtifactNames.CH_AMD_RELEASE]], ) -stateless_tests_job = Job.Config( +stateless_batch_num = 2 +stateless_tests_amd_debug_jobs = Job.Config( name=JobNames.STATELESS_TESTS, runs_on=[RunnerLabels.BUILDER], command="python3 ./ci/jobs/functional_stateless_tests.py amd_debug", - run_in_docker="clickhouse/fasttest:latest", + run_in_docker="clickhouse/stateless-test", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_stateless_tests.py", ], ), - requires=[ArtifactNames.ch_debug_binary], + requires=[ArtifactNames.CH_AMD_DEBUG], +).parametrize( + parameter=[ + f"parallel {i+1}/{stateless_batch_num}" for i in range(stateless_batch_num) + ] + + ["non-parallel"], + runs_on=[[RunnerLabels.BUILDER] for _ in range(stateless_batch_num)] + + [[RunnerLabels.STYLE_CHECKER]], ) workflow = Workflow.Config( @@ -79,15 +90,20 @@ workflow = Workflow.Config( jobs=[ style_check_job, fast_test_job, - job_build_amd_debug, - stateless_tests_job, + *amd_build_jobs, + *stateless_tests_amd_debug_jobs, ], artifacts=[ Artifact.Config( - name=ArtifactNames.ch_debug_binary, + name=ArtifactNames.CH_AMD_DEBUG, type=Artifact.Type.S3, path=f"{Settings.TEMP_DIR}/build/programs/clickhouse", - ) + ), + Artifact.Config( + name=ArtifactNames.CH_AMD_RELEASE, + type=Artifact.Type.S3, + path=f"{Settings.TEMP_DIR}/build/programs/clickhouse", + ), ], dockers=DOCKERS, secrets=SECRETS, @@ -101,8 +117,11 @@ WORKFLOWS = [ ] # type: List[Workflow.Config] -if __name__ == "__main__": - # local job test inside praktika environment - from praktika.runner import Runner - - Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True) +# if __name__ == "__main__": +# # local job test inside praktika environment +# from praktika.runner import Runner +# from praktika.digest import Digest +# +# print(Digest().calc_job_digest(amd_debug_build_job)) +# +# Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 100a6358dcf..3396b10814a 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -2153,9 +2153,9 @@ class TestSuite: self.sequential_tests = [] self.parallel_tests = [] for test_name in self.all_tests: - if self.is_sequential_test(test_name): + if self.is_sequential_test(test_name) and not args.no_sequential: self.sequential_tests.append(test_name) - else: + elif not args.no_parallel: self.parallel_tests.append(test_name) def is_sequential_test(self, test_name): @@ -3290,7 +3290,10 @@ def parse_args(): help='Replace random database name with "default" in stderr', ) parser.add_argument( - "--parallel", default="1/1", help="One parallel test run number/total" + "--no-sequential", action="store_true", help="Do not run tests tagged as no-parallel" + ) + parser.add_argument( + "--no-parallel", action="store_true", help="Run only tests tagged as no-parallel" ) parser.add_argument( "-j", "--jobs", default=1, nargs="?", type=int, help="Run all tests in parallel" ) @@ -3339,7 +3342,7 @@ def parse_args(): parser.add_argument( "--sequential", nargs="+", - help="Run these tests sequentially even if --parallel specified", + help="Run all tests sequentially", ) 
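To make the intent of the two new flags concrete, here is an illustrative sketch of how a split run could be driven from the command line (the invocations are examples, not commands taken from this patch):

    # run only the tests that are allowed to execute in parallel
    ./tests/clickhouse-test --no-sequential
    # run only the tests tagged as no-parallel, e.g. in a separate CI job
    ./tests/clickhouse-test --no-parallel

This mirrors the praktika parametrization above, where the "parallel i/n" batches and the "non-parallel" batch are scheduled as separate jobs.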
parser.add_argument( "--no-long", action="store_true", dest="no_long", help="Do not run long tests" diff --git a/tests/config/config.d/ssl_certs.xml b/tests/config/config.d/ssl_certs.xml index 26b679f39df..c20fef89e00 100644 --- a/tests/config/config.d/ssl_certs.xml +++ b/tests/config/config.d/ssl_certs.xml @@ -1,8 +1,8 @@ - /etc/clickhouse-server/server.crt - /etc/clickhouse-server/server.key + /tmp/praktika/etc/clickhouse-server/server.crt + /tmp/praktika/etc/clickhouse-server/server.key diff --git a/tests/config/install.sh b/tests/config/install.sh index be47298f6a4..cdae5741fce 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -9,6 +9,21 @@ DEST_SERVER_PATH="${1:-/etc/clickhouse-server}" DEST_CLIENT_PATH="${2:-/etc/clickhouse-client}" SRC_PATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +shift # DEST_SERVER_PATH +shift # DEST_CLIENT_PATH + +FAST_TEST=0 +S3_STORAGE=0 + +while [[ "$#" -gt 0 ]]; do + case $1 in + --fast-test) FAST_TEST=1 ;; + --s3-storage) S3_STORAGE=1 ;; + *) echo "Unknown option: $1" ; exit 1 ;; + esac + shift +done + echo "Going to install test configs from $SRC_PATH into $DEST_SERVER_PATH" mkdir -p $DEST_SERVER_PATH/config.d/ @@ -72,9 +87,8 @@ ln -sf $SRC_PATH/config.d/serverwide_trace_collector.xml $DEST_SERVER_PATH/confi ln -sf $SRC_PATH/config.d/rocksdb.xml $DEST_SERVER_PATH/config.d/ # Not supported with fasttest. -if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ] -then - ln -sf $SRC_PATH/config.d/legacy_geobase.xml $DEST_SERVER_PATH/config.d/ +if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ] || [ "$FAST_TEST" != "1" ]; then + ln -sf "$SRC_PATH/config.d/legacy_geobase.xml" "$DEST_SERVER_PATH/config.d/" fi ln -sf $SRC_PATH/users.d/log_queries.xml $DEST_SERVER_PATH/users.d/ @@ -185,7 +199,7 @@ elif [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" == "1" ]]; then ln -sf $SRC_PATH/config.d/azure_storage_policy_by_default.xml $DEST_SERVER_PATH/config.d/ fi -if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then +if [[ "$EXPORT_S3_STORAGE_POLICIES" == "1" ]] || [[ "$S3_STORAGE" = "1" ]]; then ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02944.xml $DEST_SERVER_PATH/config.d/ @@ -195,7 +209,7 @@ if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then ln -sf $SRC_PATH/users.d/s3_cache_new.xml $DEST_SERVER_PATH/users.d/ fi -if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then +if [[ "$USE_DATABASE_REPLICATED" == "1" ]]; then ln -sf $SRC_PATH/users.d/database_replicated.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/config.d/database_replicated.xml $DEST_SERVER_PATH/config.d/ rm /etc/clickhouse-server/config.d/zookeeper.xml diff --git a/tests/docker_scripts/setup_minio.sh b/tests/docker_scripts/setup_minio.sh index 40e93e713a1..837c05a9c5d 100755 --- a/tests/docker_scripts/setup_minio.sh +++ b/tests/docker_scripts/setup_minio.sh @@ -5,6 +5,12 @@ set -euxf -o pipefail export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse} export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse} +if [ -d "$TEMP_DIR" ]; then + cd "$TEMP_DIR" + # add / for minio mc in docker + PATH="/:.:$PATH" +fi + usage() { echo $"Usage: $0 (default path: /usr/share/clickhouse-test)" exit 1 @@ -70,9 +76,10 @@ download_minio() { } start_minio() { + pwd mkdir -p ./minio_data - ./minio --version - ./minio server --address ":11111" ./minio_data & + minio --version + minio server --address ":11111" ./minio_data & 
wait_for_it lsof -i :11111 sleep 5 @@ -80,12 +87,14 @@ start_minio() { setup_minio() { local test_type=$1 - ./mc alias set clickminio http://localhost:11111 clickhouse clickhouse - ./mc admin user add clickminio test testtest - ./mc admin policy attach clickminio readwrite --user=test - ./mc mb --ignore-existing clickminio/test + echo "setup_minio(), test_type=$test_type" + mc alias set clickminio http://localhost:11111 clickhouse clickhouse + mc admin user add clickminio test testtest + mc admin policy attach clickminio readwrite --user=test ||: + mc mb --ignore-existing clickminio/test if [ "$test_type" = "stateless" ]; then - ./mc anonymous set public clickminio/test + echo "Create @test bucket in minio" + mc anonymous set public clickminio/test fi } @@ -95,12 +104,13 @@ upload_data() { local query_dir=$1 local test_path=$2 local data_path=${test_path}/queries/${query_dir}/data_minio + echo "upload_data() data_path=$data_path" # iterating over globs will cause redundant file variable to be # a path to a file, not a filename # shellcheck disable=SC2045 if [ -d "${data_path}" ]; then - ./mc cp --recursive "${data_path}"/ clickminio/test/ + mc cp --recursive "${data_path}"/ clickminio/test/ fi } @@ -138,7 +148,7 @@ wait_for_it() { main() { local query_dir query_dir=$(check_arg "$@") - if [ ! -f ./minio ]; then + if ! (minio --version && mc --version); then download_minio fi start_minio From 5b5652a2a6d1b44bfefa676423a93837be26fc17 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 5 Nov 2024 14:00:49 +0100 Subject: [PATCH 097/267] smaller value --- src/Processors/QueryPlan/JoinStep.cpp | 2 +- src/QueryPipeline/QueryPipelineBuilder.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Processors/QueryPlan/JoinStep.cpp b/src/Processors/QueryPlan/JoinStep.cpp index 55cc1020095..7824cd5dbd5 100644 --- a/src/Processors/QueryPlan/JoinStep.cpp +++ b/src/Processors/QueryPlan/JoinStep.cpp @@ -75,7 +75,7 @@ QueryPipelineBuilderPtr JoinStep::updatePipeline(QueryPipelineBuilders pipelines &processors); ppl->addSimpleTransform([&](const Block & header) - { return std::make_shared(header, max_block_size / 2, 1_MiB / 2); }); + { return std::make_shared(header, max_block_size / 2, 0); }); return ppl; } diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 157577d733d..463d18ed7a2 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -442,7 +442,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe Processors processors; for (auto & outport : outports) { - auto squashing = std::make_shared(right->getHeader(), max_block_size / 2, 1_MiB / 2); + auto squashing = std::make_shared(right->getHeader(), max_block_size / 2, 0); connect(*outport, squashing->getInputs().front()); processors.emplace_back(squashing); auto adding_joined = std::make_shared(right->getHeader(), join); @@ -501,7 +501,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe Block left_header = left->getHeader(); for (size_t i = 0; i < num_streams; ++i) { - auto squashing = std::make_shared(left->getHeader(), max_block_size / 2, 1_MiB / 2); + auto squashing = std::make_shared(left->getHeader(), max_block_size / 2, 0); connect(**lit, squashing->getInputs().front()); auto joining = std::make_shared( From d5b1b811976e4878af39c18b7727d35750bdbc3c Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 5 Nov 2024 14:01:19 +0100 Subject: [PATCH 098/267] fix perf test --- 
src/Interpreters/ConcurrentHashJoin.cpp | 9 ++++++++- src/Interpreters/ConcurrentHashJoin.h | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index d243b223241..aa1673655be 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -402,9 +402,16 @@ ScatteredBlocks scatterBlocksWithSelector(size_t num_shards, const IColumn::Sele return result; } -ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, const Block & from_block) +ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, Block & from_block) { size_t num_shards = hash_joins.size(); + if (num_shards == 1) + { + ScatteredBlocks res; + res.emplace_back(std::move(from_block)); + return res; + } + IColumn::Selector selector = selectDispatchBlock(num_shards, key_columns_names, from_block); /// With zero-copy approach we won't copy the source columns, but will create a new one with indices. diff --git a/src/Interpreters/ConcurrentHashJoin.h b/src/Interpreters/ConcurrentHashJoin.h index 48d487ba433..555a61d4004 100644 --- a/src/Interpreters/ConcurrentHashJoin.h +++ b/src/Interpreters/ConcurrentHashJoin.h @@ -79,7 +79,7 @@ private: std::mutex totals_mutex; Block totals; - ScatteredBlocks dispatchBlock(const Strings & key_columns_names, const Block & from_block); + ScatteredBlocks dispatchBlock(const Strings & key_columns_names, Block & from_block); }; UInt64 calculateCacheKey(std::shared_ptr & table_join, const QueryTreeNodePtr & right_table_expression); From 8e1d85b0b858c2fd5d1509f8c854aa55693b03cf Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 5 Nov 2024 15:07:18 +0100 Subject: [PATCH 099/267] new setting --- src/Core/Settings.cpp | 3 +++ src/Interpreters/InterpreterSelectQuery.cpp | 1 + src/Planner/PlannerJoinTree.cpp | 2 ++ src/Processors/QueryPlan/JoinStep.cpp | 10 ++++++++-- src/Processors/QueryPlan/JoinStep.h | 2 ++ src/QueryPipeline/QueryPipelineBuilder.cpp | 5 +++-- src/QueryPipeline/QueryPipelineBuilder.h | 1 + 7 files changed, 20 insertions(+), 4 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index df25f395624..9612b4511b2 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -143,6 +143,9 @@ Squash blocks passed to the external table to a specified size in bytes, if bloc )", 0) \ M(UInt64, max_joined_block_size_rows, DEFAULT_BLOCK_SIZE, R"( Maximum block size for JOIN result (if join algorithm supports it). 0 means unlimited. +)", 0) \ + M(UInt64, min_joined_block_size_rows, 1024, R"( +Minimum block size for JOIN result (if join algorithm supports it). 0 means no minimum. )", 0) \ M(UInt64, max_insert_threads, 0, R"( The maximum number of threads to execute the `INSERT SELECT` query. diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index d8c35285210..0932c1d71e9 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -1886,6 +1886,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

getCurrentHeader(), expressions.join, settings[Setting::max_block_size], + 0, max_streams, analysis_result.optimize_read_in_order); diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 39c1352c9cf..72492db84d2 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -104,6 +104,7 @@ namespace Setting extern const SettingsBool optimize_move_to_prewhere; extern const SettingsBool optimize_move_to_prewhere_if_final; extern const SettingsBool use_concurrency_control; + extern const SettingsUInt64 min_joined_block_size_rows; } namespace ErrorCodes @@ -1623,6 +1624,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ right_plan.getCurrentHeader(), std::move(join_algorithm), settings[Setting::max_block_size], + settings[Setting::min_joined_block_size_rows], settings[Setting::max_threads], false /*optimize_read_in_order*/); diff --git a/src/Processors/QueryPlan/JoinStep.cpp b/src/Processors/QueryPlan/JoinStep.cpp index 7824cd5dbd5..91e4869f2c1 100644 --- a/src/Processors/QueryPlan/JoinStep.cpp +++ b/src/Processors/QueryPlan/JoinStep.cpp @@ -44,9 +44,14 @@ JoinStep::JoinStep( const Header & right_header_, JoinPtr join_, size_t max_block_size_, + size_t min_block_size_, size_t max_streams_, bool keep_left_read_in_order_) - : join(std::move(join_)), max_block_size(max_block_size_), max_streams(max_streams_), keep_left_read_in_order(keep_left_read_in_order_) + : join(std::move(join_)) + , max_block_size(max_block_size_) + , min_block_size(min_block_size_) + , max_streams(max_streams_) + , keep_left_read_in_order(keep_left_read_in_order_) { updateInputHeaders({left_header_, right_header_}); } @@ -70,12 +75,13 @@ QueryPipelineBuilderPtr JoinStep::updatePipeline(QueryPipelineBuilders pipelines join, *output_header, max_block_size, + min_block_size, max_streams, keep_left_read_in_order, &processors); ppl->addSimpleTransform([&](const Block & header) - { return std::make_shared(header, max_block_size / 2, 0); }); + { return std::make_shared(header, min_block_size, 0); }); return ppl; } diff --git a/src/Processors/QueryPlan/JoinStep.h b/src/Processors/QueryPlan/JoinStep.h index 2793784d633..9222ced2e55 100644 --- a/src/Processors/QueryPlan/JoinStep.h +++ b/src/Processors/QueryPlan/JoinStep.h @@ -18,6 +18,7 @@ public: const Header & right_header_, JoinPtr join_, size_t max_block_size_, + size_t min_block_size_, size_t max_streams_, bool keep_left_read_in_order_); @@ -39,6 +40,7 @@ private: JoinPtr join; size_t max_block_size; + size_t min_block_size; size_t max_streams; bool keep_left_read_in_order; }; diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 463d18ed7a2..a9a3b081fac 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -386,6 +386,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe JoinPtr join, const Block & output_header, size_t max_block_size, + size_t min_block_size, size_t max_streams, bool keep_left_read_in_order, Processors * collected_processors) @@ -442,7 +443,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe Processors processors; for (auto & outport : outports) { - auto squashing = std::make_shared(right->getHeader(), max_block_size / 2, 0); + auto squashing = std::make_shared(right->getHeader(), min_block_size, 0); connect(*outport, squashing->getInputs().front()); processors.emplace_back(squashing); auto adding_joined = std::make_shared(right->getHeader(), 
join); @@ -501,7 +502,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe Block left_header = left->getHeader(); for (size_t i = 0; i < num_streams; ++i) { - auto squashing = std::make_shared(left->getHeader(), max_block_size / 2, 0); + auto squashing = std::make_shared(left->getHeader(), min_block_size, 0); connect(**lit, squashing->getInputs().front()); auto joining = std::make_shared( diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index a9e5b1535c0..34bb62ee0d2 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -126,6 +126,7 @@ public: JoinPtr join, const Block & output_header, size_t max_block_size, + size_t min_block_size, size_t max_streams, bool keep_left_read_in_order, Processors * collected_processors = nullptr); From 98ee0893318bcfd4e0d63b564f513b37579bd3c8 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 5 Nov 2024 17:31:47 +0000 Subject: [PATCH 100/267] Cleanup --- tests/queries/0_stateless/03261_pr_semi_anti_join.sql | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03261_pr_semi_anti_join.sql b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql index d2ea3725d6b..2d671756d6e 100644 --- a/tests/queries/0_stateless/03261_pr_semi_anti_join.sql +++ b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql @@ -1,6 +1,5 @@ DROP TABLE IF EXISTS t1 SYNC; DROP TABLE IF EXISTS t2 SYNC; -create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); CREATE TABLE t1 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t1', '1') order by tuple(); CREATE TABLE t2 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t2', '1') order by tuple(); From 699b9d40263078285a8fce6a031bc74ce72c16d3 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Wed, 6 Nov 2024 20:20:44 +0800 Subject: [PATCH 101/267] fix comments --- src/Functions/parseDateTime.cpp | 277 ++++++++++++++++++++++---------- 1 file changed, 194 insertions(+), 83 deletions(-) diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 65bc65fb45c..976be53a21e 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -57,8 +57,15 @@ namespace Null }; + enum class ReturnType: uint8_t + { + DateTime, + DateTime64 + }; + constexpr Int32 minYear = 1970; constexpr Int32 maxYear = 2106; + constexpr Int32 maxPrecisionOfDateTime64 = 6; const std::unordered_map> dayOfWeekMap{ {"mon", {"day", 1}}, @@ -570,8 +577,8 @@ namespace } }; - /// _FUNC_(str[, format, timezone]) - template + /// _FUNC_(str[scale, format, timezone]) + template class FunctionParseDateTimeImpl : public IFunction { public: @@ -602,79 +609,112 @@ namespace {"time", static_cast(&isString), nullptr, "String"} }; - FunctionArgumentDescriptors optional_args{ - {"format", static_cast(&isString), nullptr, "String"}, - {"timezone", static_cast(&isString), &isColumnConst, "const String"} - }; - + FunctionArgumentDescriptors optional_args; + if constexpr (return_type == ReturnType::DateTime64) + { + optional_args = { + {"precision or format", static_cast([](const IDataType & data_type) -> bool { + return isUInt(data_type) || isString(data_type); + }), nullptr, "Number or String"}, + {"format", static_cast(&isString), nullptr, "String"}, + {"timezone", static_cast(&isString), &isColumnConst, "const String"} + }; + } + else + optional_args = { + {"format", static_cast(&isString), 
nullptr, "String"}, + {"timezone", static_cast(&isString), &isColumnConst, "const String"} + }; validateFunctionArguments(*this, arguments, mandatory_args, optional_args); String time_zone_name = getTimeZone(arguments).getTimeZone(); - DataTypePtr date_type = nullptr; - if constexpr (parseDateTime64) + DataTypePtr data_type; + if constexpr (return_type == ReturnType::DateTime64) { - String format = getFormat(arguments); - std::vector instructions = parseFormat(format); - UInt32 scale = 0; - if (!instructions.empty()) + if (arguments.size() == 1) + return std::make_shared(0, time_zone_name); + else { - for (const auto & ins : instructions) + UInt32 precision = 0; + if (isUInt(arguments[1].type)) { - if (scale > 0) - break; - const String fragment = ins.getFragment(); + const auto * col_precision = checkAndGetColumnConst(arguments[1].column.get()); + if (col_precision) + precision = col_precision->getValue(); + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The input precision value may exceed the max value of `DateTime64`: {}.", + maxPrecisionOfDateTime64); + } + /// Construct the return type `DataTypDateTime64` with precision and time zone name. The precision value can be specified or be extracted + /// from the format string by computing how many 'S' characters are contained in the format's micorsceond fragment. + String format = getFormat(arguments, precision); + std::vector instructions = parseFormat(format); + for (const auto & instruction : instructions) + { + const String & fragment = instruction.getFragment(); + UInt32 val = 0; for (char ch : fragment) { if (ch != 'S') { - scale = 0; + val = 0; break; } else - scale++; + val++; } + /// If the precision is already specified by the second parameter, but it not equals the value that extract from the format string, + /// then we should throw an exception; If the precision is not specified, then we set its value as the extracted one. 
+ if (val != 0 && precision != 0 && val != precision) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The precision of the input format string {} does not equal the given precision value {}.", + format, + precision); + else if (precision == 0 && val != 0) + precision = val; } + if (precision > maxPrecisionOfDateTime64) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The precision of the input format string {} exceeds the max precision value {}.", + format, + maxPrecisionOfDateTime64); + data_type = std::make_shared(precision, time_zone_name); } } else - date_type = std::make_shared(time_zone_name); + data_type = std::make_shared(time_zone_name); if (error_handling == ErrorHandling::Null) - return std::make_shared(date_type); - return date_type; + return std::make_shared(data_type); + return data_type; } ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { - ColumnUInt8::MutablePtr col_null_map; + DataTypePtr non_null_result_type; if constexpr (error_handling == ErrorHandling::Null) - col_null_map = ColumnUInt8::create(input_rows_count, 0); - if constexpr (parseDateTime64) + non_null_result_type = removeNullable(result_type); + else + non_null_result_type = result_type; + + if constexpr (return_type == ReturnType::DateTime64) { - const DataTypeDateTime64 * datatime64_type = checkAndGetDataType(removeNullable(result_type).get()); - auto col_res = ColumnDateTime64::create(input_rows_count, datatime64_type->getScale()); - PaddedPODArray & res_data = col_res->getData(); - executeImpl2(arguments, result_type, input_rows_count, res_data, col_null_map); - if constexpr (error_handling == ErrorHandling::Null) - return ColumnNullable::create(std::move(col_res), std::move(col_null_map)); - else - return col_res; + const auto * datatime64_type = checkAndGetDataType(non_null_result_type.get()); + MutableColumnPtr col_res = ColumnDateTime64::create(input_rows_count, datatime64_type->getScale()); + ColumnDateTime64 * col_datetime64 = assert_cast(col_res.get()); + return executeImpl2(arguments, result_type, input_rows_count, col_res, col_datetime64->getData()); } else { - auto col_res = ColumnDateTime::create(input_rows_count); - PaddedPODArray & res_data = col_res->getData(); - executeImpl2(arguments, result_type, input_rows_count, res_data, col_null_map); - if constexpr (error_handling == ErrorHandling::Null) - return ColumnNullable::create(std::move(col_res), std::move(col_null_map)); - else - return col_res; + MutableColumnPtr col_res = ColumnDateTime::create(input_rows_count); + ColumnDateTime * col_datetime = assert_cast(col_res.get()); + return executeImpl2(arguments, result_type, input_rows_count, col_res, col_datetime->getData()); } } template - void executeImpl2(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, - PaddedPODArray & res_data, ColumnUInt8::MutablePtr & col_null_map) const + ColumnPtr executeImpl2(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, + MutableColumnPtr & col_res, PaddedPODArray & res_data) const { const auto * col_str = checkAndGetColumn(arguments[0].column.get()); if (!col_str) @@ -683,8 +723,21 @@ namespace "Illegal column {} of first ('str') argument of function {}. 
Must be string.", arguments[0].column->getName(), getName()); + + ColumnUInt8::MutablePtr col_null_map; + if constexpr (error_handling == ErrorHandling::Null) + col_null_map = ColumnUInt8::create(input_rows_count, 0); - String format = getFormat(arguments); + Int64 multiplier = 0; + UInt32 precision = 0; + if constexpr (return_type == ReturnType::DateTime64) + { + const DataTypeDateTime64 * datatime64_type = checkAndGetDataType(removeNullable(result_type).get()); + precision = datatime64_type->getScale(); + multiplier = DecimalUtils::scaleMultiplier(precision); + } + + String format = getFormat(arguments, precision); const auto & time_zone = getTimeZone(arguments); std::vector instructions = parseFormat(format); @@ -733,8 +786,8 @@ namespace Int64OrError result = 0; - /// Ensure all input was consumed - if (!parseDateTime64 && cur < end) + /// Ensure all input was consumed when the return type is `DateTime`. + if (return_type == ReturnType::DateTime && cur < end) { result = tl::unexpected(ErrorCodeAndMessage( ErrorCodes::CANNOT_PARSE_DATETIME, @@ -747,12 +800,8 @@ namespace { if (result = datetime.buildDateTime(time_zone); result.has_value()) { - if constexpr (parseDateTime64) - { - const DataTypeDateTime64 * datatime64_type = checkAndGetDataType(removeNullable(result_type).get()); - Int64 multiplier = DecimalUtils::scaleMultiplier(datatime64_type->getScale()); + if constexpr (return_type == ReturnType::DateTime64) res_data[i] = static_cast(*result) * multiplier + datetime.microsecond; - } else res_data[i] = static_cast(*result); } @@ -777,6 +826,10 @@ namespace } } } + if constexpr (error_handling == ErrorHandling::Null) + return ColumnNullable::create(std::move(col_res), std::move(col_null_map)); + else + return std::move(col_res); } @@ -808,7 +861,7 @@ namespace explicit Instruction(const String & literal_) : literal(literal_), fragment("LITERAL") { } explicit Instruction(String && literal_) : literal(std::move(literal_)), fragment("LITERAL") { } - String getFragment() const { return fragment; } + const String & getFragment() const { return fragment; } /// For debug [[maybe_unused]] String toString() const @@ -1695,7 +1748,7 @@ namespace } [[nodiscard]] - static PosOrError jodaMicroSecondOfSecond(size_t repetitions, Pos cur, Pos end, const String & fragment, DateTime & date) + static PosOrError jodaMicrosecondOfSecond(size_t repetitions, Pos cur, Pos end, const String & fragment, DateTime & date) { Int32 microsecond; ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumberWithVariableLength(cur, end, false, false, false, repetitions, std::max(repetitions, 2uz), fragment, microsecond))) @@ -1704,25 +1757,25 @@ namespace } [[nodiscard]] - static PosOrError jodaTimezoneId(size_t, Pos cur, Pos end, const String &, DateTime & date) + static PosOrError jodaTimezone(size_t, Pos cur, Pos end, const String &, DateTime & date) { - String dateTimeZone; + String read_time_zone; while (cur <= end) { - dateTimeZone += *cur; + read_time_zone += *cur; ++cur; } - const DateLUTImpl & date_time_zone = DateLUT::instance(dateTimeZone); + const DateLUTImpl & date_time_zone = DateLUT::instance(read_time_zone); const auto result = date.buildDateTime(date_time_zone); if (result.has_value()) { - const auto timezoneOffset = date_time_zone.timezoneOffset(*result); + const DateLUTImpl::Time timezone_offset = date_time_zone.timezoneOffset(*result); date.has_time_zone_offset = true; - date.time_zone_offset = timezoneOffset; + date.time_zone_offset = timezone_offset; return cur; } else - 
RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Unable to build date time from timezone {}", read_time_zone) } [[nodiscard]] @@ -1745,8 +1798,22 @@ namespace Int32 hour; ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumberWithVariableLength(cur, end, false, false, false, repetitions, std::max(repetitions, 2uz), fragment, hour))) + if (hour < 0 || hour > 23) + RETURN_ERROR( + ErrorCodes::CANNOT_PARSE_DATETIME, + "Unable to parse fragment {} from {} because the hour of datetime is not in range [0, 23]: {}", + fragment, + std::string_view(cur, end - cur), + std::string_view(cur, 1)) Int32 minute; ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumberWithVariableLength(cur, end, false, false, false, repetitions, std::max(repetitions, 2uz), fragment, minute))) + if (minute < 0 || minute > 59) + RETURN_ERROR( + ErrorCodes::CANNOT_PARSE_DATETIME, + "Unable to parse fragment {} from {} because the minute of datetime is not in range [0, 59]: {}", + fragment, + std::string_view(cur, end - cur), + std::string_view(cur, 1)) date.has_time_zone_offset = true; date.time_zone_offset = sign * (hour * 3600 + minute * 60); return cur; @@ -2133,10 +2200,10 @@ namespace instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaSecondOfMinute, repetitions)); break; case 'S': - instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaMicroSecondOfSecond, repetitions)); + instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaMicrosecondOfSecond, repetitions)); break; case 'z': - instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaTimezoneId, repetitions)); + instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaTimezone, repetitions)); break; case 'Z': instructions.emplace_back(ACTION_ARGS_WITH_BIND(Instruction::jodaTimezoneOffset, repetitions)); @@ -2156,26 +2223,45 @@ namespace } - String getFormat(const ColumnsWithTypeAndName & arguments) const + String getFormat(const ColumnsWithTypeAndName & arguments, UInt32 precision) const { - if (arguments.size() == 1) + size_t format_arg_index = 1; + if constexpr (return_type == ReturnType::DateTime64) { - if constexpr (parse_syntax == ParseSyntax::MySQL) - return "%Y-%m-%d %H:%i:%s"; + /// When parsing `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 3)`, the format is treated + /// as the default value `yyyy-MM-dd HH:mm:ss`. + /// When parsing `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 'yyyy-MM-dd HH:mm:ss.SSS')`, + /// the second argument is the format. + /// When parsing `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS')`, + /// the third argument is the format. + if (arguments.size() > 1 && isString(removeNullable(arguments[1].type))) + format_arg_index = 1; else - return "yyyy-MM-dd HH:mm:ss"; + format_arg_index = 2; + } + + if (arguments.size() <= format_arg_index) + { + String format; + if constexpr (parse_syntax == ParseSyntax::MySQL) + format = "%Y-%m-%d %H:%i:%s"; + else + format = "yyyy-MM-dd HH:mm:ss"; + if (precision > 0) + format += "." 
+ String(precision, 'S'); + return format; } else { - if (!arguments[1].column || !isColumnConst(*arguments[1].column)) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Argument at index {} for function {} must be constant", 1, getName()); + if (!arguments[format_arg_index].column || !isColumnConst(*arguments[format_arg_index].column)) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Argument at index {} for function {} must be constant", format_arg_index, getName()); - const auto * col_format = checkAndGetColumnConst(arguments[1].column.get()); + const auto * col_format = checkAndGetColumnConst(arguments[format_arg_index].column.get()); if (!col_format) throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of second ('format') argument of function {}. Must be constant string.", - arguments[1].column->getName(), + arguments[format_arg_index].column->getName(), getName()); return col_format->getValue(); } @@ -2183,15 +2269,19 @@ namespace const DateLUTImpl & getTimeZone(const ColumnsWithTypeAndName & arguments) const { - if (arguments.size() < 3) + size_t timezone_arg_index = 2; + if constexpr (return_type == ReturnType::DateTime64) + timezone_arg_index = 3; + + if (arguments.size() <= timezone_arg_index) return DateLUT::instance(); - const auto * col = checkAndGetColumnConst(arguments[2].column.get()); + const auto * col = checkAndGetColumnConst(arguments[timezone_arg_index].column.get()); if (!col) throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of third ('timezone') argument of function {}. Must be constant String.", - arguments[2].column->getName(), + arguments[timezone_arg_index].column->getName(), getName()); String time_zone = col->getValue(); @@ -2229,6 +2319,21 @@ namespace static constexpr auto name = "parseDateTimeInJodaSyntaxOrNull"; }; + struct NameParseDateTime64 + { + static constexpr auto name = "parseDateTime64"; + }; + + struct NameParseDateTime64OrZero + { + static constexpr auto name = "parseDateTime64OrZero"; + }; + + struct NameParseDateTime64OrNull + { + static constexpr auto name = "parseDateTime64OrNull"; + }; + struct NameParseDateTime64InJodaSyntax { static constexpr auto name = "parseDateTime64InJodaSyntax"; @@ -2244,15 +2349,18 @@ namespace static constexpr auto name = "parseDateTime64InJodaSyntaxOrNull"; }; - using FunctionParseDateTime = FunctionParseDateTimeImpl; - using FunctionParseDateTimeOrZero = FunctionParseDateTimeImpl; - using FunctionParseDateTimeOrNull = FunctionParseDateTimeImpl; - using FunctionParseDateTimeInJodaSyntax = FunctionParseDateTimeImpl; - using FunctionParseDateTimeInJodaSyntaxOrZero = FunctionParseDateTimeImpl; - using FunctionParseDateTimeInJodaSyntaxOrNull = FunctionParseDateTimeImpl; - using FunctionParseDateTime64InJodaSyntax = FunctionParseDateTimeImpl; - using FunctionParseDateTime64InJodaSyntaxOrZero = FunctionParseDateTimeImpl; - using FunctionParseDateTime64InJodaSyntaxOrNull = FunctionParseDateTimeImpl; + using FunctionParseDateTime = FunctionParseDateTimeImpl; + using FunctionParseDateTimeOrZero = FunctionParseDateTimeImpl; + using FunctionParseDateTimeOrNull = FunctionParseDateTimeImpl; + using FunctionParseDateTime64 = FunctionParseDateTimeImpl; + using FunctionParseDateTime64OrZero = FunctionParseDateTimeImpl; + using FunctionParseDateTime64OrNull = FunctionParseDateTimeImpl; + using FunctionParseDateTimeInJodaSyntax = FunctionParseDateTimeImpl; + using FunctionParseDateTimeInJodaSyntaxOrZero = FunctionParseDateTimeImpl; + using FunctionParseDateTimeInJodaSyntaxOrNull = FunctionParseDateTimeImpl; + 
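The aliases here wire the new `parseDateTime64*` names to the shared implementation. A minimal usage sketch of the call patterns described in the comments above (illustrative queries, not taken from this patch's tests; results depend on the session time zone):

    -- precision passed explicitly as the second argument
    SELECT parseDateTime64('2024-11-05 12:22:22.123', 3);
    -- precision inferred from the number of 'S' characters in the format
    SELECT parseDateTime64InJodaSyntax('2024-11-05 12:22:22.123', 'yyyy-MM-dd HH:mm:ss.SSS');
    -- the OrNull/OrZero variants return NULL or a zero value instead of throwing
    SELECT parseDateTime64OrNull('not a datetime', 3);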
using FunctionParseDateTime64InJodaSyntax = FunctionParseDateTimeImpl; + using FunctionParseDateTime64InJodaSyntaxOrZero = FunctionParseDateTimeImpl; + using FunctionParseDateTime64InJodaSyntaxOrNull = FunctionParseDateTimeImpl; } REGISTER_FUNCTION(ParseDateTime) @@ -2262,6 +2370,9 @@ REGISTER_FUNCTION(ParseDateTime) factory.registerFunction(); factory.registerFunction(); factory.registerAlias("str_to_date", FunctionParseDateTimeOrNull::name, FunctionFactory::Case::Insensitive); + factory.registerFunction(); + factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); From c1345d38c8e987838704a4ae7da6cb05af8257c2 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 6 Nov 2024 15:44:22 +0000 Subject: [PATCH 102/267] Fix flakiness in 03254_pr_join_on_dups --- src/Interpreters/IJoin.h | 1 - src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp | 1 - tests/queries/0_stateless/03254_pr_join_on_dups.sql | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Interpreters/IJoin.h b/src/Interpreters/IJoin.h index 8f648de2538..5a83137ca2a 100644 --- a/src/Interpreters/IJoin.h +++ b/src/Interpreters/IJoin.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include diff --git a/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp b/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp index c0b31864eac..35d340b4bbf 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp @@ -16,7 +16,6 @@ #include #include -#include namespace DB::QueryPlanOptimizations { diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 222f7693090..166910d496f 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -10,6 +10,7 @@ insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; +set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken and leads to unexpected results, https://github.com/ClickHouse/ClickHouse/issues/65690 select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; From 174550e1bf23c859c7930bbd145399e6fad46f1e Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Thu, 7 Nov 2024 13:28:10 +0330 Subject: [PATCH 103/267] Added "date_time_utc" Signed-off-by: xogoodnow --- .../settings.md | 1 + src/Loggers/OwnJSONPatternFormatter.cpp | 22 +++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 02fa5a8ca58..dd7f22f5c97 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1629,6 +1629,7 @@ You can specify the log format that will be outputted in the console log. 
Curren ```json { + "date_time_utc": "2024-11-06T09:06:09Z", "date_time": "1650918987.180175", "thread_name": "#1", "thread_id": "254545", diff --git a/src/Loggers/OwnJSONPatternFormatter.cpp b/src/Loggers/OwnJSONPatternFormatter.cpp index 4263ad5925a..3eccb176c1f 100644 --- a/src/Loggers/OwnJSONPatternFormatter.cpp +++ b/src/Loggers/OwnJSONPatternFormatter.cpp @@ -7,12 +7,18 @@ #include #include #include +#include +#include + OwnJSONPatternFormatter::OwnJSONPatternFormatter(Poco::Util::AbstractConfiguration & config) { if (config.has("logger.formatting.names.date_time")) date_time = config.getString("logger.formatting.names.date_time", ""); + if (config.has("logger.formatting.names.date_time_utc")) + date_time_utc= config.getString("logger.formatting.names.date_time_utc", ""); + if (config.has("logger.formatting.names.thread_name")) thread_name = config.getString("logger.formatting.names.thread_name", ""); @@ -41,6 +47,7 @@ OwnJSONPatternFormatter::OwnJSONPatternFormatter(Poco::Util::AbstractConfigurati && logger_name.empty() && message.empty() && source_file.empty() && source_line.empty()) { date_time = "date_time"; + date_time_utc = "date_time_utc"; thread_name = "thread_name"; thread_id = "thread_id"; level = "level"; @@ -62,8 +69,22 @@ void OwnJSONPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ const Poco::Message & msg = msg_ext.base; DB::writeChar('{', wb); + if (!date_time_utc.empty()) + { + writeJSONString(date_time_utc, wb, settings); + DB::writeChar(':', wb); + + DB::writeChar('\"', wb); + static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); + writeDateTimeTextISO(msg_ext.time_seconds, 0, wb, utc_time_zone); + + DB::writeChar('\"', wb); + print_comma = true; + } + if (!date_time.empty()) { + if (print_comma) DB::writeChar(',', wb); writeJSONString(date_time, wb, settings); DB::writeChar(':', wb); @@ -81,6 +102,7 @@ void OwnJSONPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ print_comma = true; } + if (!thread_name.empty()) { if (print_comma) From 1c74206bf2fddd2aad8f96699769e20dd122979a Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Thu, 7 Nov 2024 18:00:14 +0800 Subject: [PATCH 104/267] add parseDateTime64 functions --- src/Functions/parseDateTime.cpp | 145 ++++++++++++------ .../03252_parse_datetime64.reference | 17 ++ .../0_stateless/03252_parse_datetime64.sql | 32 ++++ ..._parse_datetime64_in_joda_syntax.reference | 32 ++-- .../03252_parse_datetime64_in_joda_syntax.sql | 60 ++++++-- 5 files changed, 214 insertions(+), 72 deletions(-) create mode 100644 tests/queries/0_stateless/03252_parse_datetime64.reference create mode 100644 tests/queries/0_stateless/03252_parse_datetime64.sql diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 976be53a21e..9f7f78dcbe2 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -193,6 +193,7 @@ namespace Int32 minute = 0; /// range [0, 59] Int32 second = 0; /// range [0, 59] Int32 microsecond = 0; /// range [0, 999999] + UInt32 scale = 0; /// The microsecond scale of DateTime64. bool is_am = true; /// If is_hour_of_half_day = true and is_am = false (i.e. 
pm) then add 12 hours to the result DateTime bool hour_starts_at_1 = false; /// Whether the hour is clockhour @@ -221,6 +222,7 @@ namespace minute = 0; second = 0; microsecond = 0; + scale = 0; is_am = true; hour_starts_at_1 = false; @@ -599,7 +601,7 @@ namespace bool useDefaultImplementationForConstants() const override { return true; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } - ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; } + ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2, 3}; } bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } @@ -613,9 +615,9 @@ namespace if constexpr (return_type == ReturnType::DateTime64) { optional_args = { - {"precision or format", static_cast([](const IDataType & data_type) -> bool { + {"scale/format", static_cast([](const IDataType & data_type) -> bool { return isUInt(data_type) || isString(data_type); - }), nullptr, "Number or String"}, + }), nullptr, "UInt or String"}, {"format", static_cast(&isString), nullptr, "String"}, {"timezone", static_cast(&isString), &isColumnConst, "const String"} }; @@ -631,24 +633,34 @@ namespace DataTypePtr data_type; if constexpr (return_type == ReturnType::DateTime64) { + UInt32 scale = 0; if (arguments.size() == 1) - return std::make_shared(0, time_zone_name); + { + /// In MySQL parse syntax, the scale of microseond is 6. + if constexpr (parse_syntax == ParseSyntax::MySQL) + scale = 6; + } else { - UInt32 precision = 0; if (isUInt(arguments[1].type)) { - const auto * col_precision = checkAndGetColumnConst(arguments[1].column.get()); - if (col_precision) - precision = col_precision->getValue(); + const auto * col_scale = checkAndGetColumnConst(arguments[1].column.get()); + if (col_scale) + scale = col_scale->getValue(); else throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The input precision value may exceed the max value of `DateTime64`: {}.", + "The input scale value may exceed the max value of `DateTime64`: {}.", maxPrecisionOfDateTime64); } - /// Construct the return type `DataTypDateTime64` with precision and time zone name. The precision value can be specified or be extracted - /// from the format string by computing how many 'S' characters are contained in the format's micorsceond fragment. - String format = getFormat(arguments, precision); + else + { + if constexpr (parse_syntax == ParseSyntax::MySQL) + scale = 6; + } + + /// Construct the return type `DataTypDateTime64` with scale and time zone name. The scale value can be specified or be extracted + /// from the format string by c how many 'S' characters are contained in the format's micorsceond fragment. + String format = getFormat(arguments, scale); std::vector instructions = parseFormat(format); for (const auto & instruction : instructions) { @@ -664,26 +676,27 @@ namespace else val++; } - /// If the precision is already specified by the second parameter, but it not equals the value that extract from the format string, - /// then we should throw an exception; If the precision is not specified, then we set its value as the extracted one. - if (val != 0 && precision != 0 && val != precision) + /// If the scale is already specified by the second argument, but it not equals the value that extract from the format string, + /// then we should throw an exception; If the scale is not specified, then we should set its value as the extracted one. 
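As an illustration of the scale/format consistency rule the comment above describes, the tests added later in this same patch exercise both directions. A sketch, with queries as they appear in 03252_parse_datetime64_in_joda_syntax.sql:

```sql
-- Scale inferred from the format: three 'S' characters give scale 3.
select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS');
-- An explicit scale must match the format's microsecond fragment: 3 vs. four 'S' is rejected.
select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS }
```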
+ if (val != 0 && scale != 0 && val != scale) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The precision of input format string {} not equals the given precision value {}.", + "The scale of input format string {} not equals the given scale value {}.", format, - precision); - else if (precision == 0 && val != 0) - precision = val; + scale); + else if (scale == 0 && val != 0) + scale = val; } - if (precision > maxPrecisionOfDateTime64) + if (scale > maxPrecisionOfDateTime64) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The precision of the input format string {} exceed the max precision value {}.", + "The scale of the input format string {} exceed the max scale value {}.", format, maxPrecisionOfDateTime64); - data_type = std::make_shared(precision, time_zone_name); } + data_type = std::make_shared(scale, time_zone_name); } else data_type = std::make_shared(time_zone_name); + if (error_handling == ErrorHandling::Null) return std::make_shared(data_type); return data_type; @@ -729,15 +742,15 @@ namespace col_null_map = ColumnUInt8::create(input_rows_count, 0); Int64 multiplier = 0; - UInt32 precision = 0; + UInt32 scale = 0; if constexpr (return_type == ReturnType::DateTime64) { const DataTypeDateTime64 * datatime64_type = checkAndGetDataType(removeNullable(result_type).get()); - precision = datatime64_type->getScale(); - multiplier = DecimalUtils::scaleMultiplier(precision); + scale = datatime64_type->getScale(); + multiplier = DecimalUtils::scaleMultiplier(scale); } - String format = getFormat(arguments, precision); + const String format = getFormat(arguments, scale); const auto & time_zone = getTimeZone(arguments); std::vector instructions = parseFormat(format); @@ -746,6 +759,9 @@ namespace for (size_t i = 0; i < input_rows_count; ++i) { datetime.reset(); + if constexpr (return_type == ReturnType::DateTime64) + datetime.scale = scale; + StringRef str_ref = col_str->getDataAt(i); Pos cur = str_ref.data; Pos end = str_ref.data + str_ref.size; @@ -787,7 +803,7 @@ namespace Int64OrError result = 0; /// Ensure all input was consumed when the return type is `DateTime`. 
- if (return_type == ReturnType::DateTime && cur < end) + if (cur < end) { result = tl::unexpected(ErrorCodeAndMessage( ErrorCodes::CANNOT_PARSE_DATETIME, @@ -938,6 +954,28 @@ namespace return cur; } + template + [[nodiscard]] + static PosOrError readNumber6(Pos cur, Pos end, [[maybe_unused]] const String & fragment, T & res) + { + if constexpr (need_check_space == NeedCheckSpace::Yes) + RETURN_ERROR_IF_FAILED(checkSpace(cur, end, 6, "readNumber6 requires size >= 6", fragment)) + + res = (*cur - '0'); + ++cur; + res = res * 10 + (*cur - '0'); + ++cur; + res = res * 10 + (*cur - '0'); + ++cur; + res = res * 10 + (*cur - '0'); + ++cur; + res = res * 10 + (*cur - '0'); + ++cur; + res = res * 10 + (*cur - '0'); + ++cur; + return cur; + } + [[nodiscard]] static VoidOrError checkSpace(Pos cur, Pos end, size_t len, const String & msg, const String & fragment) { @@ -1358,13 +1396,18 @@ namespace } [[nodiscard]] - static PosOrError mysqlMicrosecond(Pos cur, Pos end, const String & fragment, DateTime & /*date*/) + static PosOrError mysqlMicrosecond(Pos cur, Pos end, const String & fragment, DateTime & date) { - RETURN_ERROR_IF_FAILED(checkSpace(cur, end, 6, "mysqlMicrosecond requires size >= 6", fragment)) - - for (size_t i = 0; i < 6; ++i) - ASSIGN_RESULT_OR_RETURN_ERROR(cur, (assertNumber(cur, end, fragment))) - + if (date.scale != 6) + RETURN_ERROR( + ErrorCodes::CANNOT_PARSE_DATETIME, + "Unable to parse fragment {} from {} because of the microsecond's scale {} is not 6", + fragment, + std::string_view(cur, end - cur), + std::to_string(date.scale)) + Int32 microsecond = 0; + ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumber6(cur, end, fragment, microsecond))) + RETURN_ERROR_IF_FAILED(date.setMicrosecond(microsecond)) return cur; } @@ -1775,7 +1818,7 @@ namespace return cur; } else - RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Unable to build date time from timezone {}", read_time_zone) + RETURN_ERROR(ErrorCodes::CANNOT_PARSE_DATETIME, "Unable to parse date time from timezone {}", read_time_zone) } [[nodiscard]] @@ -2223,7 +2266,7 @@ namespace } - String getFormat(const ColumnsWithTypeAndName & arguments, UInt32 precision) const + String getFormat(const ColumnsWithTypeAndName & arguments, UInt32 scale) const { size_t format_arg_index = 1; if constexpr (return_type == ReturnType::DateTime64) @@ -2247,15 +2290,17 @@ namespace format = "%Y-%m-%d %H:%i:%s"; else format = "yyyy-MM-dd HH:mm:ss"; - if (precision > 0) - format += "." + String(precision, 'S'); + if (scale > 0) + { + if constexpr (parse_syntax == ParseSyntax::MySQL) + format += ".%f"; + else + format += "." 
+ String(scale, 'S'); + } return format; } else { - if (!arguments[format_arg_index].column || !isColumnConst(*arguments[format_arg_index].column)) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Argument at index {} for function {} must be constant", format_arg_index, getName()); - const auto * col_format = checkAndGetColumnConst(arguments[format_arg_index].column.get()); if (!col_format) throw Exception( @@ -2269,18 +2314,24 @@ namespace const DateLUTImpl & getTimeZone(const ColumnsWithTypeAndName & arguments) const { - size_t timezone_arg_index = 2; - if constexpr (return_type == ReturnType::DateTime64) - timezone_arg_index = 3; - - if (arguments.size() <= timezone_arg_index) + if (arguments.size() < 3) return DateLUT::instance(); - + else if constexpr (return_type == ReturnType::DateTime64) + { + /// If the return type is DateTime64, and the second argument is UInt type for scale, then it has 2 reasonable situations: + /// the first like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT+8') + /// the second like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f'). And for the + /// first one, we should return the last argument as its timezone, and for the second one, we should return the default time zone as + /// `DateLUT::instance()`. + if (isUInt(arguments[1].type) && arguments.size() < 4) + return DateLUT::instance(); + } + size_t timezone_arg_index = arguments.size() - 1; const auto * col = checkAndGetColumnConst(arguments[timezone_arg_index].column.get()); if (!col) throw Exception( ErrorCodes::ILLEGAL_COLUMN, - "Illegal column {} of third ('timezone') argument of function {}. Must be constant String.", + "Illegal column {} of ('timezone') argument of function {}. 
Must be constant String.", arguments[timezone_arg_index].column->getName(), getName()); diff --git a/tests/queries/0_stateless/03252_parse_datetime64.reference b/tests/queries/0_stateless/03252_parse_datetime64.reference new file mode 100644 index 00000000000..27dcef6bf68 --- /dev/null +++ b/tests/queries/0_stateless/03252_parse_datetime64.reference @@ -0,0 +1,17 @@ +2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 +1970-01-01 08:00:00.000000 +1970-01-01 08:00:00.000 +1970-01-01 08:00:00.000 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +1970-01-01 08:00:00.000 +2024-10-09 10:30:10.123456 +\N +\N +\N +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +\N diff --git a/tests/queries/0_stateless/03252_parse_datetime64.sql b/tests/queries/0_stateless/03252_parse_datetime64.sql new file mode 100644 index 00000000000..d28b6e586f7 --- /dev/null +++ b/tests/queries/0_stateless/03252_parse_datetime64.sql @@ -0,0 +1,32 @@ +set session_timezone = 'Asia/Shanghai'; + +select parseDateTime64('2024-10-09 10:30:10.123456'); +select parseDateTime64('2024-10-09 10:30:10.123'); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime64('2024-10-09 10:30:10', 3); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime64('2024-10-09 10:30:10.', 3); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64('2024-10-09 10:30:10.123456', 6), parseDateTime64('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); +select parseDateTime64('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('2024-10-09 10:30:10.123', 6, '%Y-%m-%d %H:%i:%s.%f'); -- { serverError NOT_ENOUGH_SPACE } + +select parseDateTime64OrZero('2024-10-09 10:30:10.123456'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123'); +select parseDateTime64OrZero('2024-10-09 10:30:10', 3); +select parseDateTime64OrZero('2024-10-09 10:30:10.', 3); +select parseDateTime64OrZero('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64OrZero('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); + +select parseDateTime64OrNull('2024-10-09 10:30:10.123456'); +select parseDateTime64OrNull('2024-10-09 10:30:10.123'); +select parseDateTime64OrNull('2024-10-09 10:30:10', 3); +select parseDateTime64OrNull('2024-10-09 10:30:10.', 3); +select parseDateTime64OrNull('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64OrNull('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64OrNull('2024-10-09 
10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7');; +select parseDateTime64OrNull('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); \ No newline at end of file diff --git a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference index 063b76b152c..0b4a28c4b38 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference +++ b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference @@ -1,14 +1,26 @@ -2024-10-09 10:30:10.123 -2024-10-09 10:30:10.123456 -2024-10-10 02:30:10.123456 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-10 02:42:10.123456 2024-10-10 01:30:10.123456 -2024-10-09 10:30:10.123 -2024-10-09 10:30:10.123456 -1970-01-01 08:00:00.000000000 -2024-10-10 02:30:10.123456 2024-10-10 01:30:10.123456 -2024-10-09 10:30:10.123 -2024-10-09 10:30:10.123456 +1970-01-01 08:00:00.000 +1970-01-01 08:00:00.000 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-10 02:42:10.123456 +1970-01-01 08:00:00.000000 +2024-10-10 01:30:10.123456 +2024-10-10 01:30:10.123456 +1970-01-01 08:00:00.000000 +\N +\N +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 +2024-10-10 02:42:10.123456 \N -2024-10-10 02:30:10.123456 2024-10-10 01:30:10.123456 +2024-10-10 01:30:10.123456 +\N diff --git a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql index 9ea854bc324..8482677e9c9 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql +++ b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql @@ -1,19 +1,49 @@ set session_timezone = 'Asia/Shanghai'; -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS'); -- { serverError CANNOT_PARSE_DATETIME } -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', 3); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.', 3); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 6); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select 
parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-08123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZZZ'); -- {serverError CANNOT_PARSE_DATETIME} +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSzzz'); +-- incorrect timezone offset and timezone +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-8000', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456ABCD', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS } -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS'); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', 3); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.', 3); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 6); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-08123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZZZ'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSzzz'); +-- incorrect timezone offset and timezone +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-8000', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456ABCD', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS } -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); 
-select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS'); -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', 3); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.', 3); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 6); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-08123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZZZ'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456America/Los_Angeles', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSzzz'); +-- incorrect timezone offset and timezone +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-8000', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456ABCD', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS } \ No newline at end of file From 25f73dfb555423ec88e60b5a25f76be927099022 Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Thu, 7 Nov 2024 13:37:40 +0330 Subject: [PATCH 105/267] Added "date_time_utc" parameter to config file Signed-off-by: xogoodnow --- programs/server/config.xml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/programs/server/config.xml b/programs/server/config.xml index 9807f8c0d5a..98b4f47df74 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -70,12 +70,15 @@ You can specify log format(for now, JSON only). In that case, the console log will be printed in specified format like JSON. For example, as below: + {"date_time":"1650918987.180175","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"} + {"date_time_utc":"2024-11-06T09:06:09Z","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"} To enable JSON logging support, please uncomment the entire tag below. 
a) You can modify key names by changing values under tag values inside tag. For example, to change DATE_TIME to MY_DATE_TIME, you can do like: MY_DATE_TIME + date_time_utc b) You can stop unwanted log properties to appear in logs. To do so, you can simply comment out (recommended) that property from this file. For example, if you do not want your log to print query_id, you can comment out only tag. @@ -86,6 +89,7 @@ json date_time + date_time_utc thread_name thread_id level From 9ac9dea447bccac062510f28c5d4d5b915075b58 Mon Sep 17 00:00:00 2001 From: Payam Qorbanpour Date: Thu, 7 Nov 2024 13:48:20 +0330 Subject: [PATCH 106/267] Add 'date_time_utc' format to tests --- .../test_structured_logging_json/test.py | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_structured_logging_json/test.py b/tests/integration/test_structured_logging_json/test.py index a7d048cc4bb..544f81600f6 100644 --- a/tests/integration/test_structured_logging_json/test.py +++ b/tests/integration/test_structured_logging_json/test.py @@ -1,8 +1,10 @@ import json +from datetime import datetime from xml.etree import ElementTree as ET import pytest + from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) @@ -58,12 +60,21 @@ def validate_log_level(config, logs): return True +def is_valid_utc_datetime(datetime_str): + try: + datetime_obj = datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%S.%fZ") + return datetime_obj.tzinfo is None + except ValueError: + return False + + def validate_log_config_relation(config, logs, config_type): root = ET.fromstring(config) keys_in_config = set() if config_type == "config_no_keys": keys_in_config.add("date_time") + keys_in_config.add("date_time_utc") keys_in_config.add("thread_name") keys_in_config.add("thread_id") keys_in_config.add("level") @@ -85,9 +96,12 @@ def validate_log_config_relation(config, logs, config_type): keys_in_log.add(log_key) if log_key not in keys_in_config: return False - for config_key in keys_in_config: - if config_key not in keys_in_log: - return False + + # Validate the UTC datetime format in "date_time_utc" if it exists + if "date_time_utc" in json_log and not is_valid_utc_datetime( + json_log["date_time_utc"] + ): + return False except ValueError as e: return False return True From 27fc62ae6ac6e67d02836d77d3c83bfa76bdc030 Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Thu, 7 Nov 2024 14:00:43 +0330 Subject: [PATCH 107/267] Changed custom name for consistency with other example Signed-off-by: xogoodnow --- programs/server/config.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/server/config.xml b/programs/server/config.xml index 98b4f47df74..8ec49d804bd 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -78,7 +78,7 @@ a) You can modify key names by changing values under tag values inside tag. For example, to change DATE_TIME to MY_DATE_TIME, you can do like: MY_DATE_TIME - date_time_utc + MY_UTC_DATE_TIME b) You can stop unwanted log properties to appear in logs. To do so, you can simply comment out (recommended) that property from this file. For example, if you do not want your log to print query_id, you can comment out only tag. 
From a3bfb57da1d8c007c1aabd43f5145214cbc3bdbf Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Thu, 7 Nov 2024 15:27:41 +0330 Subject: [PATCH 108/267] Ran black Signed-off-by: xogoodnow --- tests/integration/test_structured_logging_json/test.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration/test_structured_logging_json/test.py b/tests/integration/test_structured_logging_json/test.py index 544f81600f6..4b3f4eb6b96 100644 --- a/tests/integration/test_structured_logging_json/test.py +++ b/tests/integration/test_structured_logging_json/test.py @@ -1,12 +1,10 @@ import json from datetime import datetime from xml.etree import ElementTree as ET +from helpers.cluster import ClickHouseCluster import pytest - -from helpers.cluster import ClickHouseCluster - cluster = ClickHouseCluster(__file__) node_all_keys = cluster.add_instance( "node_all_keys", main_configs=["configs/config_all_keys_json.xml"] From a6b08187b31b3d3d6a5432bf8f39fe81ab5d81a7 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Thu, 7 Nov 2024 20:03:44 +0800 Subject: [PATCH 109/267] checkstyle and doc --- .../functions/type-conversion-functions.md | 46 ++++++++++++++++++- src/Functions/parseDateTime.cpp | 27 +++++------ 2 files changed, 57 insertions(+), 16 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 91bae2fe9da..c44d9ddb12b 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -6867,9 +6867,53 @@ Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that it returns `NULL` when it encounters a date format that cannot be processed. +## parseDateTime64 + +Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [MySQL format string](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format). + +**Syntax** + +``` sql +parseDateTime64(str[, [scale, [format[, timezone]]]]) +``` + +**Arguments** + +- `str` — The String to be parsed +- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default is 6 if not specified. +- `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s` if not specified. +- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. + +**Returned value(s)** + +Returns [DateTime64](../data-types/datetime64.md) type values parsed from input string according to a MySQL style format string. + +## parseDateTime64OrZero +Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed. + +## parseDateTime64OrNull +Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed. + ## parseDateTime64InJodaSyntax -Similar to [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax). Differently, it returns a value of type [DateTime64](../data-types/datetime64.md). +Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [Joda format string](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html). 
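For reference, a short usage sketch of `parseDateTime64` as documented in the section above; the queries are drawn from the tests added earlier in this series and assume the session time zone set there:

```sql
select parseDateTime64('2024-10-09 10:30:10.123456');                                         -- scale defaults to 6
select parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); -- explicit scale, format and time zone
select parseDateTime64OrNull('2024-10-09 10:30:10.', 3);                                      -- returns NULL instead of throwing
```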
+ +**Syntax** + +``` sql +parseDateTime64InJodaSyntax(str[, [scale, [format[, timezone]]]]) +``` + +**Arguments** + +- `str` — The String to be parsed +- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default is 6 if not specified. +- `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified. +- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. + +**Returned value(s)** + +Returns [DateTime64](../data-types/datetime64.md) type values parsed from input string according to a joda style format string. ## parseDateTime64InJodaSyntaxOrZero diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 9f7f78dcbe2..5743278e104 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -613,15 +613,12 @@ namespace FunctionArgumentDescriptors optional_args; if constexpr (return_type == ReturnType::DateTime64) - { - optional_args = { - {"scale/format", static_cast([](const IDataType & data_type) -> bool { - return isUInt(data_type) || isString(data_type); - }), nullptr, "UInt or String"}, + optional_args = {{"scale/format", static_cast( + [](const IDataType & data_type) -> bool { return isUInt(data_type) || isString(data_type); } + ), nullptr, "UInt or String"}, {"format", static_cast(&isString), nullptr, "String"}, {"timezone", static_cast(&isString), &isColumnConst, "const String"} }; - } else optional_args = { {"format", static_cast(&isString), nullptr, "String"}, @@ -659,7 +656,7 @@ namespace } /// Construct the return type `DataTypDateTime64` with scale and time zone name. The scale value can be specified or be extracted - /// from the format string by c how many 'S' characters are contained in the format's micorsceond fragment. + /// from the format string by counting how many 'S' characters are contained in the format's micorsceond fragment. String format = getFormat(arguments, scale); std::vector instructions = parseFormat(format); for (const auto & instruction : instructions) @@ -676,7 +673,7 @@ namespace else val++; } - /// If the scale is already specified by the second argument, but it not equals the value that extract from the format string, + /// If the scale is already specified by the second argument, but it not equals the value that extract from the format string, /// then we should throw an exception; If the scale is not specified, then we should set its value as the extracted one. 
if (val != 0 && scale != 0 && val != scale) throw Exception(ErrorCodes::BAD_ARGUMENTS, @@ -687,7 +684,7 @@ namespace scale = val; } if (scale > maxPrecisionOfDateTime64) - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale of the input format string {} exceed the max scale value {}.", format, maxPrecisionOfDateTime64); @@ -709,7 +706,7 @@ namespace non_null_result_type = removeNullable(result_type); else non_null_result_type = result_type; - + if constexpr (return_type == ReturnType::DateTime64) { const auto * datatime64_type = checkAndGetDataType(non_null_result_type.get()); @@ -726,7 +723,7 @@ namespace } template - ColumnPtr executeImpl2(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, + ColumnPtr executeImpl2(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, MutableColumnPtr & col_res, PaddedPODArray & res_data) const { const auto * col_str = checkAndGetColumn(arguments[0].column.get()); @@ -736,7 +733,7 @@ namespace "Illegal column {} of first ('str') argument of function {}. Must be string.", arguments[0].column->getName(), getName()); - + ColumnUInt8::MutablePtr col_null_map; if constexpr (error_handling == ErrorHandling::Null) col_null_map = ColumnUInt8::create(input_rows_count, 0); @@ -802,7 +799,7 @@ namespace Int64OrError result = 0; - /// Ensure all input was consumed when the return type is `DateTime`. + /// Ensure all input was consumed. if (cur < end) { result = tl::unexpected(ErrorCodeAndMessage( @@ -2273,7 +2270,7 @@ namespace { /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22.22.123', 3), then the format is treated /// as default value `yyyy-MM-dd HH:mm:ss`. - /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 'yyyy-MM-dd HH:mm:ss.SSS')`, + /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 'yyyy-MM-dd HH:mm:ss.SSS')`, /// then the second argument is the format. /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS')`, /// then the third argument is the format. @@ -2321,7 +2318,7 @@ namespace /// If the return type is DateTime64, and the second argument is UInt type for scale, then it has 2 reasonable situations: /// the first like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT+8') /// the second like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f'). And for the - /// first one, we should return the last argument as its timezone, and for the second one, we should return the default time zone as + /// first one, we should return the last argument as its timezone, and for the second one, we should return the default time zone as /// `DateLUT::instance()`. 
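A sketch of the two call shapes the comment above distinguishes, both taken from the tests in this patch:

```sql
-- Four arguments: the trailing constant string is the time zone.
select parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7');
-- Three arguments with a UInt scale: no time zone argument, so the server default applies.
select parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f');
```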
if (isUInt(arguments[1].type) && arguments.size() < 4) return DateLUT::instance(); From b97d78e7f4f9301e3660d31d908b97bba88760e0 Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Thu, 7 Nov 2024 15:35:17 +0330 Subject: [PATCH 110/267] Third party library must come before local imports (according to isort) Signed-off-by: xogoodnow --- tests/integration/test_structured_logging_json/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_structured_logging_json/test.py b/tests/integration/test_structured_logging_json/test.py index 4b3f4eb6b96..ae244dde71e 100644 --- a/tests/integration/test_structured_logging_json/test.py +++ b/tests/integration/test_structured_logging_json/test.py @@ -1,10 +1,10 @@ import json from datetime import datetime from xml.etree import ElementTree as ET -from helpers.cluster import ClickHouseCluster import pytest +from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) node_all_keys = cluster.add_instance( "node_all_keys", main_configs=["configs/config_all_keys_json.xml"] From acafa37e2d48eeee04931b029769cd1bbcac2069 Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Thu, 7 Nov 2024 15:53:18 +0330 Subject: [PATCH 111/267] Ran black for style check Signed-off-by: xogoodnow --- tests/integration/test_structured_logging_json/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_structured_logging_json/test.py b/tests/integration/test_structured_logging_json/test.py index ae244dde71e..bc5f9753f4d 100644 --- a/tests/integration/test_structured_logging_json/test.py +++ b/tests/integration/test_structured_logging_json/test.py @@ -5,6 +5,7 @@ from xml.etree import ElementTree as ET import pytest from helpers.cluster import ClickHouseCluster + cluster = ClickHouseCluster(__file__) node_all_keys = cluster.add_instance( "node_all_keys", main_configs=["configs/config_all_keys_json.xml"] From 3332bce1dc94e7fddccc1865df01eb029e5f7e52 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Thu, 7 Nov 2024 20:38:44 +0800 Subject: [PATCH 112/267] fix doc and comments --- .../functions/type-conversion-functions.md | 6 +++--- src/Functions/parseDateTime.cpp | 16 ++++++++-------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index c44d9ddb12b..8043b21744a 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -6880,7 +6880,7 @@ parseDateTime64(str[, [scale, [format[, timezone]]]]) **Arguments** - `str` — The String to be parsed -- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default is 6 if not specified. +- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default 6 if not specified. - `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. @@ -6892,7 +6892,7 @@ Returns [DateTime64](../data-types/datetime64.md) type values parsed from input Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed. ## parseDateTime64OrNull -Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed. 
+Same as for [parseDateTime64](#parsedatetime64) except that it returns `NULL` when it encounters a date format that cannot be processed. ## parseDateTime64InJodaSyntax @@ -6907,7 +6907,7 @@ parseDateTime64InJodaSyntax(str[, [scale, [format[, timezone]]]]) **Arguments** - `str` — The String to be parsed -- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default is 6 if not specified. +- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default 0 if not specified. - `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 5743278e104..7190c1ad6f8 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -65,7 +65,7 @@ namespace constexpr Int32 minYear = 1970; constexpr Int32 maxYear = 2106; - constexpr Int32 maxPrecisionOfDateTime64 = 6; + constexpr Int32 maxScaleOfDateTime64 = 6; const std::unordered_map> dayOfWeekMap{ {"mon", {"day", 1}}, @@ -193,7 +193,7 @@ namespace Int32 minute = 0; /// range [0, 59] Int32 second = 0; /// range [0, 59] Int32 microsecond = 0; /// range [0, 999999] - UInt32 scale = 0; /// The microsecond scale of DateTime64. + UInt32 scale = 0; /// The scale of DateTime64, range [0, 6]. bool is_am = true; /// If is_hour_of_half_day = true and is_am = false (i.e. pm) then add 12 hours to the result DateTime bool hour_starts_at_1 = false; /// Whether the hour is clockhour @@ -646,8 +646,8 @@ namespace scale = col_scale->getValue(); else throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The input scale value may exceed the max value of `DateTime64`: {}.", - maxPrecisionOfDateTime64); + "The input scale value may exceed the max scale value of `DateTime64`: {}.", + maxScaleOfDateTime64); } else { @@ -656,7 +656,7 @@ namespace } /// Construct the return type `DataTypDateTime64` with scale and time zone name. The scale value can be specified or be extracted - /// from the format string by counting how many 'S' characters are contained in the format's micorsceond fragment. + /// from the format string by counting how many 'S' characters are contained in the format's microsceond fragment. 
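The MySQL-syntax counterpart of this 'S'-counting rule is `%f`, which always consumes six digits, so the scale must be 6 there; a sketch based on the tests in this series:

```sql
select parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f');
select parseDateTime64('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); -- { serverError CANNOT_PARSE_DATETIME }
```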
String format = getFormat(arguments, scale); std::vector instructions = parseFormat(format); for (const auto & instruction : instructions) @@ -683,11 +683,11 @@ namespace else if (scale == 0 && val != 0) scale = val; } - if (scale > maxPrecisionOfDateTime64) + if (scale > maxScaleOfDateTime64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale of the input format string {} exceed the max scale value {}.", format, - maxPrecisionOfDateTime64); + maxScaleOfDateTime64); } data_type = std::make_shared(scale, time_zone_name); } @@ -1398,7 +1398,7 @@ namespace if (date.scale != 6) RETURN_ERROR( ErrorCodes::CANNOT_PARSE_DATETIME, - "Unable to parse fragment {} from {} because of the microsecond's scale {} is not 6", + "Unable to parse fragment {} from {} because of the datetime scale {} is not 6", fragment, std::string_view(cur, end - cur), std::to_string(date.scale)) From aaa46a95c2160eb74d980bd268f38a664658bdb2 Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Thu, 7 Nov 2024 18:44:01 +0330 Subject: [PATCH 113/267] Declared the new parameter Signed-off-by: xogoodnow --- src/Loggers/OwnJSONPatternFormatter.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Loggers/OwnJSONPatternFormatter.h b/src/Loggers/OwnJSONPatternFormatter.h index 51827f34b22..ab96c4e5bff 100644 --- a/src/Loggers/OwnJSONPatternFormatter.h +++ b/src/Loggers/OwnJSONPatternFormatter.h @@ -33,6 +33,7 @@ public: private: std::string date_time; + std::string date_time_utc; std::string thread_name; std::string thread_id; std::string level; From a828e3e923ef06666d4582c34868750bbbee3e6a Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 5 Nov 2024 12:59:14 +0000 Subject: [PATCH 114/267] test --- .github/workflows/pr.yaml | 108 ++++++++++++++++---------- ci/jobs/build_clickhouse.py | 30 +++---- ci/jobs/fast_test.py | 2 + ci/jobs/functional_stateless_tests.py | 92 +++++++++++++--------- ci/jobs/scripts/clickhouse_proc.py | 39 +++------- ci/praktika/_environment.py | 12 +-- ci/praktika/cidb.py | 2 +- ci/praktika/digest.py | 20 ++--- ci/praktika/environment.py | 3 - ci/praktika/hook_html.py | 20 ++--- ci/praktika/job.py | 1 + ci/praktika/json.html | 22 +++--- ci/praktika/mangle.py | 55 +++++++------ ci/praktika/native_jobs.py | 6 +- ci/praktika/param.py | 8 -- ci/praktika/result.py | 6 -- ci/praktika/runner.py | 7 +- ci/praktika/utils.py | 27 +++---- ci/praktika/validator.py | 89 ++++++++++----------- ci/praktika/yaml_generator.py | 11 +-- ci/settings/definitions.py | 5 +- ci/workflows/pull_request.py | 54 +++++++++---- tests/clickhouse-test | 30 +++---- tests/config/install.sh | 2 +- tests/docker_scripts/setup_minio.sh | 6 +- 25 files changed, 334 insertions(+), 323 deletions(-) delete mode 100644 ci/praktika/environment.py delete mode 100644 ci/praktika/param.py diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 0c3f74aeac8..51bb9b52d10 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -31,8 +31,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -72,8 +71,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -113,8 +111,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref 
}} - name: Prepare env script run: | @@ -154,8 +151,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -195,8 +191,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -236,8 +231,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -266,19 +260,18 @@ jobs: python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug_parallel_1_2: + stateless_tests_amd_debugparallel: runs-on: [builder] needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMS8yKQ==') }} - name: "Stateless tests (amd, debug) (parallel 1/2)" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcscGFyYWxsZWwp') }} + name: "Stateless tests (amd_debug,parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -302,24 +295,63 @@ jobs: . /tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_debug,parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_debug,parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug_parallel_2_2: + stateless_tests_amd_debugnon_parallel: + runs-on: [func-tester] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsbm9uLXBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug,non-parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd_debug,non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd_debug,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_releaseparallel: runs-on: [builder] - needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMi8yKQ==') }} - name: "Stateless tests (amd, debug) (parallel 2/2)" + needs: [config_workflow, docker_builds, build_amd_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfcmVsZWFzZSxwYXJhbGxlbCk=') }} + name: "Stateless tests (amd_release,parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -343,24 +375,23 @@ jobs: . /tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug_non_parallel: - runs-on: [style-checker] - needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAobm9uLXBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd, debug) (non-parallel)" + stateless_tests_amd_releasenon_parallel: + runs-on: [func-tester] + needs: [config_workflow, docker_builds, build_amd_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfcmVsZWFzZSxub24tcGFyYWxsZWwp') }} + name: "Stateless tests (amd_release,non-parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -384,14 +415,14 @@ jobs: . 
/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi finish_workflow: runs-on: [ci_services] - needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, stateless_tests_amd_debug_parallel_1_2, stateless_tests_amd_debug_parallel_2_2, stateless_tests_amd_debug_non_parallel] + needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, stateless_tests_amd_debugparallel, stateless_tests_amd_debugnon_parallel, stateless_tests_amd_releaseparallel, stateless_tests_amd_releasenon_parallel] if: ${{ !cancelled() }} name: "Finish Workflow" outputs: @@ -400,8 +431,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 3db88938f23..1e6d2c648a7 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -1,6 +1,5 @@ import argparse -from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils @@ -16,8 +15,7 @@ def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") parser.add_argument( "--build-type", - help="Type: __", - default=None, + help="Type: ,,", ) parser.add_argument( "--param", @@ -30,7 +28,7 @@ def parse_args(): CMAKE_CMD = """cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA \ -DCMAKE_BUILD_TYPE={BUILD_TYPE} \ -DSANITIZE={SANITIZER} \ --DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \ +-DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 \ -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \ {AUX_DEFS} \ @@ -54,33 +52,26 @@ def main(): stages.pop(0) stages.insert(0, stage) - cmake_build_type = "Release" - sanitizer = "" - - if args.build_type and get_param(): - assert ( - False - ), "Build type must provided via job parameter (CI case) or via --build-type input argument not both" - - build_type = args.build_type or get_param() + build_type = args.build_type assert ( build_type ), "build_type must be provided either as input argument or as a parameter of parametrized job in CI" build_type = build_type.lower() - # if Environment.is_local_run(): - # build_cache_type = "disabled" - # else: CACHE_TYPE = "sccache" if "debug" in build_type: print("Build type set: debug") BUILD_TYPE = "Debug" - AUX_DEFS = " -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " + AUX_DEFS = ( + " -DENABLE_TESTS=1 -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " + ) elif "release" in build_type: print("Build type set: 
release") - BUILD_TYPE = "None" - AUX_DEFS = " -DENABLE_TESTS=1 " + BUILD_TYPE = "RelWithDebInfo" + AUX_DEFS = " -DENABLE_TESTS=0 " + else: + assert False if "asan" in build_type: print("Sanitizer set: address") @@ -136,6 +127,7 @@ def main(): Shell.check(f"ls -l {build_dir}/programs/") res = results[-1].is_ok() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index cb7d925fead..03a4c0cd496 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -215,11 +215,13 @@ def main(): ) if res and JobStages.TEST in stages: + stop_watch_ = Utils.Stopwatch() step_name = "Tests" print(step_name) res = res and CH.run_fast_test() if res: results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run()) + results[-1].set_timing(stopwatch=stop_watch_) CH.terminate() diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py index d77522ed73a..0481086d80a 100644 --- a/ci/jobs/functional_stateless_tests.py +++ b/ci/jobs/functional_stateless_tests.py @@ -1,15 +1,13 @@ import argparse -import os +import time from pathlib import Path -from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils from ci.jobs.scripts.clickhouse_proc import ClickHouseProc from ci.jobs.scripts.functional_tests_results import FTResultsProcessor -from ci.settings.definitions import azure_secret class JobStages(metaclass=MetaClasses.WithIter): @@ -21,9 +19,14 @@ class JobStages(metaclass=MetaClasses.WithIter): def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") parser.add_argument( - "BUILD_TYPE", help="Type: __" + "--ch-path", help="Path to clickhouse binary", default=f"{Settings.INPUT_DIR}" ) - parser.add_argument("--param", help="Optional custom job start stage", default=None) + parser.add_argument( + "--test-options", + help="Comma separated option(s): parallel|non-parallel|BATCH_NUM/BTATCH_TOT|..", + default="", + ) + parser.add_argument("--param", help="Optional job start stage", default=None) return parser.parse_args() @@ -50,28 +53,31 @@ def run_stateless_test( def main(): args = parse_args() - params = get_param().split(" ") - parallel_or_sequential = None - no_parallel = False - no_sequential = False - if params: - parallel_or_sequential = params[0] - if len(params) > 1: - batch_num, total_batches = map(int, params[1].split("/")) - else: - batch_num, total_batches = 0, 0 - if parallel_or_sequential: - no_parallel = parallel_or_sequential == "non-parallel" - no_sequential = parallel_or_sequential == "parallel" + test_options = args.test_options.split(",") + no_parallel = "non-parallel" in test_options + no_sequential = "parallel" in test_options + batch_num, total_batches = 0, 0 + for to in test_options: + if "/" in to: + batch_num, total_batches = map(int, to.split("/")) - os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( - f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", - verbose=True, - ) + # os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( + # f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", + # verbose=True, + # strict=True + # ) + + ch_path = args.ch_path + assert Path( + ch_path + "/clickhouse" + ).is_file(), f"clickhouse binary not found under [{ch_path}]" stop_watch = 
Utils.Stopwatch() stages = list(JobStages) + + logs_to_attach = [] + stage = args.param or JobStages.INSTALL_CLICKHOUSE if stage: assert stage in JobStages, f"--param must be one of [{list(JobStages)}]" @@ -83,19 +89,22 @@ def main(): res = True results = [] - Utils.add_to_PATH(f"{Settings.INPUT_DIR}:tests") + Utils.add_to_PATH(f"{ch_path}:tests") if res and JobStages.INSTALL_CLICKHOUSE in stages: commands = [ - f"chmod +x {Settings.INPUT_DIR}/clickhouse", - f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-server", - f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-client", + f"chmod +x {ch_path}/clickhouse", + f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-server", + f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-client", f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server", f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage", # update_path_ch_config, - f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml", - f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml", + # f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml", + # f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml", + f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done", + f"for file in /tmp/praktika/etc/clickhouse-server/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done", + f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|local_disk|{Settings.TEMP_DIR}/local_disk|g' $(readlink -f $file); done", f"clickhouse-server --version", ] results.append( @@ -110,22 +119,27 @@ def main(): stop_watch_ = Utils.Stopwatch() step_name = "Start ClickHouse Server" print(step_name) - res = res and CH.start_minio() + minio_log = "/tmp/praktika/output/minio.log" + res = res and CH.start_minio(log_file_path=minio_log) + logs_to_attach += [minio_log] + time.sleep(10) + Shell.check("ps -ef | grep minio", verbose=True) + res = res and Shell.check( + "aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True + ) res = res and CH.start() res = res and CH.wait_ready() + if res: + print("ch started") + logs_to_attach += [ + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log", + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log", + ] results.append( Result.create_from( name=step_name, status=res, stopwatch=stop_watch_, - files=( - [ - "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log", - "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log", - ] - if not res - else [] - ), ) ) res = results[-1].is_ok() @@ -144,7 +158,9 @@ def main(): results[-1].set_timing(stopwatch=stop_watch_) res = results[-1].is_ok() - 
Result.create_from(results=results, stopwatch=stop_watch).complete_job() + Result.create_from( + results=results, stopwatch=stop_watch, files=logs_to_attach if not res else [] + ).complete_job() if __name__ == "__main__": diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py index cc822eab693..c43283e75e0 100644 --- a/ci/jobs/scripts/clickhouse_proc.py +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -1,5 +1,4 @@ -import threading -import time +import subprocess from pathlib import Path from praktika.settings import Settings @@ -39,39 +38,25 @@ class ClickHouseProc: Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path) Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas") - if not fast_test: - with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file: - file.write(self.BACKUPS_XML) + # if not fast_test: + # with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file: + # file.write(self.BACKUPS_XML) self.minio_proc = None - def start_minio(self): - print("Starting minio") - - def run_minio(): - self.minio_proc = Shell.run_async( - self.minio_cmd, verbose=True, suppress_output=True + def start_minio(self, log_file_path): + command = ["tests/docker_scripts/setup_minio.sh", "stateless", "./tests"] + with open(log_file_path, "w") as log_file: + process = subprocess.Popen( + command, stdout=log_file, stderr=subprocess.STDOUT ) - - thread = threading.Thread(target=run_minio) - thread.daemon = True # Allow program to exit even if thread is still running - thread.start() - time.sleep(5) - return thread.is_alive() + print(f"Started setup_minio.sh asynchronously with PID {process.pid}") + return True def start(self): print("Starting ClickHouse server") Shell.check(f"rm {self.pid_file}") - - def run_clickhouse(): - self.proc = Shell.run_async( - self.command, verbose=True, suppress_output=False - ) - - thread = threading.Thread(target=run_clickhouse) - thread.daemon = True # Allow program to exit even if thread is still running - thread.start() - + self.proc = subprocess.Popen(self.command, stderr=subprocess.STDOUT, shell=True) started = False try: for _ in range(5): diff --git a/ci/praktika/_environment.py b/ci/praktika/_environment.py index 4ac8ad319f9..1c6b547ddde 100644 --- a/ci/praktika/_environment.py +++ b/ci/praktika/_environment.py @@ -30,7 +30,6 @@ class _Environment(MetaClasses.Serializable): INSTANCE_ID: str INSTANCE_LIFE_CYCLE: str LOCAL_RUN: bool = False - PARAMETER: Any = None REPORT_INFO: List[str] = dataclasses.field(default_factory=list) name = "environment" @@ -172,18 +171,15 @@ class _Environment(MetaClasses.Serializable): # TODO: find a better place for the function. 
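For orientation, the URL that get_report_url assembles below has roughly this shape; the helper and all values here are illustrative stand-ins, and the real path and page name come from praktika settings:

import urllib.parse

def report_url(path: str, page: str, pr: int, sha: str, workflow: str, job: str) -> str:
    # Mirrors the REPORT_URL f-string below; the argument values are hypothetical.
    return (
        f"https://{path}/{page}?PR={pr}&sha={sha}"
        f"&name_0={urllib.parse.quote(workflow, safe='')}"
        f"&name_1={urllib.parse.quote(job, safe='')}"
    )

print(report_url("bucket.example.com/html", "json.html", 1234, "abcdef12", "PR", "Style Check"))
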
This file should not import praktika.settings # as it's requires reading users config, that's why imports nested inside the function - def get_report_url(self): + def get_report_url(self, settings): import urllib - from praktika.settings import Settings - from praktika.utils import Utils - - path = Settings.HTML_S3_PATH - for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items(): + path = settings.HTML_S3_PATH + for bucket, endpoint in settings.S3_BUCKET_TO_HTTP_ENDPOINT.items(): if bucket in path: path = path.replace(bucket, endpoint) break - REPORT_URL = f"https://{path}/{Path(Settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" + REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" return REPORT_URL def is_local_run(self): diff --git a/ci/praktika/cidb.py b/ci/praktika/cidb.py index 087845ec762..53088c102cd 100644 --- a/ci/praktika/cidb.py +++ b/ci/praktika/cidb.py @@ -52,7 +52,7 @@ class CIDB: check_status=result.status, check_duration_ms=int(result.duration * 1000), check_start_time=Utils.timestamp_to_str(result.start_time), - report_url=env.get_report_url(), + report_url=env.get_report_url(settings=Settings), pull_request_url=env.CHANGE_URL, base_ref=env.BASE_BRANCH, base_repo=env.REPOSITORY, diff --git a/ci/praktika/digest.py b/ci/praktika/digest.py index 93b62b13dc0..a1f2eecf9b6 100644 --- a/ci/praktika/digest.py +++ b/ci/praktika/digest.py @@ -31,6 +31,9 @@ class Digest: cache_key = self._hash_digest_config(config) if cache_key in self.digest_cache: + print( + f"calc digest for job [{job_config.name}]: hash_key [{cache_key}] - from cache" + ) return self.digest_cache[cache_key] included_files = Utils.traverse_paths( @@ -38,12 +41,9 @@ class Digest: job_config.digest_config.exclude_paths, sorted=True, ) - print( f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files" ) - # Sort files to ensure consistent hash calculation - included_files.sort() # Calculate MD5 hash res = "" @@ -52,11 +52,11 @@ class Digest: print(f"NOTE: empty digest config [{config}] - return dummy digest") else: hash_md5 = hashlib.md5() - for file_path in included_files: - res = self._calc_file_digest(file_path, hash_md5) - assert res - self.digest_cache[cache_key] = res - return res + for i, file_path in enumerate(included_files): + hash_md5 = self._calc_file_digest(file_path, hash_md5) + digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + self.digest_cache[cache_key] = digest + return digest def calc_docker_digest( self, @@ -103,10 +103,10 @@ class Digest: print( f"WARNING: No valid file resolved by link {file_path} -> {resolved_path} - skipping digest calculation" ) - return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + return hash_md5 with open(resolved_path, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) - return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + return hash_md5 diff --git a/ci/praktika/environment.py b/ci/praktika/environment.py deleted file mode 100644 index 8f53aa6230b..00000000000 --- a/ci/praktika/environment.py +++ /dev/null @@ -1,3 +0,0 @@ -from praktika._environment import _Environment - -Environment = _Environment.get() diff --git a/ci/praktika/hook_html.py b/ci/praktika/hook_html.py index 
cea84192d0d..ca2692d1b22 100644 --- a/ci/praktika/hook_html.py +++ b/ci/praktika/hook_html.py @@ -1,6 +1,5 @@ import dataclasses import json -import urllib.parse from pathlib import Path from typing import List @@ -132,17 +131,9 @@ class HtmlRunnerHooks: result = Result.generate_skipped(job.name) results.append(result) summary_result = Result.generate_pending(_workflow.name, results=results) - summary_result.aux_links.append(env.CHANGE_URL) - summary_result.aux_links.append(env.RUN_URL) + summary_result.links.append(env.CHANGE_URL) + summary_result.links.append(env.RUN_URL) summary_result.start_time = Utils.timestamp() - page_url = "/".join( - ["https:/", Settings.HTML_S3_PATH, str(Path(Settings.HTML_PAGE_FILE).name)] - ) - for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items(): - page_url = page_url.replace(bucket, endpoint) - # TODO: add support for non-PRs (use branch?) - page_url += f"?PR={env.PR_NUMBER}&sha=latest&name_0={urllib.parse.quote(env.WORKFLOW_NAME, safe='')}" - summary_result.html_link = page_url # clean the previous latest results in PR if any if env.PR_NUMBER: @@ -152,13 +143,14 @@ class HtmlRunnerHooks: unlock=False, ) + page_url = env.get_report_url(settings=Settings) print(f"CI Status page url [{page_url}]") res1 = GH.post_commit_status( name=_workflow.name, status=Result.Status.PENDING, description="", - url=page_url, + url=env.get_report_url(settings=Settings), ) res2 = GH.post_pr_comment( comment_body=f"Workflow [[{_workflow.name}]({page_url})], commit [{_Environment.get().SHA[:8]}]", @@ -248,11 +240,11 @@ class HtmlRunnerHooks: ) if workflow_result.status != old_status: print( - f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}], link [{workflow_result.html_link}]" + f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}]" ) GH.post_commit_status( name=workflow_result.name, status=GH.convert_to_gh_status(workflow_result.status), description="", - url=workflow_result.html_link, + url=env.get_report_url(settings=Settings), ) diff --git a/ci/praktika/job.py b/ci/praktika/job.py index 99eb08938b8..595a86456e9 100644 --- a/ci/praktika/job.py +++ b/ci/praktika/job.py @@ -89,6 +89,7 @@ class Job: ), "Job.Config.provides must be empty for parametrized jobs" if parameter_: obj.parameter = parameter_ + obj.command = obj.command.format(PARAMETER=parameter_) if runs_on_: obj.runs_on = runs_on_ if timeout_: diff --git a/ci/praktika/json.html b/ci/praktika/json.html index f86a7b27ecb..4e15a67ba76 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -663,20 +663,20 @@ let targetData = navigatePath(data, nameParams); let nest_level = nameParams.length; + // Add footer links from top-level Result + if (Array.isArray(data.links) && data.links.length > 0) { + data.links.forEach(link => { + const a = document.createElement('a'); + a.href = link; + a.textContent = link.split('/').pop(); + a.target = '_blank'; + footerRight.appendChild(a); + }); + } + if (targetData) { infoElement.style.display = 'none'; - // Handle footer links if present - if (Array.isArray(data.aux_links) && data.aux_links.length > 0) { - data.aux_links.forEach(link => { - const a = document.createElement('a'); - a.href = link; - a.textContent = link.split('/').pop(); - a.target = '_blank'; - footerRight.appendChild(a); - }); - } - addStatusToStatus(targetData.status, targetData.start_time, targetData.duration) // Handle links diff --git a/ci/praktika/mangle.py b/ci/praktika/mangle.py index bca33f9e660..b16d52fbbbf 100644 --- 
a/ci/praktika/mangle.py +++ b/ci/praktika/mangle.py @@ -14,35 +14,34 @@ def _get_workflows(name=None, file=None): """ res = [] - with ContextManager.cd(): - directory = Path(_Settings.WORKFLOWS_DIRECTORY) - for py_file in directory.glob("*.py"): - if file and file not in str(py_file): - continue - module_name = py_file.name.removeprefix(".py") - spec = importlib.util.spec_from_file_location( - module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}" - ) - assert spec - foo = importlib.util.module_from_spec(spec) - assert spec.loader - spec.loader.exec_module(foo) - try: - for workflow in foo.WORKFLOWS: - if name: - if name == workflow.name: - print(f"Read workflow [{name}] config from [{module_name}]") - res = [workflow] - break - else: - continue + directory = Path(_Settings.WORKFLOWS_DIRECTORY) + for py_file in directory.glob("*.py"): + if file and file not in str(py_file): + continue + module_name = py_file.name.removeprefix(".py") + spec = importlib.util.spec_from_file_location( + module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}" + ) + assert spec + foo = importlib.util.module_from_spec(spec) + assert spec.loader + spec.loader.exec_module(foo) + try: + for workflow in foo.WORKFLOWS: + if name: + if name == workflow.name: + print(f"Read workflow [{name}] config from [{module_name}]") + res = [workflow] + break else: - res += foo.WORKFLOWS - print(f"Read workflow configs from [{module_name}]") - except Exception as e: - print( - f"WARNING: Failed to add WORKFLOWS config from [{module_name}], exception [{e}]" - ) + continue + else: + res += foo.WORKFLOWS + print(f"Read workflow configs from [{module_name}]") + except Exception as e: + print( + f"WARNING: Failed to add WORKFLOWS config from [{module_name}], exception [{e}]" + ) if not res: Utils.raise_with_error(f"Failed to find workflow [{name or file}]") diff --git a/ci/praktika/native_jobs.py b/ci/praktika/native_jobs.py index 16ffa9056e9..58af211988b 100644 --- a/ci/praktika/native_jobs.py +++ b/ci/praktika/native_jobs.py @@ -342,7 +342,7 @@ def _finish_workflow(workflow, job_name): f"NOTE: Result for [{result.name}] has not ok status [{result.status}]" ) ready_for_merge_status = Result.Status.FAILED - failed_results.append(result.name.split("(", maxsplit=1)[0]) # cut name + failed_results.append(result.name) if failed_results: ready_for_merge_description = f"failed: {', '.join(failed_results)}" @@ -362,9 +362,7 @@ def _finish_workflow(workflow, job_name): unlock=False, ) # no lock - no unlock - Result.from_fs(job_name).set_status(Result.Status.SUCCESS).set_info( - ready_for_merge_description - ) + Result.from_fs(job_name).set_status(Result.Status.SUCCESS) if __name__ == "__main__": diff --git a/ci/praktika/param.py b/ci/praktika/param.py deleted file mode 100644 index f5727198e0d..00000000000 --- a/ci/praktika/param.py +++ /dev/null @@ -1,8 +0,0 @@ -from praktika._environment import _Environment - - -# TODO: find better place and/or right storage for parameter -def get_param(): - env = _Environment.get() - assert env.PARAMETER - return env.PARAMETER diff --git a/ci/praktika/result.py b/ci/praktika/result.py index f473cf3ed05..842deacbcbd 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -26,10 +26,6 @@ class Result(MetaClasses.Serializable): files (List[str]): A list of file paths or names related to the result. links (List[str]): A list of URLs related to the result (e.g., links to reports or resources). info (str): Additional information about the result. Free-form text. 
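With praktika/param.py (get_param) deleted above, a job's parameter now travels through command templating at configuration time instead of through env.PARAMETER at runtime. A minimal sketch of that flow, assuming a Job.Config-like object (the class and method names here are simplified stand-ins):

class JobConfig:
    def __init__(self, command: str):
        self.command = command
        self.parameter = None

    def with_parameter(self, parameter: str) -> "JobConfig":
        # Mirrors obj.command = obj.command.format(PARAMETER=parameter_)
        # from the job.py hunk above.
        self.parameter = parameter
        self.command = self.command.format(PARAMETER=parameter)
        return self

job = JobConfig("python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}")
print(job.with_parameter("amd_debug,parallel").command)
# python3 ./ci/jobs/functional_stateless_tests.py --test-options amd_debug,parallel
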
- # TODO: rename - aux_links (List[str]): A list of auxiliary links that provide additional context for the result. - # TODO: remove - html_link (str): A direct link to an HTML representation of the result (e.g., a detailed report page). Inner Class: Status: Defines possible statuses for the task, such as "success", "failure", etc. @@ -51,8 +47,6 @@ class Result(MetaClasses.Serializable): files: List[str] = dataclasses.field(default_factory=list) links: List[str] = dataclasses.field(default_factory=list) info: str = "" - aux_links: List[str] = dataclasses.field(default_factory=list) - html_link: str = "" @staticmethod def create_from( diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 5db1a89ce99..1ac8748d1c0 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -80,7 +80,6 @@ class Runner: print("Read GH Environment") env = _Environment.from_env() env.JOB_NAME = job.name - env.PARAMETER = job.parameter env.dump() print(env) @@ -128,7 +127,6 @@ class Runner: # re-set envs for local run env = _Environment.get() env.JOB_NAME = job.name - env.PARAMETER = job.parameter env.dump() if param: @@ -143,6 +141,7 @@ class Runner: job.run_in_docker.split("+")[1:], ) from_root = "root" in docker_settings + settings = [s for s in docker_settings if s.startswith("--")] if ":" in job.run_in_docker: docker_name, docker_tag = job.run_in_docker.split(":") print( @@ -154,9 +153,11 @@ class Runner: RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker], ) docker = docker or f"{docker_name}:{docker_tag}" - cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" + cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {' '.join(settings)} {docker} {job.command}" else: cmd = job.command + python_path = os.getenv("PYTHONPATH", ":") + os.environ["PYTHONPATH"] = f".:{python_path}" if param: print(f"Custom --param [{param}] will be passed to job's script") diff --git a/ci/praktika/utils.py b/ci/praktika/utils.py index b96c78e4fa7..62eb13b3e19 100644 --- a/ci/praktika/utils.py +++ b/ci/praktika/utils.py @@ -81,25 +81,26 @@ class MetaClasses: class ContextManager: @staticmethod @contextmanager - def cd(to: Optional[Union[Path, str]] = None) -> Iterator[None]: + def cd(to: Optional[Union[Path, str]]) -> Iterator[None]: """ changes current working directory to @path or `git root` if @path is None :param to: :return: """ - if not to: - try: - to = Shell.get_output_or_raise("git rev-parse --show-toplevel") - except: - pass - if not to: - if Path(_Settings.DOCKER_WD).is_dir(): - to = _Settings.DOCKER_WD - if not to: - assert False, "FIX IT" - assert to + # if not to: + # try: + # to = Shell.get_output_or_raise("git rev-parse --show-toplevel") + # except: + # pass + # if not to: + # if Path(_Settings.DOCKER_WD).is_dir(): + # to = _Settings.DOCKER_WD + # if not to: + # assert False, "FIX IT" + # assert to old_pwd = os.getcwd() - os.chdir(to) + if to: + os.chdir(to) try: yield finally: diff --git a/ci/praktika/validator.py b/ci/praktika/validator.py index 29edc0a27ed..d612881b819 100644 --- a/ci/praktika/validator.py +++ b/ci/praktika/validator.py 
@@ -119,61 +119,58 @@ class Validator: def validate_file_paths_in_run_command(cls, workflow: Workflow.Config) -> None: if not Settings.VALIDATE_FILE_PATHS: return - with ContextManager.cd(): - for job in workflow.jobs: - run_command = job.command - command_parts = run_command.split(" ") - for part in command_parts: - if ">" in part: - return - if "/" in part: - assert ( - Path(part).is_file() or Path(part).is_dir() - ), f"Apparently run command [{run_command}] for job [{job}] has invalid path [{part}]. Setting to disable check: VALIDATE_FILE_PATHS" + for job in workflow.jobs: + run_command = job.command + command_parts = run_command.split(" ") + for part in command_parts: + if ">" in part: + return + if "/" in part: + assert ( + Path(part).is_file() or Path(part).is_dir() + ), f"Apparently run command [{run_command}] for job [{job}] has invalid path [{part}]. Setting to disable check: VALIDATE_FILE_PATHS" @classmethod def validate_file_paths_in_digest_configs(cls, workflow: Workflow.Config) -> None: if not Settings.VALIDATE_FILE_PATHS: return - with ContextManager.cd(): - for job in workflow.jobs: - if not job.digest_config: - continue - for include_path in chain( - job.digest_config.include_paths, job.digest_config.exclude_paths - ): - if "*" in include_path: - assert glob.glob( - include_path, recursive=True - ), f"Apparently file glob [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS" - else: - assert ( - Path(include_path).is_file() or Path(include_path).is_dir() - ), f"Apparently file path [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS" + for job in workflow.jobs: + if not job.digest_config: + continue + for include_path in chain( + job.digest_config.include_paths, job.digest_config.exclude_paths + ): + if "*" in include_path: + assert glob.glob( + include_path, recursive=True + ), f"Apparently file glob [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS" + else: + assert ( + Path(include_path).is_file() or Path(include_path).is_dir() + ), f"Apparently file path [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. 
Setting to disable check: VALIDATE_FILE_PATHS" @classmethod def validate_requirements_txt_files(cls, workflow: Workflow.Config) -> None: - with ContextManager.cd(): - for job in workflow.jobs: - if job.job_requirements: - if job.job_requirements.python_requirements_txt: - path = Path(job.job_requirements.python_requirements_txt) - message = f"File with py requirement [{path}] does not exist" - if job.name in ( - Settings.DOCKER_BUILD_JOB_NAME, - Settings.CI_CONFIG_JOB_NAME, - Settings.FINISH_WORKFLOW_JOB_NAME, - ): - message += '\n If all requirements already installed on your runners - add setting INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS""' - message += "\n If requirements needs to be installed - add requirements file (Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS):" - message += "\n echo jwt==1.3.1 > ./ci/requirements.txt" - message += ( - "\n echo requests==2.32.3 >> ./ci/requirements.txt" - ) - message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt" - cls.evaluate_check( - path.is_file(), message, job.name, workflow.name + for job in workflow.jobs: + if job.job_requirements: + if job.job_requirements.python_requirements_txt: + path = Path(job.job_requirements.python_requirements_txt) + message = f"File with py requirement [{path}] does not exist" + if job.name in ( + Settings.DOCKER_BUILD_JOB_NAME, + Settings.CI_CONFIG_JOB_NAME, + Settings.FINISH_WORKFLOW_JOB_NAME, + ): + message += '\n If all requirements already installed on your runners - add setting INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS""' + message += "\n If requirements needs to be installed - add requirements file (Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS):" + message += "\n echo jwt==1.3.1 > ./ci/requirements.txt" + message += ( + "\n echo requests==2.32.3 >> ./ci/requirements.txt" ) + message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt" + cls.evaluate_check( + path.is_file(), message, job.name, workflow.name + ) @classmethod def validate_dockers(cls, workflow: Workflow.Config): diff --git a/ci/praktika/yaml_generator.py b/ci/praktika/yaml_generator.py index 1422a835663..f56715755e8 100644 --- a/ci/praktika/yaml_generator.py +++ b/ci/praktika/yaml_generator.py @@ -81,8 +81,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{{{ github.event.pull_reguest.head.sha }}}} + ref: ${{{{ github.head_ref }}}} {JOB_ADDONS} - name: Prepare env script run: | @@ -191,12 +190,10 @@ jobs: False ), f"Workflow event not yet supported [{workflow_config.event}]" - with ContextManager.cd(): - with open(self._get_workflow_file_name(workflow_config.name), "w") as f: - f.write(yaml_workflow_str) + with open(self._get_workflow_file_name(workflow_config.name), "w") as f: + f.write(yaml_workflow_str) - with ContextManager.cd(): - Shell.check("git add ./.github/workflows/*.yaml") + Shell.check("git add ./.github/workflows/*.yaml") class PullRequestPushYamlGen: diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py index 33173756924..99fec8b5402 100644 --- a/ci/settings/definitions.py +++ b/ci/settings/definitions.py @@ -8,7 +8,7 @@ class RunnerLabels: CI_SERVICES = "ci_services" CI_SERVICES_EBS = "ci_services_ebs" BUILDER = "builder" - STYLE_CHECKER = "style-checker" + FUNC_TESTER_AMD = "func-tester" BASE_BRANCH = "master" @@ -238,5 +238,4 @@ class JobNames: STYLE_CHECK = "Style Check" FAST_TEST = "Fast test" BUILD = "Build" - BUILD_AMD_DEBUG = "Build 
(amd, debug)" - STATELESS_TESTS = "Stateless tests (amd, debug)" + STATELESS = "Stateless tests" diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index 10dd77a0414..0d505ae27c4 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -41,8 +41,9 @@ fast_test_job = Job.Config( amd_build_jobs = Job.Config( name=JobNames.BUILD, runs_on=[RunnerLabels.BUILDER], - command="python3 ./ci/jobs/build_clickhouse.py", + command="python3 ./ci/jobs/build_clickhouse.py --build-type {PARAMETER}", run_in_docker="clickhouse/fasttest", + timeout=3600 * 2, digest_config=Job.CacheDigestConfig( include_paths=[ "./src", @@ -55,6 +56,7 @@ amd_build_jobs = Job.Config( "./docker/packager/packager", "./rust", "./tests/ci/version_helper.py", + "./ci/jobs/build_clickhouse.py", ], ), ).parametrize( @@ -62,27 +64,53 @@ amd_build_jobs = Job.Config( provides=[[ArtifactNames.CH_AMD_DEBUG], [ArtifactNames.CH_AMD_RELEASE]], ) -statless_batch_num = 2 -stateless_tests_amd_debug_jobs = Job.Config( - name=JobNames.STATELESS_TESTS, +stateless_tests_jobs = Job.Config( + name=JobNames.STATELESS, runs_on=[RunnerLabels.BUILDER], - command="python3 ./ci/jobs/functional_stateless_tests.py amd_debug", - run_in_docker="clickhouse/stateless-test", + command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", + run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_stateless_tests.py", ], ), - requires=[ArtifactNames.CH_AMD_DEBUG], ).parametrize( parameter=[ - f"parallel {i+1}/{statless_batch_num}" for i in range(statless_batch_num) - ] - + ["non-parallel"], - runs_on=[[RunnerLabels.BUILDER] for _ in range(statless_batch_num)] - + [[RunnerLabels.STYLE_CHECKER]], + "amd_debug,parallel", + "amd_debug,non-parallel", + "amd_release,parallel", + "amd_release,non-parallel", + ], + runs_on=[ + [RunnerLabels.BUILDER], + [RunnerLabels.FUNC_TESTER_AMD], + [RunnerLabels.BUILDER], + [RunnerLabels.FUNC_TESTER_AMD], + ], + requires=[ + [ArtifactNames.CH_AMD_DEBUG], + [ArtifactNames.CH_AMD_DEBUG], + [ArtifactNames.CH_AMD_RELEASE], + [ArtifactNames.CH_AMD_RELEASE], + ], ) +# stateless_tests_amd_release_jobs = Job.Config( +# name=JobNames.STATELESS_AMD_RELEASE, +# runs_on=[RunnerLabels.BUILDER], +# command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", +# run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", +# digest_config=Job.CacheDigestConfig( +# include_paths=[ +# "./ci/jobs/functional_stateless_tests.py", +# ], +# ), +# requires=[ArtifactNames.CH_AMD_RELEASE], +# ).parametrize( +# parameter=["parallel", "non-parallel"], +# runs_on=[[RunnerLabels.BUILDER], [RunnerLabels.FUNC_TESTER_AMD]], +# ) + workflow = Workflow.Config( name="PR", event=Workflow.Event.PULL_REQUEST, @@ -91,7 +119,7 @@ workflow = Workflow.Config( style_check_job, fast_test_job, *amd_build_jobs, - *stateless_tests_amd_debug_jobs, + *stateless_tests_jobs, ], artifacts=[ Artifact.Config( diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 3396b10814a..a0ec080ed75 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -2619,14 +2619,14 @@ def run_tests_process(*args, **kwargs): def do_run_tests(jobs, test_suite: TestSuite): - if jobs > 1 and len(test_suite.parallel_tests) > 0: - print( - "Found", - len(test_suite.parallel_tests), - "parallel tests and", - 
len(test_suite.sequential_tests), - "sequential tests", - ) + print( + "Found", + len(test_suite.parallel_tests), + "parallel tests and", + len(test_suite.sequential_tests), + "sequential tests", + ) + if test_suite.parallel_tests: tests_n = len(test_suite.parallel_tests) jobs = min(jobs, tests_n) @@ -2639,6 +2639,7 @@ def do_run_tests(jobs, test_suite: TestSuite): # It makes it more difficult to detect real flaky tests, # because the distribution and the amount # of failures will be nearly the same for all tests from the group. + # TODO: add shuffle for sequential tests random.shuffle(test_suite.parallel_tests) batch_size = len(test_suite.parallel_tests) // jobs @@ -2684,6 +2685,7 @@ def do_run_tests(jobs, test_suite: TestSuite): if not p.is_alive(): processes.remove(p) + if test_suite.sequential_tests: run_tests_array( ( test_suite.sequential_tests, @@ -2693,17 +2695,7 @@ def do_run_tests(jobs, test_suite: TestSuite): ) ) - return len(test_suite.sequential_tests) + len(test_suite.parallel_tests) - num_tests = len(test_suite.all_tests) - run_tests_array( - ( - test_suite.all_tests, - num_tests, - test_suite, - False, - ) - ) - return num_tests + return len(test_suite.sequential_tests) + len(test_suite.parallel_tests) def is_test_from_dir(suite_dir, case): diff --git a/tests/config/install.sh b/tests/config/install.sh index cdae5741fce..9630977b9c1 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -200,7 +200,7 @@ elif [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" == "1" ]]; then fi if [[ "$EXPORT_S3_STORAGE_POLICIES" == "1" ]] || [[ "$S3_STORAGE" = "1" ]]; then - ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ + #ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02944.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02963.xml $DEST_SERVER_PATH/config.d/ diff --git a/tests/docker_scripts/setup_minio.sh b/tests/docker_scripts/setup_minio.sh index 837c05a9c5d..88839c39674 100755 --- a/tests/docker_scripts/setup_minio.sh +++ b/tests/docker_scripts/setup_minio.sh @@ -4,8 +4,10 @@ set -euxf -o pipefail export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse} export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse} +TEST_DIR=${2:-/repo/tests/} if [ -d "$TEMP_DIR" ]; then + TEST_DIR=$(readlink -f $TEST_DIR) cd "$TEMP_DIR" # add / for minio mc in docker PATH="/:.:$PATH" @@ -79,7 +81,7 @@ start_minio() { pwd mkdir -p ./minio_data minio --version - minio server --address ":11111" ./minio_data & + nohup minio server --address ":11111" ./minio_data & wait_for_it lsof -i :11111 sleep 5 @@ -153,7 +155,7 @@ main() { fi start_minio setup_minio "$1" - upload_data "${query_dir}" "${2:-/repo/tests/}" + upload_data "${query_dir}" "$TEST_DIR" setup_aws_credentials } From a8d07555d4d01e1f261dd5c6c6f003a5581c2339 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 8 Nov 2024 12:31:22 +0000 Subject: [PATCH 115/267] Update 02967_parallel_replicas_joins_and_analyzer EXPLAIN with RIGHT JOIN changed --- ...llel_replicas_joins_and_analyzer.reference | 99 +++++++------------ 1 file changed, 35 insertions(+), 64 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference index 93003b6cf6d..1269f792e76 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference 
+++ b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference @@ -266,24 +266,13 @@ Expression Join Expression Join - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union - Expression Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + ReadFromMemoryStorage + Expression + Expression + ReadFromMergeTree + Expression + ReadFromMemoryStorage -- -- RIGHT JOIN in sub5: sub5 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -317,27 +306,19 @@ select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_paralle Expression Sorting Expression - Join - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Sorting Expression Join - Union - Expression + Expression + ReadFromMemoryStorage + Expression + Join Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union - Expression + Expression + ReadFromMergeTree Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Expression + ReadFromMergeTree -- -- Subqueries for IN allowed with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), @@ -722,28 +703,22 @@ sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -Expression - Join - Expression - Join - Union +Union + Expression + Join + Expression + Join Expression Expression ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union Expression Expression ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union Expression Expression ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Expression + ReadFromRemoteParallelReplicas -- -- RIGHT JOIN in sub5: sub5 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -776,28 +751,24 @@ sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll. 
select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting - Expression - Join - Union + Union + Expression + Sorting Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Expression - Join - Union + Join Expression Expression ReadFromMergeTree Expression - ReadFromRemoteParallelReplicas - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Join + Expression + Expression + ReadFromMergeTree + Expression + Expression + ReadFromMergeTree + Expression + ReadFromRemoteParallelReplicas -- -- Subqueries for IN allowed with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), From 1561a0115fa740c746ccb054552de3ad751e12ae Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 8 Nov 2024 14:30:02 +0000 Subject: [PATCH 116/267] Fix test, set min_bytes_to_use_direct_io expicitly --- tests/queries/0_stateless/03254_pr_join_on_dups.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 166910d496f..aca4fc6b6c3 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -1,6 +1,8 @@ drop table if exists X sync; drop table if exists Y sync; +set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken and leads to unexpected results, https://github.com/ClickHouse/ClickHouse/issues/65690 + create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); create table Y (id Int32, y_a String, y_b Nullable(String)) engine ReplicatedMergeTree('/clickhouse/{database}/Y', '1') order by tuple(); @@ -10,7 +12,6 @@ insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; -set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken and leads to unexpected results, https://github.com/ClickHouse/ClickHouse/issues/65690 select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; From 37c24838693e573428414016a619fa70de61823a Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 8 Nov 2024 17:09:23 +0000 Subject: [PATCH 117/267] Do not randomize min_bytes_to_use_direct_io --- tests/clickhouse-test | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 9c035b7cc35..a1ffcc2030f 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -821,9 +821,10 @@ class SettingsRandomizer: "optimize_aggregation_in_order": lambda: random.randint(0, 1), "aggregation_in_order_max_block_bytes": lambda: random.randint(0, 50000000), "use_uncompressed_cache": lambda: random.randint(0, 1), - "min_bytes_to_use_direct_io": threshold_generator( - 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 - ), + # see https://github.com/ClickHouse/ClickHouse/issues/65690 + # 
"min_bytes_to_use_direct_io": threshold_generator( + # 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 + # ), "min_bytes_to_use_mmap_io": threshold_generator( 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 ), From c3f42b7bc770e5e8104527011f6bc51d5b8469ff Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 01:25:25 +0100 Subject: [PATCH 118/267] Something --- cmake/linux/default_libs.cmake | 3 +- src/AggregateFunctions/ReservoirSampler.h | 2 +- src/Columns/ColumnUnique.cpp | 1 + src/Columns/ColumnUnique.h | 1 + src/Columns/IColumn.cpp | 1 + src/Common/FieldVisitorConvertToNumber.cpp | 2 +- src/Common/FieldVisitorConvertToNumber.h | 1 + src/DataTypes/DataTypesBinaryEncoding.cpp | 5 + src/DataTypes/DataTypesNumber.cpp | 1 + src/DataTypes/DataTypesNumber.h | 1 + src/Formats/JSONExtractTree.cpp | 4 +- src/Functions/FunctionBinaryArithmetic.h | 1 + src/Functions/FunctionsConversion.cpp | 1 + src/Functions/FunctionsRound.h | 2 +- src/IO/readFloatText.cpp | 9 ++ src/IO/readFloatText.h | 111 ++++++++++++++++-- .../Impl/Parquet/ParquetDataValuesReader.cpp | 2 + .../Impl/Parquet/ParquetLeafColReader.cpp | 1 + 18 files changed, 132 insertions(+), 17 deletions(-) diff --git a/cmake/linux/default_libs.cmake b/cmake/linux/default_libs.cmake index 51620bc9f33..79875e1ed6b 100644 --- a/cmake/linux/default_libs.cmake +++ b/cmake/linux/default_libs.cmake @@ -3,8 +3,7 @@ set (DEFAULT_LIBS "-nodefaultlibs") -# We need builtins from Clang's RT even without libcxx - for ubsan+int128. -# See https://bugs.llvm.org/show_bug.cgi?id=16404 +# We need builtins from Clang execute_process (COMMAND ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY diff --git a/src/AggregateFunctions/ReservoirSampler.h b/src/AggregateFunctions/ReservoirSampler.h index 2668e0dc890..870cb429fb7 100644 --- a/src/AggregateFunctions/ReservoirSampler.h +++ b/src/AggregateFunctions/ReservoirSampler.h @@ -276,6 +276,6 @@ private: { if (OnEmpty == ReservoirSamplerOnEmpty::THROW) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSampler"); - return NanLikeValueConstructor>::getValue(); + return NanLikeValueConstructor>::getValue(); } }; diff --git a/src/Columns/ColumnUnique.cpp b/src/Columns/ColumnUnique.cpp index 54f45204c00..773edbfd590 100644 --- a/src/Columns/ColumnUnique.cpp +++ b/src/Columns/ColumnUnique.cpp @@ -16,6 +16,7 @@ template class ColumnUnique; template class ColumnUnique; template class ColumnUnique; template class ColumnUnique; +template class ColumnUnique; template class ColumnUnique; template class ColumnUnique; template class ColumnUnique; diff --git a/src/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h index ffa7c311e9e..ce7bbf0766f 100644 --- a/src/Columns/ColumnUnique.h +++ b/src/Columns/ColumnUnique.h @@ -760,6 +760,7 @@ extern template class ColumnUnique; extern template class ColumnUnique; extern template class ColumnUnique; extern template class ColumnUnique; +extern template class ColumnUnique; extern template class ColumnUnique; extern template class ColumnUnique; extern template class ColumnUnique; diff --git a/src/Columns/IColumn.cpp b/src/Columns/IColumn.cpp index c9a0514af4e..4a3886dddb6 100644 --- a/src/Columns/IColumn.cpp +++ b/src/Columns/IColumn.cpp @@ -443,6 +443,7 @@ template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; +template 
class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; template class IColumnHelper, ColumnFixedSizeHelper>; diff --git a/src/Common/FieldVisitorConvertToNumber.cpp b/src/Common/FieldVisitorConvertToNumber.cpp index 75b3fbfe02a..a5963e3d028 100644 --- a/src/Common/FieldVisitorConvertToNumber.cpp +++ b/src/Common/FieldVisitorConvertToNumber.cpp @@ -1,5 +1,4 @@ #include -#include "base/Decimal.h" namespace DB { @@ -17,6 +16,7 @@ template class FieldVisitorConvertToNumber; template class FieldVisitorConvertToNumber; template class FieldVisitorConvertToNumber; template class FieldVisitorConvertToNumber; +//template class FieldVisitorConvertToNumber; template class FieldVisitorConvertToNumber; template class FieldVisitorConvertToNumber; diff --git a/src/Common/FieldVisitorConvertToNumber.h b/src/Common/FieldVisitorConvertToNumber.h index 638b8805b6a..38d5dc473c4 100644 --- a/src/Common/FieldVisitorConvertToNumber.h +++ b/src/Common/FieldVisitorConvertToNumber.h @@ -129,6 +129,7 @@ extern template class FieldVisitorConvertToNumber; extern template class FieldVisitorConvertToNumber; extern template class FieldVisitorConvertToNumber; extern template class FieldVisitorConvertToNumber; +//extern template class FieldVisitorConvertToNumber; extern template class FieldVisitorConvertToNumber; extern template class FieldVisitorConvertToNumber; diff --git a/src/DataTypes/DataTypesBinaryEncoding.cpp b/src/DataTypes/DataTypesBinaryEncoding.cpp index dc0f2f3f5aa..c3190b462c3 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.cpp +++ b/src/DataTypes/DataTypesBinaryEncoding.cpp @@ -96,6 +96,7 @@ enum class BinaryTypeIndex : uint8_t SimpleAggregateFunction = 0x2E, Nested = 0x2F, JSON = 0x30, + BFloat16 = 0x31, }; /// In future we can introduce more arguments in the JSON data type definition. 
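The new tag extends the one-byte type encoding without disturbing existing values: BFloat16 (0x31) is appended right after JSON (0x30). A tiny sketch of the round-trip that the encode/decode switches below implement, using only the tag values visible in this patch (the helper names are ours):

BINARY_TYPE_INDEX = {
    "JSON": 0x30,
    "BFloat16": 0x31,  # appended after the last pre-existing tag
}

def encode_type(name: str) -> bytes:
    return bytes([BINARY_TYPE_INDEX[name]])

def decode_type(buf: bytes) -> str:
    return {v: k for k, v in BINARY_TYPE_INDEX.items()}[buf[0]]

assert decode_type(encode_type("BFloat16")) == "BFloat16"
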
@@ -151,6 +152,8 @@ BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type) return BinaryTypeIndex::Int128; case TypeIndex::Int256: return BinaryTypeIndex::Int256; + case TypeIndex::BFloat16: + return BinaryTypeIndex::BFloat16; case TypeIndex::Float32: return BinaryTypeIndex::Float32; case TypeIndex::Float64: @@ -565,6 +568,8 @@ DataTypePtr decodeDataType(ReadBuffer & buf) return std::make_shared(); case BinaryTypeIndex::Int256: return std::make_shared(); + case BinaryTypeIndex::BFloat16: + return std::make_shared(); case BinaryTypeIndex::Float32: return std::make_shared(); case BinaryTypeIndex::Float64: diff --git a/src/DataTypes/DataTypesNumber.cpp b/src/DataTypes/DataTypesNumber.cpp index 5972cebbca1..4c8918521fe 100644 --- a/src/DataTypes/DataTypesNumber.cpp +++ b/src/DataTypes/DataTypesNumber.cpp @@ -112,6 +112,7 @@ template class DataTypeNumber; template class DataTypeNumber; template class DataTypeNumber; template class DataTypeNumber; +template class DataTypeNumber; template class DataTypeNumber; template class DataTypeNumber; diff --git a/src/DataTypes/DataTypesNumber.h b/src/DataTypes/DataTypesNumber.h index 29899847c4b..a9e77e01b13 100644 --- a/src/DataTypes/DataTypesNumber.h +++ b/src/DataTypes/DataTypesNumber.h @@ -63,6 +63,7 @@ extern template class DataTypeNumber; extern template class DataTypeNumber; extern template class DataTypeNumber; extern template class DataTypeNumber; +extern template class DataTypeNumber; extern template class DataTypeNumber; extern template class DataTypeNumber; diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index ae6051823b7..62905a2e630 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -131,7 +131,7 @@ bool tryGetNumericValueFromJSONElement( switch (element.type()) { case ElementType::DOUBLE: - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { /// We permit inaccurate conversion of double to float. /// Example: double 0.1 from JSON is not representable in float. 
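The comment above is easy to verify: the closest 32-bit float to the double 0.1 is a slightly different number, so the conversion is inherently inexact. A short demonstration (Python floats are IEEE 754 doubles; struct round-trips through float32):

import struct

d = 0.1  # a 64-bit double
f = struct.unpack("f", struct.pack("f", d))[0]  # nearest 32-bit float, widened back
print(f"{d:.17g}")  # 0.10000000000000001
print(f"{f:.17g}")  # 0.10000000149011612
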
@@ -175,7 +175,7 @@ bool tryGetNumericValueFromJSONElement( return false; auto rb = ReadBufferFromMemory{element.getString()}; - if constexpr (std::is_floating_point_v) + if constexpr (is_floating_point) { if (!tryReadFloatText(value, rb) || !rb.eof()) { diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index df239b820af..854b40df441 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -110,6 +110,7 @@ template constexpr bool IsIntegralOrExtendedOrDecimal = IsDataTypeDecimal; template constexpr bool IsFloatingPoint = false; +template <> inline constexpr bool IsFloatingPoint = true; template <> inline constexpr bool IsFloatingPoint = true; template <> inline constexpr bool IsFloatingPoint = true; diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 70ec390b576..1c662dd1d9a 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -2930,6 +2930,7 @@ template <> struct FunctionTo { using Type = FunctionToInt32; }; template <> struct FunctionTo { using Type = FunctionToInt64; }; template <> struct FunctionTo { using Type = FunctionToInt128; }; template <> struct FunctionTo { using Type = FunctionToInt256; }; +//template <> struct FunctionTo { using Type = FunctionToBFloat16; }; template <> struct FunctionTo { using Type = FunctionToFloat32; }; template <> struct FunctionTo { using Type = FunctionToFloat64; }; diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 809905c692e..255eca5b406 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -694,7 +694,7 @@ public: if (arguments.size() > 1) { const ColumnWithTypeAndName & scale_column = arguments[1]; - res = Dispatcher::template apply(value_arg.column.get(), scale_column.column.get()); + res = Dispatcher::template apply(value_arg.column.get(), scale_column.column.get()); return true; } res = Dispatcher::template apply(value_arg.column.get()); diff --git a/src/IO/readFloatText.cpp b/src/IO/readFloatText.cpp index 17ccc1b25b7..fb3c86fd7b6 100644 --- a/src/IO/readFloatText.cpp +++ b/src/IO/readFloatText.cpp @@ -47,26 +47,35 @@ void assertNaN(ReadBuffer & buf) } +template void readFloatTextPrecise(BFloat16 &, ReadBuffer &); template void readFloatTextPrecise(Float32 &, ReadBuffer &); template void readFloatTextPrecise(Float64 &, ReadBuffer &); +template bool tryReadFloatTextPrecise(BFloat16 &, ReadBuffer &); template bool tryReadFloatTextPrecise(Float32 &, ReadBuffer &); template bool tryReadFloatTextPrecise(Float64 &, ReadBuffer &); +template void readFloatTextFast(BFloat16 &, ReadBuffer &); template void readFloatTextFast(Float32 &, ReadBuffer &); template void readFloatTextFast(Float64 &, ReadBuffer &); +template bool tryReadFloatTextFast(BFloat16 &, ReadBuffer &); template bool tryReadFloatTextFast(Float32 &, ReadBuffer &); template bool tryReadFloatTextFast(Float64 &, ReadBuffer &); +template void readFloatTextSimple(BFloat16 &, ReadBuffer &); template void readFloatTextSimple(Float32 &, ReadBuffer &); template void readFloatTextSimple(Float64 &, ReadBuffer &); +template bool tryReadFloatTextSimple(BFloat16 &, ReadBuffer &); template bool tryReadFloatTextSimple(Float32 &, ReadBuffer &); template bool tryReadFloatTextSimple(Float64 &, ReadBuffer &); +template void readFloatText(BFloat16 &, ReadBuffer &); template void readFloatText(Float32 &, ReadBuffer &); template void readFloatText(Float64 &, ReadBuffer 
&); +template bool tryReadFloatText(BFloat16 &, ReadBuffer &); template bool tryReadFloatText(Float32 &, ReadBuffer &); template bool tryReadFloatText(Float64 &, ReadBuffer &); +template bool tryReadFloatTextNoExponent(BFloat16 &, ReadBuffer &); template bool tryReadFloatTextNoExponent(Float32 &, ReadBuffer &); template bool tryReadFloatTextNoExponent(Float64 &, ReadBuffer &); diff --git a/src/IO/readFloatText.h b/src/IO/readFloatText.h index c2fec9d4b0b..a7fd6058dd9 100644 --- a/src/IO/readFloatText.h +++ b/src/IO/readFloatText.h @@ -222,7 +222,6 @@ ReturnType readFloatTextPreciseImpl(T & x, ReadBuffer & buf) break; } - char tmp_buf[MAX_LENGTH]; int num_copied_chars = 0; @@ -597,22 +596,85 @@ ReturnType readFloatTextSimpleImpl(T & x, ReadBuffer & buf) return ReturnType(true); } -template void readFloatTextPrecise(T & x, ReadBuffer & in) { readFloatTextPreciseImpl(x, in); } -template bool tryReadFloatTextPrecise(T & x, ReadBuffer & in) { return readFloatTextPreciseImpl(x, in); } +template void readFloatTextPrecise(T & x, ReadBuffer & in) +{ + if constexpr (std::is_same_v) + { + Float32 tmp; + readFloatTextPreciseImpl(tmp, in); + x = BFloat16(tmp); + } + else + readFloatTextPreciseImpl(x, in); +} + +template bool tryReadFloatTextPrecise(T & x, ReadBuffer & in) +{ + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextPreciseImpl(tmp, in); + if (res) + x = BFloat16(tmp); + return res; + } + else + return readFloatTextPreciseImpl(x, in); +} template void readFloatTextFast(T & x, ReadBuffer & in) { bool has_fractional; - readFloatTextFastImpl(x, in, has_fractional); + if constexpr (std::is_same_v) + { + Float32 tmp; + readFloatTextFastImpl(tmp, in, has_fractional); + x = BFloat16(tmp); + } + else + readFloatTextFastImpl(x, in, has_fractional); } + template bool tryReadFloatTextFast(T & x, ReadBuffer & in) { bool has_fractional; - return readFloatTextFastImpl(x, in, has_fractional); + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextFastImpl(tmp, in, has_fractional); + if (res) + x = BFloat16(tmp); + return res; + } + else + return readFloatTextFastImpl(x, in, has_fractional); } -template void readFloatTextSimple(T & x, ReadBuffer & in) { readFloatTextSimpleImpl(x, in); } -template bool tryReadFloatTextSimple(T & x, ReadBuffer & in) { return readFloatTextSimpleImpl(x, in); } +template void readFloatTextSimple(T & x, ReadBuffer & in) +{ + if constexpr (std::is_same_v) + { + Float32 tmp; + readFloatTextSimpleImpl(tmp, in); + x = BFloat16(tmp); + } + else + readFloatTextSimpleImpl(x, in); +} + +template bool tryReadFloatTextSimple(T & x, ReadBuffer & in) +{ + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextSimpleImpl(tmp, in); + if (res) + x = BFloat16(tmp); + return res; + } + else + return readFloatTextSimpleImpl(x, in); +} /// Implementation that is selected as default. 
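Every wrapper in this hunk repeats one pattern: parse with the existing `Float32` code path, then narrow the result to `BFloat16`. A self-contained sketch of what that narrowing does to a parsed value; `strtof` stands in for the patch's fast parser and the helper reproduces round-to-nearest-even narrowing (the rounding a native `__bf16` cast performs), so none of this is the patch's actual code:

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Round-to-nearest-even narrowing to bfloat16, kept as raw bits.
static uint16_t narrowToBF16(float f)
{
    uint32_t u = std::bit_cast<uint32_t>(f);
    uint32_t bias = 0x7FFF + ((u >> 16) & 1); // round half to even
    return static_cast<uint16_t>((u + bias) >> 16);
}

static float widen(uint16_t x)
{
    return std::bit_cast<float>(static_cast<uint32_t>(x) << 16);
}

int main()
{
    float tmp = strtof("12.3", nullptr); // stand-in for the Float32 fast path
    uint16_t bf = narrowToBF16(tmp);     // the "x = BFloat16(tmp)" step
    printf("%g\n", widen(bf));           // prints 12.3125: only 7 mantissa bits survive
}
```

Routing through `Float32` avoids instantiating the precise and fast parsers for a 16-bit type they were never written for; the extra narrowing step is the documented, accepted loss of precision.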
@@ -624,18 +686,47 @@ template bool tryReadFloatText(T & x, ReadBuffer & in) { return try template bool tryReadFloatTextNoExponent(T & x, ReadBuffer & in) { bool has_fractional; - return readFloatTextFastImpl(x, in, has_fractional); + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextFastImpl(tmp, in, has_fractional); + if (res) + x = BFloat16(tmp); + return res; + + } + else + return readFloatTextFastImpl(x, in, has_fractional); } /// With a @has_fractional flag /// Used for input_format_try_infer_integers template bool tryReadFloatTextExt(T & x, ReadBuffer & in, bool & has_fractional) { - return readFloatTextFastImpl(x, in, has_fractional); + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextFastImpl(tmp, in, has_fractional); + if (res) + x = BFloat16(tmp); + return res; + } + else + return readFloatTextFastImpl(x, in, has_fractional); } + template bool tryReadFloatTextExtNoExponent(T & x, ReadBuffer & in, bool & has_fractional) { - return readFloatTextFastImpl(x, in, has_fractional); + if constexpr (std::is_same_v) + { + Float32 tmp; + bool res = readFloatTextFastImpl(tmp, in, has_fractional); + if (res) + x = BFloat16(tmp); + return res; + } + else + return readFloatTextFastImpl(x, in, has_fractional); } } diff --git a/src/Processors/Formats/Impl/Parquet/ParquetDataValuesReader.cpp b/src/Processors/Formats/Impl/Parquet/ParquetDataValuesReader.cpp index b471989076b..4b79be98810 100644 --- a/src/Processors/Formats/Impl/Parquet/ParquetDataValuesReader.cpp +++ b/src/Processors/Formats/Impl/Parquet/ParquetDataValuesReader.cpp @@ -580,6 +580,7 @@ template class ParquetPlainValuesReader; template class ParquetPlainValuesReader; template class ParquetPlainValuesReader; template class ParquetPlainValuesReader; +template class ParquetPlainValuesReader; template class ParquetPlainValuesReader; template class ParquetPlainValuesReader; template class ParquetPlainValuesReader>; @@ -602,6 +603,7 @@ template class ParquetRleDictReader; template class ParquetRleDictReader; template class ParquetRleDictReader; template class ParquetRleDictReader; +template class ParquetRleDictReader; template class ParquetRleDictReader; template class ParquetRleDictReader; template class ParquetRleDictReader>; diff --git a/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp b/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp index c3c7db510ed..328dd37107e 100644 --- a/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp +++ b/src/Processors/Formats/Impl/Parquet/ParquetLeafColReader.cpp @@ -644,6 +644,7 @@ template class ParquetLeafColReader; template class ParquetLeafColReader; template class ParquetLeafColReader; template class ParquetLeafColReader; +template class ParquetLeafColReader; template class ParquetLeafColReader; template class ParquetLeafColReader; template class ParquetLeafColReader; From 1da6e1fffa8e5cc40d71fee52d6f2742a59d8f21 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 02:25:29 +0100 Subject: [PATCH 119/267] Conversions --- src/Functions/FunctionsConversion.cpp | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 1c662dd1d9a..f37dff35862 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -7,10 +7,8 @@ #include #include #include -#include #include #include -#include #include #include #include @@ -73,8 +71,10 @@ #include #include + 
namespace DB { + namespace Setting { extern const SettingsBool cast_ipv4_ipv6_default_on_conversion_error; @@ -1862,6 +1862,11 @@ struct ConvertImpl } } + if constexpr ((std::is_same_v || std::is_same_v) + && !(std::is_same_v || std::is_same_v)) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported", + TypeName, TypeName); + if constexpr (std::is_same_v || std::is_same_v) { @@ -2875,6 +2880,7 @@ struct NameToInt32 { static constexpr auto name = "toInt32"; }; struct NameToInt64 { static constexpr auto name = "toInt64"; }; struct NameToInt128 { static constexpr auto name = "toInt128"; }; struct NameToInt256 { static constexpr auto name = "toInt256"; }; +struct NameToBFloat16 { static constexpr auto name = "toBFloat16"; }; struct NameToFloat32 { static constexpr auto name = "toFloat32"; }; struct NameToFloat64 { static constexpr auto name = "toFloat64"; }; struct NameToUUID { static constexpr auto name = "toUUID"; }; @@ -2893,6 +2899,7 @@ using FunctionToInt32 = FunctionConvert>; using FunctionToInt128 = FunctionConvert>; using FunctionToInt256 = FunctionConvert>; +using FunctionToBFloat16 = FunctionConvert>; using FunctionToFloat32 = FunctionConvert>; using FunctionToFloat64 = FunctionConvert>; @@ -2930,7 +2937,7 @@ template <> struct FunctionTo { using Type = FunctionToInt32; }; template <> struct FunctionTo { using Type = FunctionToInt64; }; template <> struct FunctionTo { using Type = FunctionToInt128; }; template <> struct FunctionTo { using Type = FunctionToInt256; }; -//template <> struct FunctionTo { using Type = FunctionToBFloat16; }; +template <> struct FunctionTo { using Type = FunctionToBFloat16; }; template <> struct FunctionTo { using Type = FunctionToFloat32; }; template <> struct FunctionTo { using Type = FunctionToFloat64; }; @@ -2973,6 +2980,7 @@ struct NameToInt32OrZero { static constexpr auto name = "toInt32OrZero"; }; struct NameToInt64OrZero { static constexpr auto name = "toInt64OrZero"; }; struct NameToInt128OrZero { static constexpr auto name = "toInt128OrZero"; }; struct NameToInt256OrZero { static constexpr auto name = "toInt256OrZero"; }; +struct NameToBFloat16OrZero { static constexpr auto name = "toBFloat16OrZero"; }; struct NameToFloat32OrZero { static constexpr auto name = "toFloat32OrZero"; }; struct NameToFloat64OrZero { static constexpr auto name = "toFloat64OrZero"; }; struct NameToDateOrZero { static constexpr auto name = "toDateOrZero"; }; @@ -2999,6 +3007,7 @@ using FunctionToInt32OrZero = FunctionConvertFromString; using FunctionToInt128OrZero = FunctionConvertFromString; using FunctionToInt256OrZero = FunctionConvertFromString; +using FunctionToBFloat16OrZero = FunctionConvertFromString; using FunctionToFloat32OrZero = FunctionConvertFromString; using FunctionToFloat64OrZero = FunctionConvertFromString; using FunctionToDateOrZero = FunctionConvertFromString; @@ -3025,6 +3034,7 @@ struct NameToInt32OrNull { static constexpr auto name = "toInt32OrNull"; }; struct NameToInt64OrNull { static constexpr auto name = "toInt64OrNull"; }; struct NameToInt128OrNull { static constexpr auto name = "toInt128OrNull"; }; struct NameToInt256OrNull { static constexpr auto name = "toInt256OrNull"; }; +struct NameToBFloat16OrNull { static constexpr auto name = "toBFloat16OrNull"; }; struct NameToFloat32OrNull { static constexpr auto name = "toFloat32OrNull"; }; struct NameToFloat64OrNull { static constexpr auto name = "toFloat64OrNull"; }; struct NameToDateOrNull { static constexpr auto name = "toDateOrNull"; }; @@ -3051,6 +3061,7 @@ 
using FunctionToInt32OrNull = FunctionConvertFromString; using FunctionToInt128OrNull = FunctionConvertFromString; using FunctionToInt256OrNull = FunctionConvertFromString; +using FunctionToBFloat16OrNull = FunctionConvertFromString; using FunctionToFloat32OrNull = FunctionConvertFromString; using FunctionToFloat64OrNull = FunctionConvertFromString; using FunctionToDateOrNull = FunctionConvertFromString; @@ -5194,7 +5205,7 @@ private: if constexpr (is_any_of) { @@ -5447,6 +5458,7 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); @@ -5485,6 +5497,7 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); @@ -5513,6 +5526,7 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); From e65bb147d553b3fcd5f361366547b2858a122247 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 02:27:53 +0100 Subject: [PATCH 120/267] Style --- src/Functions/exp.cpp | 6 ++++++ src/Functions/log.cpp | 5 +++++ src/Functions/sigmoid.cpp | 6 ++++++ src/Functions/tanh.cpp | 6 ++++++ 4 files changed, 23 insertions(+) diff --git a/src/Functions/exp.cpp b/src/Functions/exp.cpp index 07c9288e8ab..24f1d313831 100644 --- a/src/Functions/exp.cpp +++ b/src/Functions/exp.cpp @@ -3,6 +3,12 @@ namespace DB { + +namespace ErrorCodes +{ +extern const int NOT_IMPLEMENTED; +} + namespace { diff --git a/src/Functions/log.cpp b/src/Functions/log.cpp index beaa8128b2b..49fc509634b 100644 --- a/src/Functions/log.cpp +++ b/src/Functions/log.cpp @@ -4,6 +4,11 @@ namespace DB { +namespace ErrorCodes +{ +extern const int NOT_IMPLEMENTED; +} + namespace { diff --git a/src/Functions/sigmoid.cpp b/src/Functions/sigmoid.cpp index 1179329845d..bb9710a15fe 100644 --- a/src/Functions/sigmoid.cpp +++ b/src/Functions/sigmoid.cpp @@ -3,6 +3,12 @@ namespace DB { + +namespace ErrorCodes +{ +extern const int NOT_IMPLEMENTED; +} + namespace { diff --git a/src/Functions/tanh.cpp b/src/Functions/tanh.cpp index 293318f9bbb..d0e1440485b 100644 --- a/src/Functions/tanh.cpp +++ b/src/Functions/tanh.cpp @@ -3,6 +3,12 @@ namespace DB { + +namespace ErrorCodes +{ +extern const int NOT_IMPLEMENTED; +} + namespace { From b4acc885f35e4cccae818fca477efffbc9332ded Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 02:37:26 +0100 Subject: [PATCH 121/267] Documentation --- docs/en/sql-reference/data-types/float.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/data-types/float.md b/docs/en/sql-reference/data-types/float.md index 3c789076c1e..7185308bdce 100644 --- a/docs/en/sql-reference/data-types/float.md +++ b/docs/en/sql-reference/data-types/float.md @@ -1,10 +1,10 @@ --- slug: /en/sql-reference/data-types/float sidebar_position: 4 -sidebar_label: Float32, Float64 +sidebar_label: Float32, Float64, BFloat16 --- -# Float32, Float64 +# Float32, Float64, BFloat16 :::note If you need accurate calculations, in particular if you work with financial or business data requiring a high precision, you should consider using [Decimal](../data-types/decimal.md) instead. 
@@ -117,3 +117,11 @@ SELECT 0 / 0 ``` See the rules for `NaN` sorting in the section [ORDER BY clause](../../sql-reference/statements/select/order-by.md). + +## BFloat16 + +`BFloat16` is a 16-bit floating-point data type with a sign bit, an 8-bit exponent, and a 7-bit mantissa. + +It is useful for machine learning and AI applications. + +ClickHouse supports conversions between `Float32` and `BFloat16`. Most other operations are not supported. From 6cb083621aece140d08d800620b0e5fe7bdc2da0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 02:47:59 +0100 Subject: [PATCH 122/267] Documentation --- ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt | 1 + utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt b/ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt index e2966898be2..7cae8509b83 100644 --- a/ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt +++ b/ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt @@ -3131,3 +3131,4 @@ DistributedCachePoolBehaviourOnLimit SharedJoin ShareSet unacked +BFloat diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index a08143467cd..9765b45c085 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -3154,3 +3154,4 @@ znode znodes zookeeperSessionUptime zstd +BFloat From 19e6274a403801ebc37e42954dfa98c70d27eb34 Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Sun, 10 Nov 2024 15:40:25 +0330 Subject: [PATCH 123/267] Fixed a typo Signed-off-by: xogoodnow --- tests/integration/test_structured_logging_json/test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_structured_logging_json/test.py b/tests/integration/test_structured_logging_json/test.py index bc5f9753f4d..775d5be202c 100644 --- a/tests/integration/test_structured_logging_json/test.py +++ b/tests/integration/test_structured_logging_json/test.py @@ -114,7 +114,7 @@ def validate_logs(logs): return result -def valiade_everything(config, node, config_type): +def validate_everything(config, node, config_type): node.query("SELECT 1") logs = node.grep_in_log("").split("\n") return ( @@ -135,8 +135,8 @@ def test_structured_logging_json_format(start_cluster): ["cat", "/etc/clickhouse-server/config.d/config_no_keys_json.xml"] ) - assert valiade_everything(config_all_keys, node_all_keys, "config_all_keys") == True + assert validate_everything(config_all_keys, node_all_keys, "config_all_keys") == True assert ( - valiade_everything(config_some_keys, node_some_keys, "config_some_keys") == True + validate_everything(config_some_keys, node_some_keys, "config_some_keys") == True ) - assert valiade_everything(config_no_keys, node_no_keys, "config_no_keys") == True From bec94da77e8333d64c71b4bf778fbf78d10a8519 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 13:19:08 +0100 Subject: [PATCH 124/267] Progressing --- src/DataTypes/DataTypesDecimal.cpp | 8 +++----- src/DataTypes/DataTypesDecimal.h | 2 -- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/src/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp index fddae052ada..63bd4bf2a59 100644 --- a/src/DataTypes/DataTypesDecimal.cpp +++ b/src/DataTypes/DataTypesDecimal.cpp @@ -262,9 +262,9 @@
FOR_EACH_ARITHMETIC_TYPE(INVOKE); template requires (is_arithmetic_v && IsDataTypeDecimal) -ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & /*value*/, UInt32 /*scale*/, typename ToDataType::FieldType & /*result*/) +ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & value, UInt32 scale, typename ToDataType::FieldType & result) { -/* using FromFieldType = typename FromDataType::FieldType; + using FromFieldType = typename FromDataType::FieldType; using ToFieldType = typename ToDataType::FieldType; using ToNativeType = typename ToFieldType::NativeType; @@ -306,9 +306,7 @@ ReturnType convertToDecimalImpl(const typename FromDataType::FieldType & /*value return ReturnType(convertDecimalsImpl, ToDataType, ReturnType>(static_cast(value), 0, scale, result)); else return ReturnType(convertDecimalsImpl, ToDataType, ReturnType>(static_cast(value), 0, scale, result)); - }*/ - - return ReturnType(); + } } #define DISPATCH(FROM_DATA_TYPE, TO_DATA_TYPE) \ diff --git a/src/DataTypes/DataTypesDecimal.h b/src/DataTypes/DataTypesDecimal.h index e0d49408981..09a25617506 100644 --- a/src/DataTypes/DataTypesDecimal.h +++ b/src/DataTypes/DataTypesDecimal.h @@ -3,9 +3,7 @@ #include #include #include -#include #include -#include #include #include #include From 5258bb6d01642306374e4317c40813a22e51b5b7 Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Sun, 10 Nov 2024 16:21:01 +0330 Subject: [PATCH 125/267] Trigger pipeline From f0dc1330eb9d830161531819432a611a363fdc6b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 13:53:08 +0100 Subject: [PATCH 126/267] Rounding --- src/Functions/FunctionsRound.h | 42 ++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 255eca5b406..70ad4d17718 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -268,6 +268,19 @@ inline double roundWithMode(double x, RoundingMode mode) std::unreachable(); } +inline BFloat16 roundWithMode(BFloat16 x, RoundingMode mode) +{ + switch (mode) + { + case RoundingMode::Round: return BFloat16(nearbyintf(Float32(x))); + case RoundingMode::Floor: return BFloat16(floorf(Float32(x))); + case RoundingMode::Ceil: return BFloat16(ceilf(Float32(x))); + case RoundingMode::Trunc: return BFloat16(truncf(Float32(x))); + } + + std::unreachable(); +} + template class FloatRoundingComputationBase { @@ -289,6 +302,11 @@ public: } }; +template <> +class FloatRoundingComputationBase : public FloatRoundingComputationBase +{ +}; + /** Implementation of low-level round-off functions for floating-point values. 
*/ @@ -688,20 +706,26 @@ public: using Types = std::decay_t; using DataType = typename Types::RightType; - if constexpr ((IsDataTypeNumber || IsDataTypeDecimal) - && !std::is_same_v) + if (arguments.size() > 1) { - if (arguments.size() > 1) + const ColumnWithTypeAndName & scale_column = arguments[1]; + + auto call_scale = [&](const auto & scaleTypes) -> bool { - const ColumnWithTypeAndName & scale_column = arguments[1]; - res = Dispatcher::template apply(value_arg.column.get(), scale_column.column.get()); + using ScaleTypes = std::decay_t; + using ScaleType = typename ScaleTypes::RightType; + + res = Dispatcher::template apply(value_arg.column.get(), scale_column.column.get()); return true; - } - res = Dispatcher::template apply(value_arg.column.get()); + }; + + TypeIndex right_index = scale_column.type->getTypeId(); + if (!callOnBasicType(right_index, call_scale)) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Scale argument for rounding functions must have integer type"); return true; } - else - return false; + res = Dispatcher::template apply(value_arg.column.get()); + return true; }; #if !defined(__SSE4_1__) From 01ca2b6947fcf0494d90b6f819a1f53c9e58aa03 Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Sun, 10 Nov 2024 16:34:16 +0330 Subject: [PATCH 127/267] ran black Signed-off-by: xogoodnow --- tests/integration/test_structured_logging_json/test.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_structured_logging_json/test.py b/tests/integration/test_structured_logging_json/test.py index 775d5be202c..5fe49b784f2 100644 --- a/tests/integration/test_structured_logging_json/test.py +++ b/tests/integration/test_structured_logging_json/test.py @@ -135,8 +135,11 @@ def test_structured_logging_json_format(start_cluster): ["cat", "/etc/clickhouse-server/config.d/config_no_keys_json.xml"] ) - assert validate_everything(config_all_keys, node_all_keys, "config_all_keys") == True assert ( + validate_everything(config_all_keys, node_all_keys, "config_all_keys") == True + ) + assert ( + validate_everything(config_some_keys, node_some_keys, "config_some_keys") + == True ) assert validate_everything(config_no_keys, node_no_keys, "config_no_keys") == True From db98fb4c79252d6305eabc06a749e2082bb1c489 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 14:39:45 +0100 Subject: [PATCH 128/267] Documentation --- src/Functions/FunctionsConversion.cpp | 64 +++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index f37dff35862..37a4ba30d30 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -5458,7 +5458,17 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); - factory.registerFunction(); + + factory.registerFunction(FunctionDocumentation{.description=R"( +Converts Float32 to BFloat16, losing precision.
+ +Example: [example:typical] )", .examples{ {"typical", "SELECT toBFloat16(12.3::Float32);", "12.3125"}}, .categories{"Conversion"}}); + factory.registerFunction(); factory.registerFunction(); @@ -5497,7 +5507,31 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); - factory.registerFunction(); + + factory.registerFunction(FunctionDocumentation{.description=R"( +Converts String to BFloat16. + +If the string does not represent a floating point value, the function returns zero. + +The function allows a silent loss of precision while converting from the string representation. In that case, it will return the truncated result. + +Example of successful conversion: +[example:typical] + +Examples of unsuccessful conversion: +[example:invalid1] +[example:invalid2] + +Example of a loss of precision: +[example:precision] +)", + .examples{ + {"typical", "SELECT toBFloat16OrZero('12.3');", "12.3125"}}, + {"invalid1", "SELECT toBFloat16OrZero('abc');", "0"}}, + {"invalid2", "SELECT toBFloat16OrZero(' 1');", "0"}}, + {"precision", "SELECT toBFloat16OrZero('12.3456789');", "12.375"}}, + .categories{"Conversion"}}); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); @@ -5526,6 +5560,31 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); - factory.registerFunction(); + + factory.registerFunction(FunctionDocumentation{.description=R"( +Converts String to Nullable(BFloat16). + +If the string does not represent a floating point value, the function returns NULL. + +The function allows a silent loss of precision while converting from the string representation. In that case, it will return the truncated result. + +Example of successful conversion: +[example:typical] + +Examples of unsuccessful conversion: +[example:invalid1] +[example:invalid2] + +Example of a loss of precision: +[example:precision] +)", + .examples{ + {"typical", "SELECT toBFloat16OrNull('12.3');", "12.3125"}}, + {"invalid1", "SELECT toBFloat16OrNull('abc');", "NULL"}}, + {"invalid2", "SELECT toBFloat16OrNull(' 1');", "NULL"}}, + {"precision", "SELECT toBFloat16OrNull('12.3456789');", "12.375"}}, + .categories{"Conversion"}}); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); From 56f6062bd848ee4df0c3bcf9bc2932ec57c52916 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sun, 10 Nov 2024 14:51:19 +0100 Subject: [PATCH 129/267] better --- src/Core/Settings.cpp | 2 +- src/Interpreters/ConcurrentHashJoin.cpp | 6 +++--- src/Interpreters/ConcurrentHashJoin.h | 2 +- src/Planner/PlannerJoinTree.cpp | 4 ++-- src/Processors/QueryPlan/JoinStep.cpp | 14 +++++++------- src/Processors/QueryPlan/JoinStep.h | 4 ++-- src/QueryPipeline/QueryPipelineBuilder.cpp | 6 +++--- src/QueryPipeline/QueryPipelineBuilder.h | 2 +- 8 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 9612b4511b2..dd0a5cd2c52 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -144,7 +144,7 @@ Squash blocks passed to the external table to a specified size in bytes, if bloc M(UInt64, max_joined_block_size_rows, DEFAULT_BLOCK_SIZE, R"( Maximum block size for JOIN result (if join algorithm supports it). 0 means unlimited. )", 0) \ - M(UInt64, min_joined_block_size_rows, 1024, R"( + M(UInt64, min_joined_block_size_bytes, 524288, R"( Minimum block size for JOIN result (if join algorithm supports it).
0 means no minimum. )", 0) \ M(UInt64, max_insert_threads, 0, R"( diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index aa1673655be..71b16b9a35d 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -178,7 +178,7 @@ bool ConcurrentHashJoin::addBlockToJoin(const Block & right_block_, bool check_l /// (inside different `hash_join`-s) because the block will be shared. Block right_block = hash_joins[0]->data->materializeColumnsFromRightBlock(right_block_); - auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block); + auto dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, std::move(right_block)); size_t blocks_left = 0; for (const auto & block : dispatched_blocks) { @@ -239,7 +239,7 @@ void ConcurrentHashJoin::joinBlock(Block & block, ExtraScatteredBlocks & extra_b else { hash_joins[0]->data->materializeColumnsFromLeftBlock(block); - dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_left, block); + dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_left, std::move(block)); } block = {}; @@ -402,7 +402,7 @@ ScatteredBlocks scatterBlocksWithSelector(size_t num_shards, const IColumn::Sele return result; } -ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, Block & from_block) +ScatteredBlocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, Block && from_block) { size_t num_shards = hash_joins.size(); if (num_shards == 1) diff --git a/src/Interpreters/ConcurrentHashJoin.h b/src/Interpreters/ConcurrentHashJoin.h index 555a61d4004..2266e53f80c 100644 --- a/src/Interpreters/ConcurrentHashJoin.h +++ b/src/Interpreters/ConcurrentHashJoin.h @@ -79,7 +79,7 @@ private: std::mutex totals_mutex; Block totals; - ScatteredBlocks dispatchBlock(const Strings & key_columns_names, Block & from_block); + ScatteredBlocks dispatchBlock(const Strings & key_columns_names, Block && from_block); }; UInt64 calculateCacheKey(std::shared_ptr & table_join, const QueryTreeNodePtr & right_table_expression); diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 72492db84d2..19bd2510d4c 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -104,7 +104,7 @@ namespace Setting extern const SettingsBool optimize_move_to_prewhere; extern const SettingsBool optimize_move_to_prewhere_if_final; extern const SettingsBool use_concurrency_control; - extern const SettingsUInt64 min_joined_block_size_rows; + extern const SettingsUInt64 min_joined_block_size_bytes; } namespace ErrorCodes @@ -1624,7 +1624,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ right_plan.getCurrentHeader(), std::move(join_algorithm), settings[Setting::max_block_size], - settings[Setting::min_joined_block_size_rows], + settings[Setting::min_joined_block_size_bytes], settings[Setting::max_threads], false /*optimize_read_in_order*/); diff --git a/src/Processors/QueryPlan/JoinStep.cpp b/src/Processors/QueryPlan/JoinStep.cpp index 91e4869f2c1..86bc818d266 100644 --- a/src/Processors/QueryPlan/JoinStep.cpp +++ b/src/Processors/QueryPlan/JoinStep.cpp @@ -44,12 +44,12 @@ JoinStep::JoinStep( const Header & right_header_, JoinPtr join_, size_t max_block_size_, - size_t min_block_size_, + size_t min_block_size_bytes_, size_t max_streams_, bool keep_left_read_in_order_) : join(std::move(join_)) ,
max_block_size(max_block_size_) - , min_block_size(min_block_size_) + , min_block_size_bytes(min_block_size_bytes_) , max_streams(max_streams_) , keep_left_read_in_order(keep_left_read_in_order_) { @@ -69,21 +69,21 @@ QueryPipelineBuilderPtr JoinStep::updatePipeline(QueryPipelineBuilders pipelines return joined_pipeline; } - auto ppl = QueryPipelineBuilder::joinPipelinesRightLeft( + auto pipeline = QueryPipelineBuilder::joinPipelinesRightLeft( std::move(pipelines[0]), std::move(pipelines[1]), join, *output_header, max_block_size, - min_block_size, + min_block_size_bytes, max_streams, keep_left_read_in_order, &processors); - ppl->addSimpleTransform([&](const Block & header) - { return std::make_shared(header, min_block_size, 0); }); + pipeline->addSimpleTransform([&](const Block & header) + { return std::make_shared(header, 0, min_block_size_bytes); }); - return ppl; + return pipeline; } bool JoinStep::allowPushDownToRight() const diff --git a/src/Processors/QueryPlan/JoinStep.h b/src/Processors/QueryPlan/JoinStep.h index 9222ced2e55..bc9b7600510 100644 --- a/src/Processors/QueryPlan/JoinStep.h +++ b/src/Processors/QueryPlan/JoinStep.h @@ -18,7 +18,7 @@ public: const Header & right_header_, JoinPtr join_, size_t max_block_size_, - size_t min_block_size_, + size_t min_block_size_bytes_, size_t max_streams_, bool keep_left_read_in_order_); @@ -40,7 +40,7 @@ private: JoinPtr join; size_t max_block_size; - size_t min_block_size; + size_t min_block_size_bytes; size_t max_streams; bool keep_left_read_in_order; }; diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index a9a3b081fac..7612af7e3d5 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -386,7 +386,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe JoinPtr join, const Block & output_header, size_t max_block_size, - size_t min_block_size, + size_t min_block_size_bytes, size_t max_streams, bool keep_left_read_in_order, Processors * collected_processors) @@ -443,7 +443,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe Processors processors; for (auto & outport : outports) { - auto squashing = std::make_shared(right->getHeader(), min_block_size, 0); + auto squashing = std::make_shared(right->getHeader(), 0, min_block_size_bytes); connect(*outport, squashing->getInputs().front()); processors.emplace_back(squashing); auto adding_joined = std::make_shared(right->getHeader(), join); @@ -502,7 +502,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe Block left_header = left->getHeader(); for (size_t i = 0; i < num_streams; ++i) { - auto squashing = std::make_shared(left->getHeader(), min_block_size, 0); + auto squashing = std::make_shared(left->getHeader(), 0, min_block_size_bytes); connect(**lit, squashing->getInputs().front()); auto joining = std::make_shared( diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index 34bb62ee0d2..312655b7b6d 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -126,7 +126,7 @@ public: JoinPtr join, const Block & output_header, size_t max_block_size, - size_t min_block_size, + size_t min_block_size_bytes, size_t max_streams, bool keep_left_read_in_order, Processors * collected_processors = nullptr); From 64244250c20f6b4083319b6f509c7cf3fa0621a9 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sun, 10 Nov 2024 15:06:02 +0100 Subject: [PATCH 130/267] fix --- 
src/Interpreters/HashJoin/ScatteredBlock.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Interpreters/HashJoin/ScatteredBlock.h b/src/Interpreters/HashJoin/ScatteredBlock.h index d94497e304b..729377f6758 100644 --- a/src/Interpreters/HashJoin/ScatteredBlock.h +++ b/src/Interpreters/HashJoin/ScatteredBlock.h @@ -302,6 +302,8 @@ struct ScatteredBlock : private boost::noncopyable /// Cut first `num_rows` rows from `block` in place and returns block with remaining rows ScatteredBlock cut(size_t num_rows) { + SCOPE_EXIT(filterBySelector()); + if (num_rows >= rows()) return ScatteredBlock{Block{}}; From 1c85a0401fbbddccbd3e310a965ce0eb67079a2b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 15:14:17 +0100 Subject: [PATCH 131/267] Documentation --- src/Functions/FunctionsConversion.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 37a4ba30d30..7f4ccc338cf 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -5526,9 +5526,9 @@ Example of a loss of precision: [example:precision] )", .examples{ - {"typical", "SELECT toBFloat16OrZero('12.3');", "12.3125"}}, - {"invalid1", "SELECT toBFloat16OrZero('abc');", "0"}}, - {"invalid2", "SELECT toBFloat16OrZero(' 1');", "0"}}, + {"typical", "SELECT toBFloat16OrZero('12.3');", "12.3125"}, + {"invalid1", "SELECT toBFloat16OrZero('abc');", "0"}, + {"invalid2", "SELECT toBFloat16OrZero(' 1');", "0"}, {"precision", "SELECT toBFloat16OrZero('12.3456789');", "12.375"}}, .categories{"Conversion"}}); @@ -5579,9 +5579,9 @@ Example of a loss of precision: [example:precision] )", .examples{ - {"typical", "SELECT toBFloat16OrNull('12.3');", "12.3125"}}, - {"invalid1", "SELECT toBFloat16OrNull('abc');", "NULL"}}, - {"invalid2", "SELECT toBFloat16OrNull(' 1');", "NULL"}}, + {"typical", "SELECT toBFloat16OrNull('12.3');", "12.3125"}, + {"invalid1", "SELECT toBFloat16OrNull('abc');", "NULL"}, + {"invalid2", "SELECT toBFloat16OrNull(' 1');", "NULL"}, {"precision", "SELECT toBFloat16OrNull('12.3456789');", "12.375"}}, .categories{"Conversion"}}); From 0a79fd0c4087b13e3f79e69a3b8c91c1fd48defd Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sun, 10 Nov 2024 15:16:25 +0100 Subject: [PATCH 132/267] fix tests --- .../00779_all_right_join_max_block_size.sql | 1 + .../02001_join_on_const_bs_long.sql.j2 | 1 + .../02236_explain_pipeline_join.reference | 19 ++++++++++--------- .../02962_max_joined_block_rows.sql | 2 ++ 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/queries/0_stateless/00779_all_right_join_max_block_size.sql b/tests/queries/0_stateless/00779_all_right_join_max_block_size.sql index f14b1393b3b..6b93f13d27f 100644 --- a/tests/queries/0_stateless/00779_all_right_join_max_block_size.sql +++ b/tests/queries/0_stateless/00779_all_right_join_max_block_size.sql @@ -1,2 +1,3 @@ +SET min_joined_block_size_bytes = 0; SET max_block_size = 6; SELECT blockSize() bs FROM (SELECT 1 s) js1 ALL RIGHT JOIN (SELECT arrayJoin([2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3]) s) js2 USING (s) GROUP BY bs ORDER BY bs; diff --git a/tests/queries/0_stateless/02001_join_on_const_bs_long.sql.j2 b/tests/queries/0_stateless/02001_join_on_const_bs_long.sql.j2 index 1726bcb7062..7a4d0857182 100644 --- a/tests/queries/0_stateless/02001_join_on_const_bs_long.sql.j2 +++ b/tests/queries/0_stateless/02001_join_on_const_bs_long.sql.j2 @@ -7,6 +7,7 @@ CREATE TABLE t2 (id Int) ENGINE = MergeTree 
ORDER BY id; INSERT INTO t1 VALUES (1), (2); INSERT INTO t2 SELECT number + 5 AS x FROM (SELECT * FROM system.numbers LIMIT 1111); +SET min_joined_block_size_bytes = 0; SET max_block_size = 100; SELECT count() == 2222 FROM t1 JOIN t2 ON 1 = 1; diff --git a/tests/queries/0_stateless/02236_explain_pipeline_join.reference b/tests/queries/0_stateless/02236_explain_pipeline_join.reference index 73d0ca8cb5e..ba366d89139 100644 --- a/tests/queries/0_stateless/02236_explain_pipeline_join.reference +++ b/tests/queries/0_stateless/02236_explain_pipeline_join.reference @@ -1,17 +1,18 @@ (Expression) ExpressionTransform (Join) - JoiningTransform 2 → 1 - (Expression) - ExpressionTransform - (Limit) - Limit - (ReadFromSystemNumbers) - NumbersRange 0 → 1 - (Expression) - FillingRightJoinSide + SimpleSquashingTransform + JoiningTransform 2 → 1 + (Expression) ExpressionTransform (Limit) Limit (ReadFromSystemNumbers) NumbersRange 0 → 1 + (Expression) + FillingRightJoinSide + ExpressionTransform + (Limit) + Limit + (ReadFromSystemNumbers) + NumbersRange 0 → 1 diff --git a/tests/queries/0_stateless/02962_max_joined_block_rows.sql b/tests/queries/0_stateless/02962_max_joined_block_rows.sql index 27b2a74b802..9edf757c0f7 100644 --- a/tests/queries/0_stateless/02962_max_joined_block_rows.sql +++ b/tests/queries/0_stateless/02962_max_joined_block_rows.sql @@ -8,6 +8,8 @@ CREATE table t2 (a UInt64) ENGINE = Memory; INSERT INTO t2 SELECT number % 2 FROM numbers(10); +SET min_joined_block_size_bytes = 0; + -- block size is always multiple of 5 because we have 5 rows for each key in right table -- we do not split rows corresponding to the same key From bf8fc60bacbb95e12760b00960115e2a6230c280 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 16:20:44 +0100 Subject: [PATCH 133/267] Arithmetic --- src/Functions/FunctionBinaryArithmetic.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index 854b40df441..43140427170 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -804,7 +804,7 @@ class FunctionBinaryArithmetic : public IFunction DataTypeFixedString, DataTypeString, DataTypeInterval>; - using Floats = TypeList; + using Floats = TypeList; using ValidTypes = std::conditional_t, @@ -2043,7 +2043,15 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A using DecimalResultType = typename BinaryOperationTraits::DecimalResultDataType; if constexpr (std::is_same_v) + { return nullptr; + } + else if constexpr ((std::is_same_v || std::is_same_v) + && (sizeof(typename LeftDataType::FieldType) > 8 || sizeof(typename RightDataType::FieldType) > 8)) + { + /// Big integers and BFloat16 are not supported together. + return nullptr; + } else // we can't avoid the else because otherwise the compiler may assume the ResultDataType may be Invalid // and that would produce the compile error. 
{ @@ -2060,7 +2068,7 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A ColumnPtr left_col = nullptr; ColumnPtr right_col = nullptr; - /// When Decimal op Float32/64, convert both of them into Float64 + /// When Decimal op Float32/64/16, convert both of them into Float64 if constexpr (decimal_with_float) { const auto converted_type = std::make_shared(); @@ -2095,7 +2103,6 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A /// Here we check if we have `intDiv` or `intDivOrZero` and at least one of the arguments is decimal, because in this case originally we had result as decimal, so we need to convert result into integer after calculations else if constexpr (!decimal_with_float && (is_int_div || is_int_div_or_zero) && (IsDataTypeDecimal || IsDataTypeDecimal)) { - if constexpr (!std::is_same_v) { DataTypePtr type_res; From 62c94a784158274e28cf05136cf4023de47f4f01 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 16:40:24 +0100 Subject: [PATCH 134/267] Maybe better --- cmake/cpu_features.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/cpu_features.cmake b/cmake/cpu_features.cmake index 2bb6deb4847..dbc77d835be 100644 --- a/cmake/cpu_features.cmake +++ b/cmake/cpu_features.cmake @@ -85,7 +85,7 @@ elseif (ARCH_AARCH64) # [8] https://developer.arm.com/documentation/102651/a/What-are-dot-product-intructions- # [9] https://developer.arm.com/documentation/dui0801/g/A64-Data-Transfer-Instructions/LDAPR?lang=en # [10] https://github.com/aws/aws-graviton-getting-started/blob/main/README.md - set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc") + set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc+bf16") endif () # Best-effort check: The build generates and executes intermediate binaries, e.g. protoc and llvm-tablegen. 
If we build on ARM for ARM From 08e6e598f7c140d0be39a64d933521872716ed2c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 17:41:37 +0100 Subject: [PATCH 135/267] Better code --- src/Common/findExtreme.h | 2 +- src/DataTypes/IDataType.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Common/findExtreme.h b/src/Common/findExtreme.h index c2b31c51e87..68e7360d6e2 100644 --- a/src/Common/findExtreme.h +++ b/src/Common/findExtreme.h @@ -11,7 +11,7 @@ namespace DB { template -concept has_find_extreme_implementation = (is_any_of); +concept has_find_extreme_implementation = (is_any_of); template std::optional findExtremeMin(const T * __restrict ptr, size_t start, size_t end); diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 4d64b927d83..1e41d6b2eba 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -606,7 +606,6 @@ template inline constexpr bool IsDataTypeEnum> = tr M(Int16) \ M(Int32) \ M(Int64) \ - M(BFloat16) \ M(Float32) \ M(Float64) From 715649900166b1d1b8aaefce215c9a80e6d60f69 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sun, 10 Nov 2024 19:16:19 +0100 Subject: [PATCH 136/267] don't reserve too much --- src/Interpreters/HashJoin/AddedColumns.h | 2 +- tests/performance/hashjoin_with_large_output.xml | 1 + tests/performance/scripts/perf.py | 3 ++- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index 8316d5df00f..885c1baca8c 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -169,7 +169,7 @@ public: return; /// Do not allow big allocations when user set max_joined_block_rows to huge value - size_t reserve_size = std::min(max_joined_block_rows, DEFAULT_BLOCK_SIZE * 2); /// rows_to_add + size_t reserve_size = std::min(max_joined_block_rows, rows_to_add * 2); if (need_replicate) /// Reserve 10% more space for columns, because some rows can be repeated diff --git a/tests/performance/hashjoin_with_large_output.xml b/tests/performance/hashjoin_with_large_output.xml index f4b61c15f82..1eb351255d4 100644 --- a/tests/performance/hashjoin_with_large_output.xml +++ b/tests/performance/hashjoin_with_large_output.xml @@ -9,6 +9,7 @@ settings join_algorithm='hash' + join_algorithm='parallel_hash' join_algorithm='grace_hash' diff --git a/tests/performance/scripts/perf.py b/tests/performance/scripts/perf.py index 9931178fcb4..e4a599cc78d 100755 --- a/tests/performance/scripts/perf.py +++ b/tests/performance/scripts/perf.py @@ -478,6 +478,8 @@ for query_index in queries_to_run: client_seconds = time.perf_counter() - start_seconds print(f"client-time\t{query_index}\t{client_seconds}\t{server_seconds}") + median = [statistics.median(t) for t in all_server_times] + print(f"median\t{query_index}\t{median[0]}") # Run additional profiling queries to collect profile data, but only if test times appeared to be different. # We have to do it after normal runs because otherwise it will affect test statistics too much @@ -491,7 +493,6 @@ for query_index in queries_to_run: pvalue = stats.ttest_ind( all_server_times[0], all_server_times[1], equal_var=False ).pvalue - median = [statistics.median(t) for t in all_server_times] # Keep this consistent with the value used in report. Should eventually move # to (median[1] - median[0]) / min(median), which is compatible with "times" # difference we use in report (max(median) / min(median)). 
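The `findExtreme.h` hunk in the "Better code" commit above tightens `has_find_extreme_implementation` so that `BFloat16` no longer satisfies it and falls back to the generic min/max path (the companion `IDataType.h` hunk likewise drops `M(BFloat16)` from the type-list macro). A sketch of how such a concept gate behaves; the `is_any_of` definition and the exact type list are assumptions for illustration, since the rendering above elides the template arguments:

```cpp
#include <concepts>
#include <cstdint>

class BFloat16; // the 16-bit type added by this patch series

// Assumed helper: true when T is one of the listed types.
template <typename T, typename... Ts>
concept is_any_of = (std::same_as<T, Ts> || ...);

// Only the listed native types get the specialized findExtremeMin/Max path;
// BFloat16 is deliberately not listed, so callers take the generic fallback.
template <typename T>
concept has_find_extreme_implementation = is_any_of<T,
    int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, float, double>;

static_assert(has_find_extreme_implementation<float>);
static_assert(!has_find_extreme_implementation<BFloat16>);
```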
From 9baa5911f9183e1652593b5d362545377baeea2a Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sun, 10 Nov 2024 20:54:59 +0000 Subject: [PATCH 137/267] Debugging stack with PR queries --- src/Planner/findParallelReplicasQuery.cpp | 57 +++++++++++++++++------ 1 file changed, 42 insertions(+), 15 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 28e2dd8a0ea..fbe2993b7c6 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -23,6 +23,8 @@ #include #include +#include + namespace DB { namespace Setting @@ -38,12 +40,12 @@ namespace ErrorCodes /// Returns a list of (sub)queries (candidates) which may support parallel replicas. /// The rule is : -/// subquery has only LEFT or ALL INNER JOIN (or none), and left part is MergeTree table or subquery candidate as well. +/// subquery has only LEFT / RIGHT / ALL INNER JOIN (or none), and the left / right part is a MergeTree table or a subquery candidate as well. /// /// Additional checks are required, so we return many candidates. The innermost subquery is on top. -std::stack getSupportingParallelReplicasQuery(const IQueryTreeNode * query_tree_node) +std::vector getSupportingParallelReplicasQuery(const IQueryTreeNode * query_tree_node) { - std::stack res; + std::vector res; while (query_tree_node) { @@ -75,7 +77,7 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre { const auto & query_node_to_process = query_tree_node->as(); query_tree_node = query_node_to_process.getJoinTree().get(); - res.push(&query_node_to_process); + res.push_back(&query_node_to_process); break; } case QueryTreeNodeType::UNION: @@ -162,14 +164,25 @@ QueryTreeNodePtr replaceTablesWithDummyTables(QueryTreeNodePtr query, const Cont return query->cloneAndReplace(visitor.replacement_map); } +static void dumpStack(const std::vector & stack) +{ + std::ranges::reverse_view rv{stack}; + for (const auto * node : rv) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "{}\n{}", CityHash_v1_0_2::Hash128to64(node->getTreeHash()), node->dumpTree()); +} + /// Find the best candidate for parallel replicas execution by verifying query plan. -/// If query plan has only Expression, Filter of Join steps, we can execute it fully remotely and check the next query. +/// If query plan has only Expression, Filter or Join steps, we can execute it fully remotely and check the next query. /// Otherwise we can execute current query up to WithMergableStage only. const QueryNode * findQueryForParallelReplicas( - std::stack stack, + std::vector stack, const std::unordered_map & mapping, const Settings & settings) { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "{}", StackTrace().toString()); + + dumpStack(stack); + struct Frame { const QueryPlan::Node * node = nullptr; @@ -188,14 +201,20 @@ const QueryNode * findQueryForParallelReplicas( while (!stack.empty()) { - const QueryNode * const subquery_node = stack.top(); - stack.pop(); + const QueryNode * const subquery_node = stack.back(); + stack.pop_back(); auto it = mapping.find(subquery_node); /// This should not happen ideally.
if (it == mapping.end()) break; + LOG_DEBUG( + getLogger(__PRETTY_FUNCTION__), + "{} : {}", + CityHash_v1_0_2::Hash128to64(it->first->getTreeHash()), + it->second->step->getName()); + std::stack nodes_to_check; nodes_to_check.push({.node = it->second, .inside_join = false}); bool can_distribute_full_node = true; @@ -208,6 +227,8 @@ const QueryNode * findQueryForParallelReplicas( const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Step {} children={}", step->getName(), children.size()); + if (children.empty()) { /// Found a source step. @@ -235,7 +256,7 @@ const QueryNode * findQueryForParallelReplicas( else { const auto * join = typeid_cast(step); - /// We've checked that JOIN is INNER/LEFT in query tree. + /// We've checked that JOIN is INNER/LEFT/RIGHT at the query tree level before. /// Don't distribute UNION node. if (!join) return res; @@ -286,7 +307,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr return nullptr; /// We don't have any subquery and storage can process parallel replicas by itself. - if (stack.top() == query_tree_node.get()) + if (stack.back() == query_tree_node.get()) return nullptr; /// This is needed to avoid infinite recursion. @@ -309,18 +330,24 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr const auto & mapping = planner.getQueryNodeToPlanStepMapping(); const auto * res = findQueryForParallelReplicas(new_stack, mapping, context->getSettingsRef()); - /// Now, return a query from initial stack. if (res) { + /// Find the query in the initial stack. while (!new_stack.empty()) { - if (res == new_stack.top()) - return stack.top(); + if (res == new_stack.back()) + { + res = stack.back(); + break; + } - stack.pop(); - new_stack.pop(); + stack.pop_back(); + new_stack.pop_back(); } } + + if (res) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Chosen query: {}", res->dumpTree()); return res; } From 7877d59ff6e7334cde310b2eec626bc6ba7442fe Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 22:13:22 +0100 Subject: [PATCH 138/267] Manual implementation --- base/base/BFloat16.h | 300 +++++++++++++++++- base/base/DecomposedFloat.h | 2 +- base/base/TypeLists.h | 5 +- src/AggregateFunctions/AggregateFunctionAvg.h | 2 +- .../AggregateFunctionDeltaSum.cpp | 6 +- .../AggregateFunctionDeltaSumTimestamp.cpp | 10 +- .../AggregateFunctionMaxIntersections.cpp | 6 +- .../AggregateFunctionSparkbar.cpp | 12 +- src/AggregateFunctions/AggregateFunctionSum.h | 11 +- src/Core/Types_fwd.h | 2 +- src/Functions/FunctionsRound.h | 2 +- src/Functions/PolygonUtils.h | 4 +- src/Functions/divide.cpp | 2 +- src/IO/WriteHelpers.h | 2 +- 14 files changed, 318 insertions(+), 48 deletions(-) diff --git a/base/base/BFloat16.h b/base/base/BFloat16.h index 99eab5c67cb..9c6196d6aab 100644 --- a/base/base/BFloat16.h +++ b/base/base/BFloat16.h @@ -1,22 +1,294 @@ #pragma once -#include +#include +#include -using BFloat16 = __bf16; +//using BFloat16 = __bf16; + +class BFloat16 +{ +private: + UInt16 x = 0; + +public: + constexpr BFloat16() = default; + constexpr BFloat16(const BFloat16 & other) = default; + constexpr BFloat16 & operator=(const BFloat16 & other) = default; + + explicit constexpr BFloat16(const Float32 & other) + { + x = static_cast(std::bit_cast(other) >> 16); + } + + template + explicit constexpr BFloat16(const T & other) + : BFloat16(Float32(other)) + { + } + + template + constexpr BFloat16 & operator=(const T & other) + { + *this
= BFloat16(other); + return *this; + } + + explicit constexpr operator Float32() const + { + return std::bit_cast(static_cast(x) << 16); + } + + template + explicit constexpr operator T() const + { + return T(Float32(*this)); + } + + constexpr bool isFinite() const + { + return (x & 0b0111111110000000) != 0b0111111110000000; + } + + constexpr bool isNaN() const + { + return !isFinite() && (x & 0b0000000001111111) != 0b0000000000000000; + } + + constexpr bool signBit() const + { + return x & 0b1000000000000000; + } + + constexpr bool operator==(const BFloat16 & other) const + { + return x == other.x; + } + + constexpr bool operator!=(const BFloat16 & other) const + { + return x != other.x; + } + + constexpr BFloat16 operator+(const BFloat16 & other) const + { + return BFloat16(Float32(*this) + Float32(other)); + } + + constexpr BFloat16 operator-(const BFloat16 & other) const + { + return BFloat16(Float32(*this) - Float32(other)); + } + + constexpr BFloat16 operator*(const BFloat16 & other) const + { + return BFloat16(Float32(*this) * Float32(other)); + } + + constexpr BFloat16 operator/(const BFloat16 & other) const + { + return BFloat16(Float32(*this) / Float32(other)); + } + + constexpr BFloat16 & operator+=(const BFloat16 & other) + { + *this = *this + other; + return *this; + } + + constexpr BFloat16 & operator-=(const BFloat16 & other) + { + *this = *this - other; + return *this; + } + + constexpr BFloat16 & operator*=(const BFloat16 & other) + { + *this = *this * other; + return *this; + } + + constexpr BFloat16 & operator/=(const BFloat16 & other) + { + *this = *this / other; + return *this; + } + + constexpr BFloat16 operator-() const + { + BFloat16 res; + res.x = x ^ 0b1000000000000000; + return res; + } +}; + + +template +requires(!std::is_same_v) +constexpr bool operator==(const BFloat16 & a, const T & b) +{ + return Float32(a) == b; +} + +template +requires(!std::is_same_v) +constexpr bool operator==(const T & a, const BFloat16 & b) +{ + return a == Float32(b); +} + +template +requires(!std::is_same_v) +constexpr bool operator!=(const BFloat16 & a, const T & b) +{ + return Float32(a) != b; +} + +template +requires(!std::is_same_v) +constexpr bool operator!=(const T & a, const BFloat16 & b) +{ + return a != Float32(b); +} + +template +requires(!std::is_same_v) +constexpr bool operator<(const BFloat16 & a, const T & b) +{ + return Float32(a) < b; +} + +template +requires(!std::is_same_v) +constexpr bool operator<(const T & a, const BFloat16 & b) +{ + return a < Float32(b); +} + +constexpr inline bool operator<(BFloat16 a, BFloat16 b) +{ + return Float32(a) < Float32(b); +} + +template +requires(!std::is_same_v) +constexpr bool operator>(const BFloat16 & a, const T & b) +{ + return Float32(a) > b; +} + +template +requires(!std::is_same_v) +constexpr bool operator>(const T & a, const BFloat16 & b) +{ + return a > Float32(b); +} + +constexpr inline bool operator>(BFloat16 a, BFloat16 b) +{ + return Float32(a) > Float32(b); +} + + +template +requires(!std::is_same_v) +constexpr bool operator<=(const BFloat16 & a, const T & b) +{ + return Float32(a) <= b; +} + +template +requires(!std::is_same_v) +constexpr bool operator<=(const T & a, const BFloat16 & b) +{ + return a <= Float32(b); +} + +constexpr inline bool operator<=(BFloat16 a, BFloat16 b) +{ + return Float32(a) <= Float32(b); +} + +template +requires(!std::is_same_v) +constexpr bool operator>=(const BFloat16 & a, const T & b) +{ + return Float32(a) >= b; +} + +template +requires(!std::is_same_v) +constexpr bool operator>=(const 
T & a, const BFloat16 & b) +{ + return a >= Float32(b); +} + +constexpr inline bool operator>=(BFloat16 a, BFloat16 b) +{ + return Float32(a) >= Float32(b); +} + + +template +requires(!std::is_same_v) +constexpr inline auto operator+(T a, BFloat16 b) +{ + return a + Float32(b); +} + +template +requires(!std::is_same_v) +constexpr inline auto operator+(BFloat16 a, T b) +{ + return Float32(a) + b; +} + +template +requires(!std::is_same_v) +constexpr inline auto operator-(T a, BFloat16 b) +{ + return a - Float32(b); +} + +template +requires(!std::is_same_v) +constexpr inline auto operator-(BFloat16 a, T b) +{ + return Float32(a) - b; +} + +template +requires(!std::is_same_v) +constexpr inline auto operator*(T a, BFloat16 b) +{ + return a * Float32(b); +} + +template +requires(!std::is_same_v) +constexpr inline auto operator*(BFloat16 a, T b) +{ + return Float32(a) * b; +} + +template +requires(!std::is_same_v) +constexpr inline auto operator/(T a, BFloat16 b) +{ + return a / Float32(b); +} + +template +requires(!std::is_same_v) +constexpr inline auto operator/(BFloat16 a, T b) +{ + return Float32(a) / b; +} + namespace std { - inline constexpr bool isfinite(BFloat16 x) { return (bit_cast(x) & 0b0111111110000000) != 0b0111111110000000; } - inline constexpr bool signbit(BFloat16 x) { return bit_cast(x) & 0b1000000000000000; } -} - -inline Float32 BFloat16ToFloat32(BFloat16 x) -{ - return bit_cast(static_cast(bit_cast(x)) << 16); -} - -inline BFloat16 Float32ToBFloat16(Float32 x) -{ - return bit_cast(std::bit_cast(x) >> 16); + inline constexpr bool isfinite(BFloat16 x) { return x.isFinite(); } + inline constexpr bool isnan(BFloat16 x) { return x.isNaN(); } + inline constexpr bool signbit(BFloat16 x) { return x.signBit(); } } diff --git a/base/base/DecomposedFloat.h b/base/base/DecomposedFloat.h index 26a929b4997..3bd059cb21c 100644 --- a/base/base/DecomposedFloat.h +++ b/base/base/DecomposedFloat.h @@ -11,7 +11,7 @@ template struct FloatTraits; template <> -struct FloatTraits<__bf16> +struct FloatTraits { using UInt = uint16_t; static constexpr size_t bits = 16; diff --git a/base/base/TypeLists.h b/base/base/TypeLists.h index ce3111b1da3..375ea94b5ea 100644 --- a/base/base/TypeLists.h +++ b/base/base/TypeLists.h @@ -9,10 +9,11 @@ namespace DB { using TypeListNativeInt = TypeList; -using TypeListFloat = TypeList; -using TypeListNativeNumber = TypeListConcat; +using TypeListNativeFloat = TypeList; +using TypeListNativeNumber = TypeListConcat; using TypeListWideInt = TypeList; using TypeListInt = TypeListConcat; +using TypeListFloat = TypeListConcat>; using TypeListIntAndFloat = TypeListConcat; using TypeListDecimal = TypeList; using TypeListNumber = TypeListConcat; diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index 6e1e9289565..8d53a081ee0 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -231,7 +231,7 @@ public: void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final { - increment(place, static_cast(*columns[0]).getData()[row_num]); + increment(place, Numerator(static_cast(*columns[0]).getData()[row_num])); ++this->data(place).denominator; } diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSum.cpp b/src/AggregateFunctions/AggregateFunctionDeltaSum.cpp index 42169c34c25..c61b9918a35 100644 --- a/src/AggregateFunctions/AggregateFunctionDeltaSum.cpp +++ b/src/AggregateFunctions/AggregateFunctionDeltaSum.cpp @@ -27,9 
+27,9 @@ namespace template struct AggregationFunctionDeltaSumData { - T sum = 0; - T last = 0; - T first = 0; + T sum{}; + T last{}; + T first{}; bool seen = false; }; diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp index 5819c533fd9..dc1adead87c 100644 --- a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp +++ b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp @@ -25,11 +25,11 @@ namespace template struct AggregationFunctionDeltaSumTimestampData { - ValueType sum = 0; - ValueType first = 0; - ValueType last = 0; - TimestampType first_ts = 0; - TimestampType last_ts = 0; + ValueType sum{}; + ValueType first{}; + ValueType last{}; + TimestampType first_ts{}; + TimestampType last_ts{}; bool seen = false; }; diff --git a/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp b/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp index ca91f960dab..f4edec7f528 100644 --- a/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp +++ b/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp @@ -155,9 +155,9 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - Int64 current_intersections = 0; - Int64 max_intersections = 0; - PointType position_of_max_intersections = 0; + Int64 current_intersections{}; + Int64 max_intersections{}; + PointType position_of_max_intersections{}; /// const_cast because we will sort the array auto & array = this->data(place).value; diff --git a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp index 33412d50b21..de2a741e105 100644 --- a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp +++ b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp @@ -45,7 +45,7 @@ struct AggregateFunctionSparkbarData Y insert(const X & x, const Y & y) { if (isNaN(y) || y <= 0) - return 0; + return {}; auto [it, inserted] = points.insert({x, y}); if (!inserted) @@ -173,13 +173,13 @@ private: if (from_x >= to_x) { - size_t sz = updateFrame(values, 8); + size_t sz = updateFrame(values, Y{8}); values.push_back('\0'); offsets.push_back(offsets.empty() ? 
sz + 1 : offsets.back() + sz + 1); return; } - PaddedPODArray histogram(width, 0); + PaddedPODArray histogram(width, Y{0}); PaddedPODArray count_histogram(width, 0); /// The number of points in each bucket for (const auto & point : data.points) @@ -218,10 +218,10 @@ private: for (size_t i = 0; i < histogram.size(); ++i) { if (count_histogram[i] > 0) - histogram[i] /= count_histogram[i]; + histogram[i] = histogram[i] / count_histogram[i]; } - Y y_max = 0; + Y y_max{}; for (auto & y : histogram) { if (isNaN(y) || y <= 0) @@ -245,7 +245,7 @@ private: continue; } - constexpr auto levels_num = static_cast(BAR_LEVELS - 1); + constexpr auto levels_num = Y{BAR_LEVELS - 1}; if constexpr (is_floating_point) { y = y / (y_max / levels_num) + 1; diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index f6c51241a5c..7c7fb6338a2 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -83,7 +83,7 @@ struct AggregateFunctionSumData while (ptr < unrolled_end) { for (size_t i = 0; i < unroll_count; ++i) - Impl::add(partial_sums[i], ptr[i]); + Impl::add(partial_sums[i], T(ptr[i])); ptr += unroll_count; } @@ -95,7 +95,7 @@ struct AggregateFunctionSumData T local_sum{}; while (ptr < end_ptr) { - Impl::add(local_sum, *ptr); + Impl::add(local_sum, T(*ptr)); ++ptr; } Impl::add(sum, local_sum); @@ -227,7 +227,7 @@ struct AggregateFunctionSumData while (ptr < end_ptr) { if (!*condition_map == add_if_zero) - Impl::add(local_sum, *ptr); + Impl::add(local_sum, T(*ptr)); ++ptr; ++condition_map; } @@ -488,10 +488,7 @@ public: void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { const auto & column = assert_cast(*columns[0]); - if constexpr (is_big_int_v) - this->data(place).add(static_cast(column.getData()[row_num])); - else - this->data(place).add(column.getData()[row_num]); + this->data(place).add(static_cast(column.getData()[row_num])); } void addBatchSinglePlace( diff --git a/src/Core/Types_fwd.h b/src/Core/Types_fwd.h index 6d3383ae7ff..b94a29ce72c 100644 --- a/src/Core/Types_fwd.h +++ b/src/Core/Types_fwd.h @@ -21,7 +21,7 @@ using Int128 = wide::integer<128, signed>; using UInt128 = wide::integer<128, unsigned>; using Int256 = wide::integer<256, signed>; using UInt256 = wide::integer<256, unsigned>; -using BFloat16 = __bf16; +class BFloat16; namespace DB { diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 70ad4d17718..6c9cc8a37b3 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -298,7 +298,7 @@ public: static VectorType prepare(size_t scale) { - return load1(scale); + return load1(ScalarType(scale)); } }; diff --git a/src/Functions/PolygonUtils.h b/src/Functions/PolygonUtils.h index bf8241774a6..601ffcb00b4 100644 --- a/src/Functions/PolygonUtils.h +++ b/src/Functions/PolygonUtils.h @@ -583,7 +583,7 @@ struct CallPointInPolygon template static ColumnPtr call(const IColumn & x, const IColumn & y, PointInPolygonImpl && impl) { - using Impl = TypeListChangeRoot; + using Impl = TypeListChangeRoot; if (auto column = typeid_cast *>(&x)) return Impl::template call(*column, y, impl); return CallPointInPolygon::call(x, y, impl); @@ -609,7 +609,7 @@ struct CallPointInPolygon<> template NO_INLINE ColumnPtr pointInPolygon(const IColumn & x, const IColumn & y, PointInPolygonImpl && impl) { - using Impl = TypeListChangeRoot; + using Impl = TypeListChangeRoot; return Impl::call(x, y, impl); } 
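An illustrative aside (not part of the patch series): the conversions in the hunks above rely on BFloat16 being the top half of a Float32, so widening and truncating are single 16-bit shifts of the bit pattern, as the removed BFloat16ToFloat32/Float32ToBFloat16 helpers show. A minimal self-contained sketch, assuming C++20 for std::bit_cast; the function names here are hypothetical:

#include <bit>
#include <cstdint>
#include <cstdio>

/// Truncate Float32 to BFloat16: keep the sign bit, all 8 exponent bits, and the top 7 mantissa bits.
static uint16_t float32ToBFloat16(float f)
{
    return static_cast<uint16_t>(std::bit_cast<uint32_t>(f) >> 16);
}

/// Widen BFloat16 to Float32: exact, since the low 16 mantissa bits are simply zero.
static float bfloat16ToFloat32(uint16_t b)
{
    return std::bit_cast<float>(static_cast<uint32_t>(b) << 16);
}

int main()
{
    float x = 1.1f;                     /// bit pattern 0x3F8CCCCD
    uint16_t b = float32ToBFloat16(x);  /// 0x3F8C
    float y = bfloat16ToFloat32(b);     /// 1.09375
    printf("%.6f -> 0x%04X -> %.6f\n", x, static_cast<unsigned>(b), y);
    return 0;
}

Note that this truncation rounds toward zero rather than to nearest, which is why 1.1 becomes 1.09375 in the tests added later in this series.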
diff --git a/src/Functions/divide.cpp b/src/Functions/divide.cpp index 7c67245c382..3947ba2d142 100644 --- a/src/Functions/divide.cpp +++ b/src/Functions/divide.cpp @@ -18,7 +18,7 @@ struct DivideFloatingImpl template static NO_SANITIZE_UNDEFINED Result apply(A a [[maybe_unused]], B b [[maybe_unused]]) { - return static_cast(a) / b; + return static_cast(a) / static_cast(b); } #if USE_EMBEDDED_COMPILER diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index f01e09e3f73..0a32c4c5446 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -174,7 +174,7 @@ inline size_t writeFloatTextFastPath(T x, char * buffer) } else if constexpr (std::is_same_v) { - Float32 f32 = BFloat16ToFloat32(x); + Float32 f32 = Float32(x); if (DecomposedFloat32(f32).isIntegerInRepresentableRange()) result = itoa(Int32(f32), buffer) - buffer; From 16d05bbc6d9a1369b393f836d0ccd8ea64fe2057 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 22:41:40 +0100 Subject: [PATCH 139/267] Comparisons --- base/base/BFloat16.h | 22 +++++++++++++++++++++- src/Functions/FunctionsComparison.h | 7 +++++-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/base/base/BFloat16.h b/base/base/BFloat16.h index 9c6196d6aab..f7491b64eb3 100644 --- a/base/base/BFloat16.h +++ b/base/base/BFloat16.h @@ -4,8 +4,28 @@ #include -//using BFloat16 = __bf16; +/** BFloat16 is a 16-bit floating point type, which has the same number (8) of exponent bits as Float32. + * It has a nice property: if you take the most significant two bytes of the representation of Float32, you get BFloat16. + * It is different from the IEEE Float16 (half precision) data type, which has fewer exponent bits and more mantissa bits. + * + * It is popular in AI applications, such as running quantized models and doing vector search, + * where the range of the data type is more important than its precision. + * + * It has also recently gained good hardware support in GPUs, as well as in x86-64 and AArch64 CPUs, including SIMD instructions, + * but this support is rarely utilized by compilers. + * + * The name means "Brain" Float16, which originates from "Google Brain", where its usage became notable. + * It is also known under the name "bf16". You can call it either way, but it is crucial not to confuse it with Float16. + * Here is a manual implementation of this data type. Only the required operations are implemented. + * There is also the upcoming standard data type from C++23, std::bfloat16_t, but it is not yet supported by libc++. + * There is also the compiler's builtin data type, __bf16, but clang does not compile all operations with it, + * sometimes giving an "invalid function call" error (which suggests a sketchy implementation) + * and giving errors in the "instruction select" pass during link-time optimization. + * + * The current approach is to use this manual implementation, and to provide SIMD specializations of certain operations + * in places where it is needed.
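 + * For example, the Float32 value 1.1 has the bit pattern 0x3F8CCCCD; keeping only the top two bytes gives 0x3F8C, + * which reads back as 1.09375, the value the tests later in this series observe for 1.1::BFloat16.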
+ */ class BFloat16 { private: diff --git a/src/Functions/FunctionsComparison.h b/src/Functions/FunctionsComparison.h index be0875581a5..bcb9e0641b8 100644 --- a/src/Functions/FunctionsComparison.h +++ b/src/Functions/FunctionsComparison.h @@ -721,6 +721,7 @@ private: || (res = executeNumRightType(col_left, col_right_untyped)) || (res = executeNumRightType(col_left, col_right_untyped)) || (res = executeNumRightType(col_left, col_right_untyped)) + || (res = executeNumRightType(col_left, col_right_untyped)) || (res = executeNumRightType(col_left, col_right_untyped)) || (res = executeNumRightType(col_left, col_right_untyped))) return res; @@ -741,6 +742,7 @@ private: || (res = executeNumConstRightType(col_left_const, col_right_untyped)) || (res = executeNumConstRightType(col_left_const, col_right_untyped)) || (res = executeNumConstRightType(col_left_const, col_right_untyped)) + || (res = executeNumConstRightType(col_left_const, col_right_untyped)) || (res = executeNumConstRightType(col_left_const, col_right_untyped)) || (res = executeNumConstRightType(col_left_const, col_right_untyped))) return res; @@ -1289,9 +1291,10 @@ public: || (res = executeNumLeftType(col_left_untyped, col_right_untyped)) || (res = executeNumLeftType(col_left_untyped, col_right_untyped)) || (res = executeNumLeftType(col_left_untyped, col_right_untyped)) + || (res = executeNumLeftType(col_left_untyped, col_right_untyped)) || (res = executeNumLeftType(col_left_untyped, col_right_untyped)) || (res = executeNumLeftType(col_left_untyped, col_right_untyped)))) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of the first argument of function {}", col_left_untyped->getName(), getName()); return res; @@ -1339,7 +1342,7 @@ public: getName(), left_type->getName(), right_type->getName()); - /// When Decimal comparing to Float32/64, we convert both of them into Float64. + /// When comparing Decimal to Float32/64/16, we convert both of them into Float64. /// Other systems like MySQL and Spark also do as this.
if (left_is_float || right_is_float) { From 92e8fa23ba0073f2caa43d66bab5d99475d3c656 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 23:43:10 +0100 Subject: [PATCH 140/267] Remove obsolete setting from tests --- src/Databases/enableAllExperimentalSettings.cpp | 1 - tests/performance/avg_weighted.xml | 1 - tests/performance/reinterpret_as.xml | 1 - tests/queries/0_stateless/01035_avg.sql | 2 -- .../0_stateless/01182_materialized_view_different_structure.sql | 1 - tests/queries/0_stateless/01440_big_int_exotic_casts.sql | 2 -- .../0_stateless/01554_bloom_filter_index_big_integer_uuid.sql | 2 -- tests/queries/0_stateless/01622_byte_size.sql | 2 -- tests/queries/0_stateless/01721_dictionary_decimal_p_s.sql | 2 -- tests/queries/0_stateless/01804_dictionary_decimal256_type.sql | 2 -- .../0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh | 2 -- 11 files changed, 18 deletions(-) diff --git a/src/Databases/enableAllExperimentalSettings.cpp b/src/Databases/enableAllExperimentalSettings.cpp index d51d2671992..bc2dae55f97 100644 --- a/src/Databases/enableAllExperimentalSettings.cpp +++ b/src/Databases/enableAllExperimentalSettings.cpp @@ -24,7 +24,6 @@ void enableAllExperimentalSettings(ContextMutablePtr context) context->setSetting("allow_experimental_dynamic_type", 1); context->setSetting("allow_experimental_json_type", 1); context->setSetting("allow_experimental_vector_similarity_index", 1); - context->setSetting("allow_experimental_bigint_types", 1); context->setSetting("allow_experimental_window_functions", 1); context->setSetting("allow_experimental_geo_types", 1); context->setSetting("allow_experimental_map_type", 1); diff --git a/tests/performance/avg_weighted.xml b/tests/performance/avg_weighted.xml index edf3c19fdfa..ec1b7aae5c2 100644 --- a/tests/performance/avg_weighted.xml +++ b/tests/performance/avg_weighted.xml @@ -1,6 +1,5 @@ - 1 1 8 diff --git a/tests/performance/reinterpret_as.xml b/tests/performance/reinterpret_as.xml index d05ef3bb038..2e0fa0571c3 100644 --- a/tests/performance/reinterpret_as.xml +++ b/tests/performance/reinterpret_as.xml @@ -1,6 +1,5 @@ - 1 15G diff --git a/tests/queries/0_stateless/01035_avg.sql b/tests/queries/0_stateless/01035_avg.sql index a3cb35a80ec..0f7baddaec5 100644 --- a/tests/queries/0_stateless/01035_avg.sql +++ b/tests/queries/0_stateless/01035_avg.sql @@ -1,5 +1,3 @@ -SET allow_experimental_bigint_types=1; - CREATE TABLE IF NOT EXISTS test_01035_avg ( i8 Int8 DEFAULT i64, i16 Int16 DEFAULT i64, diff --git a/tests/queries/0_stateless/01182_materialized_view_different_structure.sql b/tests/queries/0_stateless/01182_materialized_view_different_structure.sql index 485f9985974..7e41172bd0c 100644 --- a/tests/queries/0_stateless/01182_materialized_view_different_structure.sql +++ b/tests/queries/0_stateless/01182_materialized_view_different_structure.sql @@ -20,7 +20,6 @@ SELECT sum(value) FROM (SELECT number, sum(number) AS value FROM (SELECT *, toDe CREATE TABLE src (n UInt64, s FixedString(16)) ENGINE=Memory; CREATE TABLE dst (n UInt8, s String) ENGINE = Memory; CREATE MATERIALIZED VIEW mv TO dst (n String) AS SELECT * FROM src; -SET allow_experimental_bigint_types=1; CREATE TABLE dist (n Int128) ENGINE=Distributed(test_cluster_two_shards, currentDatabase(), mv); INSERT INTO src SELECT number, toString(number) FROM numbers(1000); diff --git a/tests/queries/0_stateless/01440_big_int_exotic_casts.sql b/tests/queries/0_stateless/01440_big_int_exotic_casts.sql index 42fde9da01b..f411af897e8 100644 --- 
a/tests/queries/0_stateless/01440_big_int_exotic_casts.sql +++ b/tests/queries/0_stateless/01440_big_int_exotic_casts.sql @@ -32,8 +32,6 @@ SELECT number y, toInt128(number) - y, toInt256(number) - y, toUInt256(number) - SELECT -number y, toInt128(number) + y, toInt256(number) + y, toUInt256(number) + y FROM numbers_mt(10) ORDER BY number; -SET allow_experimental_bigint_types = 1; - DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64, i256 Int256, u256 UInt256, d256 Decimal256(2)) ENGINE = Memory; diff --git a/tests/queries/0_stateless/01554_bloom_filter_index_big_integer_uuid.sql b/tests/queries/0_stateless/01554_bloom_filter_index_big_integer_uuid.sql index 3472f41092d..f82fe39f439 100644 --- a/tests/queries/0_stateless/01554_bloom_filter_index_big_integer_uuid.sql +++ b/tests/queries/0_stateless/01554_bloom_filter_index_big_integer_uuid.sql @@ -1,5 +1,3 @@ -SET allow_experimental_bigint_types = 1; - CREATE TABLE 01154_test (x Int128, INDEX ix_x x TYPE bloom_filter(0.01) GRANULARITY 1) ENGINE = MergeTree() ORDER BY x SETTINGS index_granularity=8192; INSERT INTO 01154_test VALUES (1), (2), (3); SELECT x FROM 01154_test WHERE x = 1; diff --git a/tests/queries/0_stateless/01622_byte_size.sql b/tests/queries/0_stateless/01622_byte_size.sql index 9f9de4e58e9..f73011f4151 100644 --- a/tests/queries/0_stateless/01622_byte_size.sql +++ b/tests/queries/0_stateless/01622_byte_size.sql @@ -4,8 +4,6 @@ select ''; select '# byteSize'; -set allow_experimental_bigint_types = 1; - -- numbers #0 -- select ''; select 'byteSize for numbers #0'; diff --git a/tests/queries/0_stateless/01721_dictionary_decimal_p_s.sql b/tests/queries/0_stateless/01721_dictionary_decimal_p_s.sql index 272bd2d7104..57483430cc0 100644 --- a/tests/queries/0_stateless/01721_dictionary_decimal_p_s.sql +++ b/tests/queries/0_stateless/01721_dictionary_decimal_p_s.sql @@ -1,6 +1,5 @@ -- Tags: no-parallel -set allow_experimental_bigint_types=1; drop database if exists db_01721; drop table if exists db_01721.table_decimal_dict; drop dictionary if exists db_01721.decimal_dict; @@ -77,4 +76,3 @@ SELECT dictGet('db_01721.decimal_dict', 'Decimal32_', toUInt64(5000)), drop table if exists table_decimal_dict; drop dictionary if exists cache_dict; drop database if exists db_01721; - diff --git a/tests/queries/0_stateless/01804_dictionary_decimal256_type.sql b/tests/queries/0_stateless/01804_dictionary_decimal256_type.sql index 08a8d0feb27..32b029442b9 100644 --- a/tests/queries/0_stateless/01804_dictionary_decimal256_type.sql +++ b/tests/queries/0_stateless/01804_dictionary_decimal256_type.sql @@ -1,7 +1,5 @@ -- Tags: no-parallel -SET allow_experimental_bigint_types = 1; - DROP TABLE IF EXISTS dictionary_decimal_source_table; CREATE TABLE dictionary_decimal_source_table ( diff --git a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh index 1294ba53e82..2a24a931696 100755 --- a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh +++ b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh @@ -6,8 +6,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query=" - SET allow_experimental_bigint_types = 1; - DROP TABLE IF EXISTS dictionary_decimal_source_table; CREATE TABLE dictionary_decimal_source_table ( From 19ab7d484a6d7a2346103c5468bca611df03e3d9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 10 Nov 2024 23:50:31 +0100 Subject: [PATCH 141/267] Add an experimental setting --- src/Core/Settings.cpp | 5 ++++- src/Core/SettingsChangesHistory.cpp | 1 + .../parseColumnsListForTableFunction.cpp | 14 ++++++++++++++ .../parseColumnsListForTableFunction.h | 1 + 4 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 01339226c2d..7c2042ee16d 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -5729,7 +5729,10 @@ Enable experimental functions for natural language processing. Enable experimental hash functions )", EXPERIMENTAL) \ DECLARE(Bool, allow_experimental_object_type, false, R"( -Allow Object and JSON data types +Allow the obsolete Object data type +)", EXPERIMENTAL) \ + DECLARE(Bool, allow_experimental_bfloat16_type, false, R"( +Allow BFloat16 data type (under development). )", EXPERIMENTAL) \ DECLARE(Bool, allow_experimental_time_series_table, false, R"( Allows creation of tables with the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine. diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 0ff9d0a6833..23aeeb47224 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -77,6 +77,7 @@ static std::initializer_list(&data_type)) diff --git a/src/Interpreters/parseColumnsListForTableFunction.h b/src/Interpreters/parseColumnsListForTableFunction.h index 6e00492c0ad..39b9f092d89 100644 --- a/src/Interpreters/parseColumnsListForTableFunction.h +++ b/src/Interpreters/parseColumnsListForTableFunction.h @@ -20,6 +20,7 @@ struct DataTypeValidationSettings bool allow_experimental_object_type = true; bool allow_suspicious_fixed_string_types = true; bool allow_experimental_variant_type = true; + bool allow_experimental_bfloat16_type = true; bool allow_suspicious_variant_types = true; bool validate_nested_types = true; bool allow_experimental_dynamic_type = true; From 1a2ee7929e746395a6f0426b6935887af287fd30 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 00:16:09 +0100 Subject: [PATCH 142/267] More conversions --- src/Functions/FunctionsConversion.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 7f4ccc338cf..effaa6faa6d 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -1862,11 +1862,6 @@ struct ConvertImpl } } - if constexpr ((std::is_same_v || std::is_same_v) - && !(std::is_same_v || std::is_same_v)) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported", - TypeName, TypeName); - if constexpr (std::is_same_v || std::is_same_v) { From f042c921ee84ef583f1b76c9d4587b963bd06f45 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 00:16:28 +0100 Subject: [PATCH 143/267] Distances --- base/base/BFloat16.h | 7 ++ src/Common/CPUID.h | 6 ++ src/Common/TargetSpecific.cpp | 3 + src/Common/TargetSpecific.h | 26 +++++- src/Functions/array/arrayDistance.cpp | 112 +++++++++++++++++++------- 5 files changed, 119 insertions(+), 35 deletions(-) diff --git a/base/base/BFloat16.h b/base/base/BFloat16.h index 
f7491b64eb3..2df84dbc0f2 100644 --- a/base/base/BFloat16.h +++ b/base/base/BFloat16.h @@ -80,6 +80,13 @@ public: return x & 0b1000000000000000; } + constexpr BFloat16 abs() const + { + BFloat16 res; + res.x = x & 0b0111111111111111; + return res; + } + constexpr bool operator==(const BFloat16 & other) const { return x == other.x; diff --git a/src/Common/CPUID.h b/src/Common/CPUID.h index b49f7706904..b5c26e64d1e 100644 --- a/src/Common/CPUID.h +++ b/src/Common/CPUID.h @@ -266,6 +266,11 @@ inline bool haveAVX512VBMI2() noexcept return haveAVX512F() && ((CPUInfo(0x7, 0).registers.ecx >> 6) & 1u); } +inline bool haveAVX512BF16() noexcept +{ + return haveAVX512F() && ((CPUInfo(0x7, 1).registers.eax >> 5) & 1u); +} + inline bool haveRDRAND() noexcept { return CPUInfo(0x0).registers.eax >= 0x7 && ((CPUInfo(0x1).registers.ecx >> 30) & 1u); @@ -326,6 +331,7 @@ inline bool haveAMXINT8() noexcept OP(AVX512VL) \ OP(AVX512VBMI) \ OP(AVX512VBMI2) \ + OP(AVX512BF16) \ OP(PREFETCHWT1) \ OP(SHA) \ OP(ADX) \ diff --git a/src/Common/TargetSpecific.cpp b/src/Common/TargetSpecific.cpp index 8540c9a9986..4400d9a60b3 100644 --- a/src/Common/TargetSpecific.cpp +++ b/src/Common/TargetSpecific.cpp @@ -23,6 +23,8 @@ UInt32 getSupportedArchs() result |= static_cast(TargetArch::AVX512VBMI); if (CPU::CPUFlagsCache::have_AVX512VBMI2) result |= static_cast(TargetArch::AVX512VBMI2); + if (CPU::CPUFlagsCache::have_AVX512BF16) + result |= static_cast(TargetArch::AVX512BF16); if (CPU::CPUFlagsCache::have_AMXBF16) result |= static_cast(TargetArch::AMXBF16); if (CPU::CPUFlagsCache::have_AMXTILE) @@ -50,6 +52,7 @@ String toString(TargetArch arch) case TargetArch::AVX512BW: return "avx512bw"; case TargetArch::AVX512VBMI: return "avx512vbmi"; case TargetArch::AVX512VBMI2: return "avx512vbmi2"; + case TargetArch::AVX512BF16: return "avx512bf16"; case TargetArch::AMXBF16: return "amxbf16"; case TargetArch::AMXTILE: return "amxtile"; case TargetArch::AMXINT8: return "amxint8"; diff --git a/src/Common/TargetSpecific.h b/src/Common/TargetSpecific.h index f9523f667b2..5584bd1f63a 100644 --- a/src/Common/TargetSpecific.h +++ b/src/Common/TargetSpecific.h @@ -83,9 +83,10 @@ enum class TargetArch : UInt32 AVX512BW = (1 << 4), AVX512VBMI = (1 << 5), AVX512VBMI2 = (1 << 6), - AMXBF16 = (1 << 7), - AMXTILE = (1 << 8), - AMXINT8 = (1 << 9), + AVX512BF16 = (1 << 7), + AMXBF16 = (1 << 8), + AMXTILE = (1 << 9), + AMXINT8 = (1 << 10), }; /// Runtime detection.
@@ -102,6 +103,7 @@ String toString(TargetArch arch); /// NOLINTNEXTLINE #define USE_MULTITARGET_CODE 1 +#define AVX512BF16_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2,avx512bf16"))) #define AVX512VBMI2_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2"))) #define AVX512VBMI_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi"))) #define AVX512BW_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw"))) @@ -111,6 +113,8 @@ String toString(TargetArch arch); #define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt"))) #define DEFAULT_FUNCTION_SPECIFIC_ATTRIBUTE +# define BEGIN_AVX512BF16_SPECIFIC_CODE \ + _Pragma("clang attribute push(__attribute__((target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2,avx512bf16\"))),apply_to=function)") # define BEGIN_AVX512VBMI2_SPECIFIC_CODE \ _Pragma("clang attribute push(__attribute__((target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2\"))),apply_to=function)") # define BEGIN_AVX512VBMI_SPECIFIC_CODE \ @@ -197,6 +201,14 @@ namespace TargetSpecific::AVX512VBMI2 { \ } \ END_TARGET_SPECIFIC_CODE +#define DECLARE_AVX512BF16_SPECIFIC_CODE(...) \ +BEGIN_AVX512BF16_SPECIFIC_CODE \ +namespace TargetSpecific::AVX512BF16 { \ + DUMMY_FUNCTION_DEFINITION \ + using namespace DB::TargetSpecific::AVX512BF16; \ + __VA_ARGS__ \ +} \ +END_TARGET_SPECIFIC_CODE #else @@ -211,6 +223,7 @@ END_TARGET_SPECIFIC_CODE #define DECLARE_AVX512BW_SPECIFIC_CODE(...) #define DECLARE_AVX512VBMI_SPECIFIC_CODE(...) #define DECLARE_AVX512VBMI2_SPECIFIC_CODE(...) +#define DECLARE_AVX512BF16_SPECIFIC_CODE(...) #endif @@ -229,7 +242,8 @@ DECLARE_AVX2_SPECIFIC_CODE (__VA_ARGS__) \ DECLARE_AVX512F_SPECIFIC_CODE(__VA_ARGS__) \ DECLARE_AVX512BW_SPECIFIC_CODE (__VA_ARGS__) \ DECLARE_AVX512VBMI_SPECIFIC_CODE (__VA_ARGS__) \ -DECLARE_AVX512VBMI2_SPECIFIC_CODE (__VA_ARGS__) +DECLARE_AVX512VBMI2_SPECIFIC_CODE (__VA_ARGS__) \ +DECLARE_AVX512BF16_SPECIFIC_CODE (__VA_ARGS__) DECLARE_DEFAULT_CODE( constexpr auto BuildArch = TargetArch::Default; /// NOLINT @@ -263,6 +277,10 @@ DECLARE_AVX512VBMI2_SPECIFIC_CODE( constexpr auto BuildArch = TargetArch::AVX512VBMI2; /// NOLINT ) // DECLARE_AVX512VBMI2_SPECIFIC_CODE +DECLARE_AVX512BF16_SPECIFIC_CODE( + constexpr auto BuildArch = TargetArch::AVX512BF16; /// NOLINT +) // DECLARE_AVX512BF16_SPECIFIC_CODE + /** Runtime Dispatch helpers for class members. 
* * Example of usage: diff --git a/src/Functions/array/arrayDistance.cpp b/src/Functions/array/arrayDistance.cpp index a1f48747eb6..da49359c422 100644 --- a/src/Functions/array/arrayDistance.cpp +++ b/src/Functions/array/arrayDistance.cpp @@ -14,6 +14,31 @@ #include #endif + +namespace +{ + inline BFloat16 fabs(BFloat16 x) + { + return x.abs(); + } + + inline BFloat16 sqrt(BFloat16 x) + { + return BFloat16(::sqrtf(Float32(x))); + } + + template + inline BFloat16 pow(BFloat16 x, T p) + { + return BFloat16(::powf(Float32(x), Float32(p))); + } + + inline BFloat16 fmax(BFloat16 x, BFloat16 y) + { + return BFloat16(::fmaxf(Float32(x), Float32(y))); + } +} + namespace DB { namespace ErrorCodes @@ -34,7 +59,7 @@ struct L1Distance template struct State { - FloatType sum = 0; + FloatType sum{}; }; template @@ -65,7 +90,7 @@ struct L2Distance template struct State { - FloatType sum = 0; + FloatType sum{}; }; template @@ -82,7 +107,7 @@ struct L2Distance #if USE_MULTITARGET_CODE template - AVX512_FUNCTION_SPECIFIC_ATTRIBUTE static void accumulateCombine( + AVX512BF16_FUNCTION_SPECIFIC_ATTRIBUTE static void accumulateCombine( const ResultType * __restrict data_x, const ResultType * __restrict data_y, size_t i_max, @@ -90,19 +115,29 @@ struct L2Distance size_t & i_y, State & state) { - static constexpr bool is_float32 = std::is_same_v; - __m512 sums; - if constexpr (is_float32) + if constexpr (sizeof(ResultType) <= 4) sums = _mm512_setzero_ps(); else sums = _mm512_setzero_pd(); - constexpr size_t n = is_float32 ? 16 : 8; + constexpr size_t n = sizeof(__m512) / sizeof(ResultType); for (; i_x + n < i_max; i_x += n, i_y += n) { - if constexpr (is_float32) + if constexpr (sizeof(ResultType) == 2) + { + __m512 x_1 = _mm512_cvtpbh_ps(_mm256_loadu_ps(reinterpret_cast(data_x + i_x))); + __m512 x_2 = _mm512_cvtpbh_ps(_mm256_loadu_ps(reinterpret_cast(data_x + i_x + n / 2))); + __m512 y_1 = _mm512_cvtpbh_ps(_mm256_loadu_ps(reinterpret_cast(data_y + i_y))); + __m512 y_2 = _mm512_cvtpbh_ps(_mm256_loadu_ps(reinterpret_cast(data_y + i_y + n / 2))); + + __m512 differences_1 = _mm512_sub_ps(x_1, y_1); + __m512 differences_2 = _mm512_sub_ps(x_2, y_2); + sums = _mm512_fmadd_ps(differences_1, differences_1, sums); + sums = _mm512_fmadd_ps(differences_2, differences_2, sums); + } + else if constexpr (sizeof(ResultType) == 4) { __m512 x = _mm512_loadu_ps(data_x + i_x); __m512 y = _mm512_loadu_ps(data_y + i_y); @@ -118,7 +153,7 @@ struct L2Distance } } - if constexpr (is_float32) + if constexpr (sizeof(ResultType) <= 4) state.sum = _mm512_reduce_add_ps(sums); else state.sum = _mm512_reduce_add_pd(sums); @@ -128,7 +163,7 @@ struct L2Distance template static ResultType finalize(const State & state, const ConstParams &) { - return sqrt(state.sum); + return sqrt(ResultType(state.sum)); } }; @@ -156,13 +191,13 @@ struct LpDistance template struct State { - FloatType sum = 0; + FloatType sum{}; }; template static void accumulate(State & state, ResultType x, ResultType y, const ConstParams & params) { - state.sum += static_cast(std::pow(fabs(x - y), params.power)); + state.sum += static_cast(pow(fabs(x - y), params.power)); } template @@ -174,7 +209,7 @@ struct LpDistance template static ResultType finalize(const State & state, const ConstParams & params) { - return static_cast(std::pow(state.sum, params.inverted_power)); + return static_cast(pow(state.sum, params.inverted_power)); } }; @@ -187,7 +222,7 @@ struct LinfDistance template struct State { - FloatType dist = 0; + FloatType dist{}; }; template @@ -218,9 +253,9 @@ struct 
CosineDistance template struct State { - FloatType dot_prod = 0; - FloatType x_squared = 0; - FloatType y_squared = 0; + FloatType dot_prod{}; + FloatType x_squared{}; + FloatType y_squared{}; }; template @@ -241,7 +276,7 @@ struct CosineDistance #if USE_MULTITARGET_CODE template - AVX512_FUNCTION_SPECIFIC_ATTRIBUTE static void accumulateCombine( + AVX512BF16_FUNCTION_SPECIFIC_ATTRIBUTE static void accumulateCombine( const ResultType * __restrict data_x, const ResultType * __restrict data_y, size_t i_max, @@ -249,13 +284,11 @@ struct CosineDistance size_t & i_y, State & state) { - static constexpr bool is_float32 = std::is_same_v; - __m512 dot_products; __m512 x_squareds; __m512 y_squareds; - if constexpr (is_float32) + if constexpr (sizeof(ResultType) <= 4) { dot_products = _mm512_setzero_ps(); x_squareds = _mm512_setzero_ps(); @@ -268,11 +301,19 @@ struct CosineDistance y_squareds = _mm512_setzero_pd(); } - constexpr size_t n = is_float32 ? 16 : 8; + constexpr size_t n = sizeof(__m512) / sizeof(ResultType); for (; i_x + n < i_max; i_x += n, i_y += n) { - if constexpr (is_float32) + if constexpr (sizeof(ResultType) == 2) + { + __m512 x = _mm512_loadu_ps(data_x + i_x); + __m512 y = _mm512_loadu_ps(data_y + i_y); + dot_products = _mm512_dpbf16_ps(dot_products, x, y); + x_squareds = _mm512_dpbf16_ps(x_squareds, x, x); + y_squareds = _mm512_dpbf16_ps(y_squareds, y, y); + } + if constexpr (sizeof(ResultType) == 4) { __m512 x = _mm512_loadu_ps(data_x + i_x); __m512 y = _mm512_loadu_ps(data_y + i_y); @@ -290,7 +331,7 @@ struct CosineDistance } } - if constexpr (is_float32) + if constexpr (sizeof(ResultType) == 2 || sizeof(ResultType) == 4) { state.dot_prod = _mm512_reduce_add_ps(dot_products); state.x_squared = _mm512_reduce_add_ps(x_squareds); @@ -308,7 +349,7 @@ struct CosineDistance template static ResultType finalize(const State & state, const ConstParams &) { - return 1 - state.dot_prod / sqrt(state.x_squared * state.y_squared); + return ResultType(1) - state.dot_prod / sqrt(state.x_squared * state.y_squared); } }; @@ -353,11 +394,13 @@ public: return std::make_shared(); case TypeIndex::Float32: return std::make_shared(); + case TypeIndex::BFloat16: + return std::make_shared(); default: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Arguments of function {} has nested type {}. " - "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.", + "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, BFloat16, Float32, Float64.", getName(), common_type->getName()); } @@ -367,6 +410,9 @@ public: { switch (result_type->getTypeId()) { + case TypeIndex::BFloat16: + return executeWithResultType(arguments, input_rows_count); + break; case TypeIndex::Float32: return executeWithResultType(arguments, input_rows_count); break; @@ -388,6 +434,7 @@ public: ACTION(Int16) \ ACTION(Int32) \ ACTION(Int64) \ + ACTION(BFloat16) \ ACTION(Float32) \ ACTION(Float64) @@ -412,7 +459,7 @@ private: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Arguments of function {} has nested type {}. " - "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.", + "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, BFloat16, Float32, Float64.", getName(), type_x->getName()); } @@ -437,7 +484,7 @@ private: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Arguments of function {} has nested type {}. 
" - "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.", + "Supported types: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, BFloat16, Float32, Float64.", getName(), type_y->getName()); } @@ -548,13 +595,15 @@ private: /// SIMD optimization: process multiple elements in both input arrays at once. /// To avoid combinatorial explosion of SIMD kernels, focus on - /// - the two most common input/output types (Float32 x Float32) --> Float32 and (Float64 x Float64) --> Float64 instead of 10 x - /// 10 input types x 2 output types, + /// - the three most common input/output types (BFloat16 x BFloat16) --> BFloat16, + /// (Float32 x Float32) --> Float32 and (Float64 x Float64) --> Float64 + /// instead of 10 x 10 input types x 2 output types, /// - const/non-const inputs instead of non-const/non-const inputs /// - the two most common metrics L2 and cosine distance, /// - the most powerful SIMD instruction set (AVX-512F). #if USE_MULTITARGET_CODE - if constexpr (std::is_same_v && std::is_same_v) /// ResultType is Float32 or Float64 + /// ResultType is BFloat16, Float32 or Float64 + if constexpr (std::is_same_v && std::is_same_v) { if constexpr (std::is_same_v || std::is_same_v) @@ -638,4 +687,5 @@ FunctionPtr createFunctionArrayL2SquaredDistance(ContextPtr context_) { return F FunctionPtr createFunctionArrayLpDistance(ContextPtr context_) { return FunctionArrayDistance::create(context_); } FunctionPtr createFunctionArrayLinfDistance(ContextPtr context_) { return FunctionArrayDistance::create(context_); } FunctionPtr createFunctionArrayCosineDistance(ContextPtr context_) { return FunctionArrayDistance::create(context_); } + } From 6dee7e42766177e30712f7c1c341663b3fba2f91 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 00:24:57 +0100 Subject: [PATCH 144/267] Fix style --- src/Databases/enableAllExperimentalSettings.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Databases/enableAllExperimentalSettings.cpp b/src/Databases/enableAllExperimentalSettings.cpp index bc2dae55f97..1be54664bc9 100644 --- a/src/Databases/enableAllExperimentalSettings.cpp +++ b/src/Databases/enableAllExperimentalSettings.cpp @@ -27,6 +27,8 @@ void enableAllExperimentalSettings(ContextMutablePtr context) context->setSetting("allow_experimental_window_functions", 1); context->setSetting("allow_experimental_geo_types", 1); context->setSetting("allow_experimental_map_type", 1); + context->setSetting("allow_experimental_bigint_types", 1); + context->setSetting("allow_experimental_bfloat16_type", 1); context->setSetting("allow_deprecated_error_prone_window_functions", 1); context->setSetting("allow_suspicious_low_cardinality_types", 1); From 89b015cecfad9a6a8f44039efa556d209ea50239 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 00:25:11 +0100 Subject: [PATCH 145/267] Do not compile BFloat16 --- src/DataTypes/IDataType.h | 3 ++- src/DataTypes/Native.cpp | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 1e41d6b2eba..8f06526ddbb 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -411,7 +411,8 @@ struct WhichDataType constexpr bool isBFloat16() const { return idx == TypeIndex::BFloat16; } constexpr bool isFloat32() const { return idx == TypeIndex::Float32; } constexpr bool isFloat64() const { return idx == TypeIndex::Float64; } - constexpr bool isFloat() const { return isBFloat16() || isFloat32() || isFloat64(); } + constexpr 
bool isNativeFloat() const { return isFloat32() || isFloat64(); } + constexpr bool isFloat() const { return isNativeFloat() || isBFloat16(); } constexpr bool isNativeNumber() const { return isNativeInteger() || isFloat(); } constexpr bool isNumber() const { return isInteger() || isFloat() || isDecimal(); } diff --git a/src/DataTypes/Native.cpp b/src/DataTypes/Native.cpp index 5dc490b0bd5..53354d7c6e0 100644 --- a/src/DataTypes/Native.cpp +++ b/src/DataTypes/Native.cpp @@ -37,7 +37,7 @@ bool canBeNativeType(const IDataType & type) return canBeNativeType(*data_type_nullable.getNestedType()); } - return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate() + return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isNativeFloat() || data_type.isDate() || data_type.isDate32() || data_type.isDateTime() || data_type.isEnum(); } From 968a559917577a63464bbaf87d3e724912cb7d5a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 00:59:37 +0100 Subject: [PATCH 146/267] Add a test --- .../queries/0_stateless/03269_bf16.reference | 45 ++++++++++ tests/queries/0_stateless/03269_bf16.sql | 88 +++++++++++++++++++ 2 files changed, 133 insertions(+) create mode 100644 tests/queries/0_stateless/03269_bf16.reference create mode 100644 tests/queries/0_stateless/03269_bf16.sql diff --git a/tests/queries/0_stateless/03269_bf16.reference b/tests/queries/0_stateless/03269_bf16.reference new file mode 100644 index 00000000000..daa26cb252f --- /dev/null +++ b/tests/queries/0_stateless/03269_bf16.reference @@ -0,0 +1,45 @@ +1 -1 1.09375 -1.09375 1 -1 1.09375 -1.09375 18446744000000000000 -0 inf -inf nan +1.09375 1.09375 1.09375 1 +1 1 0 1 1 +0 2.1875 1.1962891 1 Float32 Float32 Float32 Float64 +-0.006250000000000089 2.19375 1.203125 1.0057142857142858 Float64 Float64 Float64 Float64 +0 0 1 0 +1000 1000 1 0 +2000 2000 1 0 +3000 2992 0 8 +4000 4000 1 0 +5000 4992 0 8 +6000 5984 0 16 +7000 6976 0 24 +8000 8000 1 0 +9000 8960 0 40 +49995000 49855104 4999.5 4985.5104 0 0 9999 9984 10000 925 10000 925 +0 0 1 0 +1000 1000 1 0 +2000 2000 1 0 +3000 2992 0 8 +4000 4000 1 0 +5000 4992 0 8 +6000 5984 0 16 +7000 6976 0 24 +8000 8000 1 0 +9000 8960 0 40 +49995000 49855104 4999.5 4985.5104 0 0 9999 9984 10000 925 10000 925 +Row 1: +────── +a32: 
[0,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,20,20.5,21,21.5,22,22.5,23,23.5,24,24.5,25,25.5,26,26.5,27,27.5,28,28.5,29,29.5,30,30.5,31,31.5,32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49,49.5,50,50.5,51,51.5,52,52.5,53,53.5,54,54.5,55,55.5,56,56.5,57,57.5,58,58.5,59,59.5,60,60.5,61,61.5,62,62.5,63,63.5,64,64.5,65,65.5,66,66.5,67,67.5,68,68.5,69,69.5,70,70.5,71,71.5,72,72.5,73,73.5,74,74.5,75,75.5,76,76.5,77,77.5,78,78.5,79,79.5,80,80.5,81,81.5,82,82.5,83,83.5,84,84.5,85,85.5,86,86.5,87,87.5,88,88.5,89,89.5,90,90.5,91,91.5,92,92.5,93,93.5,94,94.5,95,95.5,96,96.5,97,97.5,98,98.5,99,99.5,100,100.5,101,101.5,102,102.5,103,103.5,104,104.5,105,105.5,106,106.5,107,107.5,108,108.5,109,109.5,110,110.5,111,111.5,112,112.5,113,113.5,114,114.5,115,115.5,116,116.5,117,117.5,118,118.5,119,119.5,120,120.5,121,121.5,122,122.5,123,123.5,124,124.5,125,125.5,126,126.5,127,127.5,128,128.5,129,129.5,130,130.5,131,131.5,132,132.5,133,133.5,134,134.5,135,135.5,136,136.5,137,137.5,138,138.5,139,139.5,140,140.5,141,141.5,142,142.5,143,143.5,144,144.5,145,145.5,146,146.5,147,147.5,148,148.5,149,149.5,150,150.5,151,151.5,152,152.5,153,153.5,154,154.5,155,155.5,156,156.5,157,157.5,158,158.5,159,159.5,160,160.5,161,161.5,162,162.5,163,163.5,164,164.5,165,165.5,166,166.5,167,167.5,168,168.5,169,169.5,170,170.5,171,171.5,172,172.5,173,173.5,174,174.5,175,175.5,176,176.5,177,177.5,178,178.5,179,179.5,180,180.5,181,181.5,182,182.5,183,183.5,184,184.5,185,185.5,186,186.5,187,187.5,188,188.5,189,189.5,190,190.5,191,191.5] +a16: [0,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,20,20.5,21,21.5,22,22.5,23,23.5,24,24.5,25,25.5,26,26.5,27,27.5,28,28.5,29,29.5,30,30.5,31,31.5,32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49,49.5,50,50.5,51,51.5,52,52.5,53,53.5,54,54.5,55,55.5,56,56.5,57,57.5,58,58.5,59,59.5,60,60.5,61,61.5,62,62.5,63,63.5,64,64.5,65,65.5,66,66.5,67,67.5,68,68.5,69,69.5,70,70.5,71,71.5,72,72.5,73,73.5,74,74.5,75,75.5,76,76.5,77,77.5,78,78.5,79,79.5,80,80.5,81,81.5,82,82.5,83,83.5,84,84.5,85,85.5,86,86.5,87,87.5,88,88.5,89,89.5,90,90.5,91,91.5,92,92.5,93,93.5,94,94.5,95,95.5,96,96.5,97,97.5,98,98.5,99,99.5,100,100.5,101,101.5,102,102.5,103,103.5,104,104.5,105,105.5,106,106.5,107,107.5,108,108.5,109,109.5,110,110.5,111,111.5,112,112.5,113,113.5,114,114.5,115,115.5,116,116.5,117,117.5,118,118.5,119,119.5,120,120.5,121,121.5,122,122.5,123,123.5,124,124.5,125,125.5,126,126.5,127,127.5,128,128,129,129,130,130,131,131,132,132,133,133,134,134,135,135,136,136,137,137,138,138,139,139,140,140,141,141,142,142,143,143,144,144,145,145,146,146,147,147,148,148,149,149,150,150,151,151,152,152,153,153,154,154,155,155,156,156,157,157,158,158,159,159,160,160,161,161,162,162,163,163,164,164,165,165,166,166,167,167,168,168,169,169,170,170,171,171,172,172,173,173,174,174,175,175,176,176,177,177,178,178,179,179,180,180,181,181,182,182,183,183,184,184,185,185,186,186,187,187,188,188,189,189,190,190,191,191] +a32_1: 
[1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,20,20.5,21,21.5,22,22.5,23,23.5,24,24.5,25,25.5,26,26.5,27,27.5,28,28.5,29,29.5,30,30.5,31,31.5,32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49,49.5,50,50.5,51,51.5,52,52.5,53,53.5,54,54.5,55,55.5,56,56.5,57,57.5,58,58.5,59,59.5,60,60.5,61,61.5,62,62.5,63,63.5,64,64.5,65,65.5,66,66.5,67,67.5,68,68.5,69,69.5,70,70.5,71,71.5,72,72.5,73,73.5,74,74.5,75,75.5,76,76.5,77,77.5,78,78.5,79,79.5,80,80.5,81,81.5,82,82.5,83,83.5,84,84.5,85,85.5,86,86.5,87,87.5,88,88.5,89,89.5,90,90.5,91,91.5,92,92.5,93,93.5,94,94.5,95,95.5,96,96.5,97,97.5,98,98.5,99,99.5,100,100.5,101,101.5,102,102.5,103,103.5,104,104.5,105,105.5,106,106.5,107,107.5,108,108.5,109,109.5,110,110.5,111,111.5,112,112.5,113,113.5,114,114.5,115,115.5,116,116.5,117,117.5,118,118.5,119,119.5,120,120.5,121,121.5,122,122.5,123,123.5,124,124.5,125,125.5,126,126.5,127,127.5,128,128.5,129,129.5,130,130.5,131,131.5,132,132.5,133,133.5,134,134.5,135,135.5,136,136.5,137,137.5,138,138.5,139,139.5,140,140.5,141,141.5,142,142.5,143,143.5,144,144.5,145,145.5,146,146.5,147,147.5,148,148.5,149,149.5,150,150.5,151,151.5,152,152.5,153,153.5,154,154.5,155,155.5,156,156.5,157,157.5,158,158.5,159,159.5,160,160.5,161,161.5,162,162.5,163,163.5,164,164.5,165,165.5,166,166.5,167,167.5,168,168.5,169,169.5,170,170.5,171,171.5,172,172.5,173,173.5,174,174.5,175,175.5,176,176.5,177,177.5,178,178.5,179,179.5,180,180.5,181,181.5,182,182.5,183,183.5,184,184.5,185,185.5,186,186.5,187,187.5,188,188.5,189,189.5,190,190.5,191,191.5,192,192.5] +a16_1: [1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,20,20.5,21,21.5,22,22.5,23,23.5,24,24.5,25,25.5,26,26.5,27,27.5,28,28.5,29,29.5,30,30.5,31,31.5,32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49,49.5,50,50.5,51,51.5,52,52.5,53,53.5,54,54.5,55,55.5,56,56.5,57,57.5,58,58.5,59,59.5,60,60.5,61,61.5,62,62.5,63,63.5,64,64.5,65,65.5,66,66.5,67,67.5,68,68.5,69,69.5,70,70.5,71,71.5,72,72.5,73,73.5,74,74.5,75,75.5,76,76.5,77,77.5,78,78.5,79,79.5,80,80.5,81,81.5,82,82.5,83,83.5,84,84.5,85,85.5,86,86.5,87,87.5,88,88.5,89,89.5,90,90.5,91,91.5,92,92.5,93,93.5,94,94.5,95,95.5,96,96.5,97,97.5,98,98.5,99,99.5,100,100.5,101,101.5,102,102.5,103,103.5,104,104.5,105,105.5,106,106.5,107,107.5,108,108.5,109,109.5,110,110.5,111,111.5,112,112.5,113,113.5,114,114.5,115,115.5,116,116.5,117,117.5,118,118.5,119,119.5,120,120.5,121,121.5,122,122.5,123,123.5,124,124.5,125,125.5,126,126.5,127,127.5,128,128.5,129,129,130,130,131,131,132,132,133,133,134,134,135,135,136,136,137,137,138,138,139,139,140,140,141,141,142,142,143,143,144,144,145,145,146,146,147,147,148,148,149,149,150,150,151,151,152,152,153,153,154,154,155,155,156,156,157,157,158,158,159,159,160,160,161,161,162,162,163,163,164,164,165,165,166,166,167,167,168,168,169,169,170,170,171,171,172,172,173,173,174,174,175,175,176,176,177,177,178,178,179,179,180,180,181,181,182,182,183,183,184,184,185,185,186,186,187,187,188,188,189,189,190,190,191,191,192,192] +dotProduct(a32, a32_1): 4736944 -- 4.74 million +dotProduct(a16, a16_1): 4726688 -- 4.73 million +cosineDistance(a32, a32_1): 0.000010093636084174129 +cosineDistance(a16, a16_1): 0.00001010226319664298 +L2Distance(a32, a32_1): 19.595917942265423 
+L2Distance(a16, a16_1): 19.595917942265423 +L1Distance(a32, a32_1): 384 +L1Distance(a16, a16_1): 384 +LinfDistance(a32, a32_1): 1 +LinfDistance(a16, a16_1): 1 +LpDistance(a32, a32_1, 5): 3.2875036590344515 +LpDistance(a16, a16_1, 5): 3.2875036590344515 diff --git a/tests/queries/0_stateless/03269_bf16.sql b/tests/queries/0_stateless/03269_bf16.sql new file mode 100644 index 00000000000..375cca73b62 --- /dev/null +++ b/tests/queries/0_stateless/03269_bf16.sql @@ -0,0 +1,88 @@ +SET allow_experimental_bfloat16_type = 1; + +-- This is a smoke test, non exhaustive. + +-- Conversions + +SELECT + 1::BFloat16, + -1::BFloat16, + 1.1::BFloat16, + -1.1::BFloat16, + CAST(1 AS BFloat16), + CAST(-1 AS BFloat16), + CAST(1.1 AS BFloat16), + CAST(-1.1 AS BFloat16), + CAST(0xFFFFFFFFFFFFFFFF AS BFloat16), + CAST(-0.0 AS BFloat16), + CAST(inf AS BFloat16), + CAST(-inf AS BFloat16), + CAST(nan AS BFloat16); + +-- Conversions back + +SELECT + CAST(1.1::BFloat16 AS BFloat16), + CAST(1.1::BFloat16 AS Float32), + CAST(1.1::BFloat16 AS Float64), + CAST(1.1::BFloat16 AS Int8); + +-- Comparisons + +SELECT + 1.1::BFloat16 = 1.1::BFloat16, + 1.1::BFloat16 < 1.1, + 1.1::BFloat16 > 1.1, + 1.1::BFloat16 > 1, + 1.1::BFloat16 = 1.09375; + +-- Arithmetic + +SELECT + 1.1::BFloat16 - 1.1::BFloat16 AS a, + 1.1::BFloat16 + 1.1::BFloat16 AS b, + 1.1::BFloat16 * 1.1::BFloat16 AS c, + 1.1::BFloat16 / 1.1::BFloat16 AS d, + toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d); + +SELECT + 1.1::BFloat16 - 1.1 AS a, + 1.1 + 1.1::BFloat16 AS b, + 1.1::BFloat16 * 1.1 AS c, + 1.1 / 1.1::BFloat16 AS d, + toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d); + +-- Tables + +DROP TABLE IF EXISTS t; +CREATE TEMPORARY TABLE t (n UInt64, x BFloat16); +INSERT INTO t SELECT number, number FROM numbers(10000); +SELECT *, n = x, n - x FROM t WHERE n % 1000 = 0 ORDER BY n; + +-- Aggregate functions + +SELECT sum(n), sum(x), avg(n), avg(x), min(n), min(x), max(n), max(x), uniq(n), uniq(x), uniqExact(n), uniqExact(x) FROM t; + +-- MergeTree + +DROP TABLE t; +CREATE TABLE t (n UInt64, x BFloat16) ENGINE = MergeTree ORDER BY n; +INSERT INTO t SELECT number, number FROM numbers(10000); +SELECT *, n = x, n - x FROM t WHERE n % 1000 = 0 ORDER BY n; +SELECT sum(n), sum(x), avg(n), avg(x), min(n), min(x), max(n), max(x), uniq(n), uniq(x), uniqExact(n), uniqExact(x) FROM t; + +-- Distances + +WITH + arrayMap(x -> toFloat32(x) / 2, range(384)) AS a32, + arrayMap(x -> toBFloat16(x) / 2, range(384)) AS a16, + arrayMap(x -> x + 1, a32) AS a32_1, + arrayMap(x -> x + 1, a16) AS a16_1 +SELECT a32, a16, a32_1, a16_1, + dotProduct(a32, a32_1), dotProduct(a16, a16_1), + cosineDistance(a32, a32_1), cosineDistance(a16, a16_1), + L2Distance(a32, a32_1), L2Distance(a16, a16_1), + L1Distance(a32, a32_1), L1Distance(a16, a16_1), + LinfDistance(a32, a32_1), LinfDistance(a16, a16_1), + LpDistance(a32, a32_1, 5), LpDistance(a16, a16_1, 5) +FORMAT Vertical; From bfeefa2c8a5ce71dd0cc90d68d831d694aef3418 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 01:02:10 +0100 Subject: [PATCH 147/267] Introspection --- src/Functions/FunctionsBinaryRepresentation.cpp | 1 + tests/queries/0_stateless/03269_bf16.reference | 1 + tests/queries/0_stateless/03269_bf16.sql | 7 +++++++ 3 files changed, 9 insertions(+) diff --git a/src/Functions/FunctionsBinaryRepresentation.cpp b/src/Functions/FunctionsBinaryRepresentation.cpp index c8e8f167e4c..50a3c0862f4 100644 --- a/src/Functions/FunctionsBinaryRepresentation.cpp +++ 
b/src/Functions/FunctionsBinaryRepresentation.cpp @@ -296,6 +296,7 @@ public: tryExecuteUIntOrInt(column, res_column) || tryExecuteString(column, res_column) || tryExecuteFixedString(column, res_column) || + tryExecuteFloat(column, res_column) || tryExecuteFloat(column, res_column) || tryExecuteFloat(column, res_column) || tryExecuteDecimal(column, res_column) || diff --git a/tests/queries/0_stateless/03269_bf16.reference b/tests/queries/0_stateless/03269_bf16.reference index daa26cb252f..31395d92e2b 100644 --- a/tests/queries/0_stateless/03269_bf16.reference +++ b/tests/queries/0_stateless/03269_bf16.reference @@ -43,3 +43,4 @@ LinfDistance(a32, a32_1): 1 LinfDistance(a16, a16_1): 1 LpDistance(a32, a32_1, 5): 3.2875036590344515 LpDistance(a16, a16_1, 5): 3.2875036590344515 +1.09375 8C3F 1000110000111111 2 16268 8C3F diff --git a/tests/queries/0_stateless/03269_bf16.sql b/tests/queries/0_stateless/03269_bf16.sql index 375cca73b62..de4e2f6da47 100644 --- a/tests/queries/0_stateless/03269_bf16.sql +++ b/tests/queries/0_stateless/03269_bf16.sql @@ -86,3 +86,10 @@ SELECT a32, a16, a32_1, a16_1, LinfDistance(a32, a32_1), LinfDistance(a16, a16_1), LpDistance(a32, a32_1, 5), LpDistance(a16, a16_1, 5) FORMAT Vertical; + +-- Introspection + +SELECT 1.1::BFloat16 AS x, + hex(x), bin(x), + byteSize(x), + reinterpretAsUInt16(x), hex(reinterpretAsString(x)); From 3e50cf94fe858e8440ffd69040334356326b97db Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 01:04:55 +0100 Subject: [PATCH 148/267] Rounding --- tests/queries/0_stateless/03269_bf16.reference | 1 + tests/queries/0_stateless/03269_bf16.sql | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/tests/queries/0_stateless/03269_bf16.reference b/tests/queries/0_stateless/03269_bf16.reference index 31395d92e2b..896cc307623 100644 --- a/tests/queries/0_stateless/03269_bf16.reference +++ b/tests/queries/0_stateless/03269_bf16.reference @@ -44,3 +44,4 @@ LinfDistance(a16, a16_1): 1 LpDistance(a32, a32_1, 5): 3.2875036590344515 LpDistance(a16, a16_1, 5): 3.2875036590344515 1.09375 8C3F 1000110000111111 2 16268 8C3F +1.09375 1 1.09375 1.0859375 0 diff --git a/tests/queries/0_stateless/03269_bf16.sql b/tests/queries/0_stateless/03269_bf16.sql index de4e2f6da47..b332a6e3119 100644 --- a/tests/queries/0_stateless/03269_bf16.sql +++ b/tests/queries/0_stateless/03269_bf16.sql @@ -93,3 +93,8 @@ SELECT 1.1::BFloat16 AS x, hex(x), bin(x), byteSize(x), reinterpretAsUInt16(x), hex(reinterpretAsString(x)); + +-- Rounding (this could be not towards the nearest) + +SELECT 1.1::BFloat16 AS x, + round(x), round(x, 1), round(x, 2), round(x, -1); From 3a855f501cd5d16ff97e9dde8b6fcb2d3b7ae497 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 02:15:31 +0100 Subject: [PATCH 149/267] Cleanups --- base/base/DecomposedFloat.h | 2 +- base/base/wide_integer.h | 1 - base/base/wide_integer_impl.h | 8 +------- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/base/base/DecomposedFloat.h b/base/base/DecomposedFloat.h index 3bd059cb21c..fef91adefb0 100644 --- a/base/base/DecomposedFloat.h +++ b/base/base/DecomposedFloat.h @@ -230,4 +230,4 @@ struct DecomposedFloat using DecomposedFloat64 = DecomposedFloat; using DecomposedFloat32 = DecomposedFloat; -using DecomposedFloat16 = DecomposedFloat<__bf16>; +using DecomposedFloat16 = DecomposedFloat; diff --git a/base/base/wide_integer.h b/base/base/wide_integer.h index baf6e490ada..f3a4dc9e6d5 100644 --- a/base/base/wide_integer.h +++ b/base/base/wide_integer.h @@ -118,7 +118,6 @@ 
public: constexpr operator long double() const noexcept; constexpr operator double() const noexcept; constexpr operator float() const noexcept; - constexpr operator __bf16() const noexcept; struct _impl; diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h index d0bbd7df9d4..3787971a20e 100644 --- a/base/base/wide_integer_impl.h +++ b/base/base/wide_integer_impl.h @@ -154,7 +154,7 @@ struct common_type, Arithmetic> static_assert(wide::ArithmeticConcept()); using type = std::conditional_t< - std::is_floating_point_v || std::is_same_v, + std::is_floating_point_v, Arithmetic, std::conditional_t< sizeof(Arithmetic) * 8 < Bits, @@ -1300,12 +1300,6 @@ constexpr integer::operator float() const noexcept return static_cast(static_cast(*this)); } -template -constexpr integer::operator __bf16() const noexcept -{ - return static_cast<__bf16>(static_cast(*this)); -} - // Unary operators template constexpr integer operator~(const integer & lhs) noexcept From ab79efe40f8785a7bd947cd5919feafedfb88259 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Mon, 11 Nov 2024 14:07:19 +0800 Subject: [PATCH 150/267] make scale argument not optional --- .../functions/type-conversion-functions.md | 4 +- src/Functions/parseDateTime.cpp | 130 +++++++----------- .../03252_parse_datetime64.reference | 13 +- .../0_stateless/03252_parse_datetime64.sql | 38 ++--- ..._parse_datetime64_in_joda_syntax.reference | 5 - .../03252_parse_datetime64_in_joda_syntax.sql | 12 +- 6 files changed, 69 insertions(+), 133 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 8043b21744a..72e6fda03f7 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -6880,7 +6880,7 @@ parseDateTime64(str[, [scale, [format[, timezone]]]]) **Arguments** - `str` — The String to be parsed -- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default 6 if not specified. +- `scale` - The scale of [DateTime64](../data-types/datetime64.md). - `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. @@ -6907,7 +6907,7 @@ parseDateTime64InJodaSyntax(str[, [scale, [format[, timezone]]]]) **Arguments** - `str` — The String to be parsed -- `scale` - The precision for [DateTime64](../data-types/datetime64.md). Optional, default 0 if not specified. +- `scale` - The scale of [DateTime64](../data-types/datetime64.md). - `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. 
diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 7190c1ad6f8..72e3ba661ca 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -607,87 +607,71 @@ namespace DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { - FunctionArgumentDescriptors mandatory_args{ - {"time", static_cast(&isString), nullptr, "String"} - }; - + FunctionArgumentDescriptors mandatory_args; FunctionArgumentDescriptors optional_args; if constexpr (return_type == ReturnType::DateTime64) - optional_args = {{"scale/format", static_cast( - [](const IDataType & data_type) -> bool { return isUInt(data_type) || isString(data_type); } - ), nullptr, "UInt or String"}, - {"format", static_cast(&isString), nullptr, "String"}, - {"timezone", static_cast(&isString), &isColumnConst, "const String"} + { + mandatory_args = { + {"time", static_cast(&isString), nullptr, "String"}, + {"scale", static_cast(&isUInt8), nullptr, "UInt8"} }; - else optional_args = { {"format", static_cast(&isString), nullptr, "String"}, {"timezone", static_cast(&isString), &isColumnConst, "const String"} }; + } + else + { + mandatory_args = {{"time", static_cast(&isString), nullptr, "String"}}; optional_args = { {"format", static_cast(&isString), nullptr, "String"}, {"timezone", static_cast(&isString), &isColumnConst, "const String"} }; + } validateFunctionArguments(*this, arguments, mandatory_args, optional_args); String time_zone_name = getTimeZone(arguments).getTimeZone(); DataTypePtr data_type; if constexpr (return_type == ReturnType::DateTime64) { - UInt32 scale = 0; - if (arguments.size() == 1) + UInt8 scale = 0; + if (isUInt8(arguments[1].type)) { - /// In MySQL parse syntax, the scale of microseond is 6. - if constexpr (parse_syntax == ParseSyntax::MySQL) - scale = 6; - } - else - { - if (isUInt(arguments[1].type)) - { - const auto * col_scale = checkAndGetColumnConst(arguments[1].column.get()); - if (col_scale) - scale = col_scale->getValue(); - else - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The input scale value may exceed the max scale value of `DateTime64`: {}.", - maxScaleOfDateTime64); - } + const auto * col_scale = checkAndGetColumnConst(arguments[1].column.get()); + if (col_scale) + scale = col_scale->getValue(); else - { - if constexpr (parse_syntax == ParseSyntax::MySQL) - scale = 6; - } + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale argument is not of Const(UInt8) type."); + } + if (parse_syntax == ParseSyntax::MySQL && scale != 6) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale value of MySQL parse syntax must be 6, but the given value is {}.", std::to_string(scale)); + if (scale > maxScaleOfDateTime64) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The scale argument's value {} exceeds the max scale value {}.", std::to_string(scale), std::to_string(maxScaleOfDateTime64)); - /// Construct the return type `DataTypDateTime64` with scale and time zone name. The scale value can be specified or be extracted - /// from the format string by counting how many 'S' characters are contained in the format's microsceond fragment. - String format = getFormat(arguments, scale); - std::vector instructions = parseFormat(format); - for (const auto & instruction : instructions) + String format = getFormat(arguments, scale); + std::vector instructions = parseFormat(format); + for (const auto & instruction : instructions) + { + /// Check the scale by counting how many 'S' characters exist in the format string.
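 + /// (For example, a fragment consisting of 'SSS' yields s_cnt == 3 below, so the given scale must also be 3.)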
+ const String & fragment = instruction.getFragment(); + UInt32 s_cnt = 0; + for (char ch : fragment) { - const String & fragment = instruction.getFragment(); - UInt32 val = 0; - for (char ch : fragment) + if (ch != 'S') { - if (ch != 'S') - { - val = 0; - break; - } - else - val++; + s_cnt = 0; + break; } - /// If the scale is already specified by the second argument, but it not equals the value that extract from the format string, - /// then we should throw an exception; If the scale is not specified, then we should set its value as the extracted one. - if (val != 0 && scale != 0 && val != scale) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The scale of input format string {} not equals the given scale value {}.", - format, - scale); - else if (scale == 0 && val != 0) - scale = val; + else + s_cnt++; } - if (scale > maxScaleOfDateTime64) + /// If the number of 'S' characters in the format string does not equal the scale, throw an exception to report the error. + if (s_cnt != 0 && s_cnt != scale) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The scale of the input format string {} exceed the max scale value {}.", + "The scale of the input format string {} does not equal the given scale value {}.", format, - maxScaleOfDateTime64); + std::to_string(scale)); } data_type = std::make_shared(scale, time_zone_name); } @@ -2267,18 +2251,7 @@ namespace { size_t format_arg_index = 1; if constexpr (return_type == ReturnType::DateTime64) - { - /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22.22.123', 3), then the format is treated - /// as default value `yyyy-MM-dd HH:mm:ss`. - /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 'yyyy-MM-dd HH:mm:ss.SSS')`, - /// then the second argument is the format. - /// When parse `DateTime64` like `parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-05 12:22:22.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS')`, - /// then the third argument is the format. - if (arguments.size() > 1 && isString(removeNullable(arguments[1].type))) - format_arg_index = 1; - else - format_arg_index = 2; - } + format_arg_index = 2; if (arguments.size() <= format_arg_index) { @@ -2311,18 +2284,11 @@ namespace const DateLUTImpl & getTimeZone(const ColumnsWithTypeAndName & arguments) const { - if (arguments.size() < 3) + if (return_type == ReturnType::DateTime && arguments.size() < 3) return DateLUT::instance(); - else if constexpr (return_type == ReturnType::DateTime64) - { - /// If the return type is DateTime64, and the second argument is UInt type for scale, then it has 2 reasonable situations: - /// the first like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT+8') - /// the second like parseDateTime64[InJodaSyntax][OrZero/OrNull]('2024-11-07 17:27.30.123456', 6, '%Y-%m-%d %H:%i:%s.%f'). And for the - /// first one, we should return the last argument as its timezone, and for the second one, we should return the default time zone as - /// `DateLUT::instance()`.
- if (isUInt(arguments[1].type) && arguments.size() < 4) - return DateLUT::instance(); - } + else if (return_type == ReturnType::DateTime64 && arguments.size() < 4) + return DateLUT::instance(); + size_t timezone_arg_index = arguments.size() - 1; const auto * col = checkAndGetColumnConst(arguments[timezone_arg_index].column.get()); if (!col) diff --git a/tests/queries/0_stateless/03252_parse_datetime64.reference b/tests/queries/0_stateless/03252_parse_datetime64.reference index 27dcef6bf68..263c9b5d8ea 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64.reference +++ b/tests/queries/0_stateless/03252_parse_datetime64.reference @@ -1,17 +1,8 @@ -2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 +2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 1970-01-01 08:00:00.000000 -1970-01-01 08:00:00.000 -1970-01-01 08:00:00.000 2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 -2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 -1970-01-01 08:00:00.000 2024-10-09 10:30:10.123456 -\N -\N -\N -2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 -2024-10-09 10:30:10.123456 2024-10-09 10:30:10.123456 -\N +1970-01-01 08:00:00.000000 diff --git a/tests/queries/0_stateless/03252_parse_datetime64.sql b/tests/queries/0_stateless/03252_parse_datetime64.sql index d28b6e586f7..2a6ef254887 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64.sql +++ b/tests/queries/0_stateless/03252_parse_datetime64.sql @@ -1,32 +1,22 @@ set session_timezone = 'Asia/Shanghai'; -select parseDateTime64('2024-10-09 10:30:10.123456'); -select parseDateTime64('2024-10-09 10:30:10.123'); -- { serverError NOT_ENOUGH_SPACE } -select parseDateTime64('2024-10-09 10:30:10', 3); -- { serverError NOT_ENOUGH_SPACE } -select parseDateTime64('2024-10-09 10:30:10.', 3); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('2024-10-09 10:30:10.123456'); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select parseDateTime64('2024-10-09 10:30:10', 3); -- { serverError BAD_ARGUMENTS } select parseDateTime64('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } -select parseDateTime64('2024-10-09 10:30:10.123456', 6), parseDateTime64('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); -select parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); -select parseDateTime64('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('2024-10-09 10:30:10.123456', 6), parseDateTime64('2024-10-09 10:30:10.123456', 6,'%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); select parseDateTime64('2024-10-09 10:30:10.123', 6, '%Y-%m-%d %H:%i:%s.%f'); -- { serverError NOT_ENOUGH_SPACE } -select parseDateTime64OrZero('2024-10-09 10:30:10.123456'); -select parseDateTime64OrZero('2024-10-09 10:30:10.123'); -select parseDateTime64OrZero('2024-10-09 10:30:10', 3); -select parseDateTime64OrZero('2024-10-09 10:30:10.', 3); +select parseDateTime64OrZero('2024-10-09 10:30:10.123456'); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select parseDateTime64OrZero('2024-10-09 10:30:10', 3); -- { serverError BAD_ARGUMENTS } select parseDateTime64OrZero('2024-10-09 10:30:10', -3); -- {serverError 
ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64OrZero('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } -select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); -select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); -select parseDateTime64OrZero('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7'); +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%fffff'); -select parseDateTime64OrNull('2024-10-09 10:30:10.123456'); -select parseDateTime64OrNull('2024-10-09 10:30:10.123'); -select parseDateTime64OrNull('2024-10-09 10:30:10', 3); -select parseDateTime64OrNull('2024-10-09 10:30:10.', 3); +select parseDateTime64OrNull('2024-10-09 10:30:10.123456'); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select parseDateTime64OrNull('2024-10-09 10:30:10', 3); -- { serverError BAD_ARGUMENTS } select parseDateTime64OrNull('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64OrNull('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } -select parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', '%Y-%m-%d %H:%i:%s.%f'); -select parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f'), parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7');; -select parseDateTime64OrNull('2024-10-09 10:30:10.123', 3, '%Y-%m-%d %H:%i:%s.%f'); \ No newline at end of file +select parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6), parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6,'%Y-%m-%d %H:%i:%s.%f'); +select parseDateTime64OrNull('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%f', 'Etc/GMT-7');; +select parseDateTime64OrZero('2024-10-09 10:30:10.123456', 6, '%Y-%m-%d %H:%i:%s.%fffff'); \ No newline at end of file diff --git a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference index 0b4a28c4b38..99252ce55ca 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference +++ b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference @@ -1,12 +1,9 @@ 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 -2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-10 02:42:10.123456 2024-10-10 01:30:10.123456 2024-10-10 01:30:10.123456 1970-01-01 08:00:00.000 -1970-01-01 08:00:00.000 -2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-10 02:42:10.123456 @@ -15,8 +12,6 @@ 2024-10-10 01:30:10.123456 1970-01-01 08:00:00.000000 \N -\N -2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-09 10:30:10.123 2024-10-09 10:30:10.000123 2024-10-10 02:42:10.123456 diff --git a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql index 
8482677e9c9..bcb0fb5a362 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql +++ b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql @@ -1,11 +1,9 @@ set session_timezone = 'Asia/Shanghai'; select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', 3); -- { serverError NOT_ENOUGH_SPACE } -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.', 3); -- { serverError CANNOT_PARSE_DATETIME } select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 6); -select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); @@ -17,11 +15,9 @@ select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-8000', 6, 'yyyy-M select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456ABCD', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS } select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', 3); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.', 3); select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10', 9); -- { serverError BAD_ARGUMENTS } +select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 6); -select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); @@ -33,11 +29,9 @@ select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-8000', 6, ' select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456ABCD', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS } select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', 3); -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.', 3); select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', -3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10', 9); -- { serverError 
BAD_ARGUMENTS } +select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 6); -select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS'), parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS } select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-0812', 6, 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); From 0768e0b265dc3a7d83d3a4c3ea9ba8625fe70994 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Mon, 11 Nov 2024 14:26:58 +0800 Subject: [PATCH 151/267] update doc & comments --- .../functions/type-conversion-functions.md | 10 ++++----- src/Functions/parseDateTime.cpp | 22 ++++++------------- 2 files changed, 12 insertions(+), 20 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 72e6fda03f7..a92d7055fd5 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -6874,14 +6874,14 @@ Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datet **Syntax** ``` sql -parseDateTime64(str[, [scale, [format[, timezone]]]]) +parseDateTime64(str, scale[, format[, timezone]]) ``` **Arguments** - `str` — The String to be parsed +- `str` — The String to be parsed. - `scale` - The scale of [DateTime64](../data-types/datetime64.md). -- `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s` if not specified. +- `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s.%f` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. **Returned value(s)** @@ -6901,12 +6901,12 @@ Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datet **Syntax** ``` sql -parseDateTime64InJodaSyntax(str[, [scale, [format[, timezone]]]]) +parseDateTime64InJodaSyntax(str, scale[, format[, timezone]]) ``` **Arguments** - `str` — The String to be parsed +- `str` — The String to be parsed. - `scale` - The scale of [DateTime64](../data-types/datetime64.md). - `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional. 
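The same convention applies to the Joda-syntax variant, with the added constraint (enforced in parseDateTime.cpp below) that the number of 'S' placeholders in an explicit format must agree with the scale; a sketch based on the tests from the previous commit:

``` sql
-- Scale 3 matches the three 'S' placeholders in the format.
SELECT parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSS');
-- Scale 3 against four 'S' placeholders is rejected.
SELECT parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 3, 'yyyy-MM-dd HH:mm:ss.SSSS'); -- { serverError BAD_ARGUMENTS }
```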
diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 72e3ba661ca..9fea8a4f130 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -608,26 +608,18 @@ namespace DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { FunctionArgumentDescriptors mandatory_args; - FunctionArgumentDescriptors optional_args; if constexpr (return_type == ReturnType::DateTime64) - { mandatory_args = { {"time", static_cast(&isString), nullptr, "String"}, - {"scale", static_cast(&isUInt8), nullptr, "UInt8"} + {"scale", static_cast(&isUInt8), &isColumnConst, "UInt8"} }; - optional_args = { - {"format", static_cast(&isString), nullptr, "String"}, - {"timezone", static_cast(&isString), &isColumnConst, "const String"} - }; - } else - { mandatory_args = {{"time", static_cast(&isString), nullptr, "String"}}; - optional_args = { - {"format", static_cast(&isString), nullptr, "String"}, - {"timezone", static_cast(&isString), &isColumnConst, "const String"} - }; - } + + FunctionArgumentDescriptors optional_args{ + {"format", static_cast(&isString), nullptr, "String"}, + {"timezone", static_cast(&isString), &isColumnConst, "const String"} + }; validateFunctionArguments(*this, arguments, mandatory_args, optional_args); String time_zone_name = getTimeZone(arguments).getTimeZone(); @@ -644,7 +636,7 @@ namespace throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale argument is not Const(UInt8) type."); } if (parse_syntax == ParseSyntax::MySQL && scale != 6) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale value {} of MySQL parse syntax is not 6.", std::to_string(scale)); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale argument's value {} of MySQL parse syntax is not 6.", std::to_string(scale)); if (scale > maxScaleOfDateTime64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "The scale argument's value {} exceeds the max scale value {}.", std::to_string(scale), std::to_string(maxScaleOfDateTime64)); From 39e01d47b1892b2049d18fc19803949d1bfcda51 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 11 Nov 2024 11:54:04 +0000 Subject: [PATCH 152/267] Fix style check --- src/Planner/findParallelReplicasQuery.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index fbe2993b7c6..494326c0ed0 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -227,7 +227,7 @@ const QueryNode * findQueryForParallelReplicas( const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Step {} childrens={}", step->getName(), children.size()); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Step {} children={}", step->getName(), children.size()); if (children.empty()) { @@ -347,7 +347,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr } if (res) - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Choosen query: {}", res->dumpTree()); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Chosen query: {}", res->dumpTree()); return res; } From 899702f0569a5f2f3fff6839b9ac2003968de853 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 11 Nov 2024 13:08:48 +0100 Subject: [PATCH 153/267] fix test --- src/Core/SettingsChangesHistory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 0ff9d0a6833..3e0c17bcf24
100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -77,6 +77,7 @@ static std::initializer_list Date: Mon, 11 Nov 2024 08:55:53 -0800 Subject: [PATCH 154/267] Change enable_http_compression setting's default value to 1 --- src/Core/Settings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index d321f5dbdf2..ec07d1cdc4b 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -1788,7 +1788,7 @@ Possible values: - 0 — Disabled. - 1 — Enabled. -)", 0) \ +)", 1) \ DECLARE(Int64, http_zlib_compression_level, 3, R"( Sets the level of data compression in the response to an HTTP request if [enable_http_compression = 1](#enable_http_compression). From f60d35161f9d4b34fac0f51ad566906301cb3762 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Mon, 11 Nov 2024 08:56:50 -0800 Subject: [PATCH 155/267] Update docs for ru/ and zh/ --- docs/ru/operations/settings/settings.md | 2 +- docs/zh/operations/settings/settings.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 84bbf6c83d3..bbe1f071381 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -136,7 +136,7 @@ ClickHouse применяет настройку в тех случаях, ко - 0 — выключена. - 1 — включена. -Значение по умолчанию: 0. +Значение по умолчанию: 1. ## http_zlib_compression_level {#settings-http_zlib_compression_level} diff --git a/docs/zh/operations/settings/settings.md b/docs/zh/operations/settings/settings.md index 5e59196f56c..baa4fcb0754 100644 --- a/docs/zh/operations/settings/settings.md +++ b/docs/zh/operations/settings/settings.md @@ -97,7 +97,7 @@ ClickHouse从表的过时副本中选择最相关的副本。 - 0 — Disabled. - 1 — Enabled. 
-默认值:0。 +默认值:1。 ## http_zlib_compression_level {#settings-http_zlib_compression_level} From c7ad0b897a4f10da1889545a74c391f53252c163 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 11 Nov 2024 16:54:38 +0000 Subject: [PATCH 156/267] allow to use constant adaptive granularity for part --- .../optimizeUseAggregateProjection.cpp | 2 +- src/Processors/QueryPlan/PartsSplitter.cpp | 6 +- .../QueryPlan/ReadFromMergeTree.cpp | 4 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 30 +-- src/Storages/MergeTree/IMergeTreeDataPart.h | 2 +- .../MergeTree/IMergeTreeDataPartWriter.cpp | 40 +++- .../MergeTree/IMergeTreeDataPartWriter.h | 8 +- .../MergeTree/IMergedBlockOutputStream.h | 2 +- .../LoadedMergeTreeDataPartInfoForReader.h | 2 +- src/Storages/MergeTree/MergeList.cpp | 2 +- src/Storages/MergeTree/MergeTask.cpp | 33 ++- src/Storages/MergeTree/MergeTask.h | 3 +- src/Storages/MergeTree/MergeTreeData.cpp | 13 +- .../MergeTree/MergeTreeDataPartCompact.cpp | 31 ++- .../MergeTree/MergeTreeDataPartCompact.h | 7 +- .../MergeTree/MergeTreeDataPartWide.cpp | 38 +++- .../MergeTree/MergeTreeDataPartWide.h | 7 +- .../MergeTreeDataPartWriterCompact.cpp | 19 +- .../MergeTreeDataPartWriterCompact.h | 2 +- .../MergeTreeDataPartWriterOnDisk.cpp | 72 ++----- .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 2 +- .../MergeTree/MergeTreeDataPartWriterWide.cpp | 59 +++--- .../MergeTree/MergeTreeDataPartWriterWide.h | 2 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 37 ++-- .../MergeTree/MergeTreeDataWriter.cpp | 16 ++ .../MergeTree/MergeTreeIndexGranularity.cpp | 188 ++++++++++-------- .../MergeTree/MergeTreeIndexGranularity.h | 92 ++++----- .../MergeTreeIndexGranularityAdaptive.cpp | 151 ++++++++++++++ .../MergeTreeIndexGranularityAdaptive.h | 36 ++++ .../MergeTreeIndexGranularityConstant.cpp | 150 ++++++++++++++ .../MergeTreeIndexGranularityConstant.h | 41 ++++ .../MergeTree/MergeTreeIndexGranularityInfo.h | 2 +- .../MergeTree/MergeTreeRangeReader.cpp | 1 + src/Storages/MergeTree/MergeTreeReadTask.cpp | 2 +- .../MergeTree/MergeTreeSequentialSource.cpp | 2 +- src/Storages/MergeTree/MergeTreeSettings.cpp | 2 + .../MergeTree/MergedBlockOutputStream.cpp | 37 +++- .../MergeTree/MergedBlockOutputStream.h | 4 +- .../MergedColumnOnlyOutputStream.cpp | 24 +-- .../MergeTree/MergedColumnOnlyOutputStream.h | 13 +- src/Storages/MergeTree/MutateTask.cpp | 28 ++- src/Storages/MergeTree/RangesInDataPart.cpp | 2 +- src/Storages/StorageMergeTreeIndex.cpp | 6 +- 43 files changed, 844 insertions(+), 376 deletions(-) create mode 100644 src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.cpp create mode 100644 src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h create mode 100644 src/Storages/MergeTree/MergeTreeIndexGranularityConstant.cpp create mode 100644 src/Storages/MergeTree/MergeTreeIndexGranularityConstant.h diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index dee16bfcb1a..de8c42e0a1c 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -647,7 +647,7 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu range.begin = exact_ranges[i].end; ordinary_reading_marks -= exact_ranges[i].end - exact_ranges[i].begin; - exact_count += part_with_ranges.data_part->index_granularity.getRowsCountInRange(exact_ranges[i]); + exact_count += 
part_with_ranges.data_part->index_granularity->getRowsCountInRange(exact_ranges[i]); ++i; } diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index 57fd41e2a32..2b860f45219 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -201,7 +201,7 @@ public: size_t getMarkRows(size_t part_idx, size_t mark) const { - return parts[part_idx].data_part->index_granularity.getMarkRows(mark); + return parts[part_idx].data_part->index_granularity->getMarkRows(mark); } private: const RangesInDataParts & parts; @@ -444,7 +444,7 @@ SplitPartsRangesResult splitPartsRanges(RangesInDataParts ranges_in_data_parts, parts_ranges.push_back( {index_access.getValue(part_index, range.begin), range, part_index, PartsRangesIterator::EventType::RangeStart}); - const bool value_is_defined_at_end_mark = range.end < index_granularity.getMarksCount(); + const bool value_is_defined_at_end_mark = range.end < index_granularity->getMarksCount(); if (!value_is_defined_at_end_mark) continue; @@ -667,7 +667,7 @@ std::pair, std::vector> splitIntersecting PartRangeIndex parts_range_start_index(parts_range_start); parts_ranges_queue.push({std::move(parts_range_start), std::move(parts_range_start_index)}); - const bool value_is_defined_at_end_mark = range.end < index_granularity.getMarksCount(); + const bool value_is_defined_at_end_mark = range.end < index_granularity->getMarksCount(); if (!value_is_defined_at_end_mark) continue; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 3186df6a6b3..44cd22df3ee 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -663,7 +663,7 @@ Pipe ReadFromMergeTree::readInOrder( part_with_ranges.ranges.size(), read_type == ReadType::InReverseOrder ? 
" reverse " : " ", part_with_ranges.data_part->name, total_rows, - part_with_ranges.data_part->index_granularity.getMarkStartingRow(part_with_ranges.ranges.front().begin)); + part_with_ranges.data_part->index_granularity->getMarkStartingRow(part_with_ranges.ranges.front().begin)); MergeTreeSelectAlgorithmPtr algorithm; if (read_type == ReadType::InReverseOrder) @@ -1727,7 +1727,7 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead( return std::make_shared(std::move(result)); for (const auto & part : parts) - total_marks_pk += part->index_granularity.getMarksCountWithoutFinal(); + total_marks_pk += part->index_granularity->getMarksCountWithoutFinal(); parts_before_pk = parts.size(); auto reader_settings = getMergeTreeReaderSettings(context_, query_info_); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 20d7528d38a..774d79bfdf2 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -38,6 +39,8 @@ #include #include #include +#include "Storages/MergeTree/MergeTreeIndexGranularity.h" +#include "Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h" #include @@ -652,7 +655,7 @@ void IMergeTreeDataPart::assertOnDisk() const UInt64 IMergeTreeDataPart::getMarksCount() const { - return index_granularity.getMarksCount(); + return index_granularity->getMarksCount(); } UInt64 IMergeTreeDataPart::getExistingBytesOnDisk() const @@ -931,13 +934,13 @@ void IMergeTreeDataPart::loadIndex() const for (size_t i = 0; i < key_size; ++i) { loaded_index[i] = primary_key.data_types[i]->createColumn(); - loaded_index[i]->reserve(index_granularity.getMarksCount()); + loaded_index[i]->reserve(index_granularity->getMarksCount()); } String index_name = "primary" + getIndexExtensionFromFilesystem(getDataPartStorage()); String index_path = fs::path(getDataPartStorage().getRelativePath()) / index_name; auto index_file = metadata_manager->read(index_name); - size_t marks_count = index_granularity.getMarksCount(); + size_t marks_count = index_granularity->getMarksCount(); Serializations key_serializations(key_size); for (size_t j = 0; j < key_size; ++j) @@ -1350,7 +1353,7 @@ void IMergeTreeDataPart::loadRowsCount() assertEOF(*buf); }; - if (index_granularity.empty()) + if (index_granularity->empty()) { rows_count = 0; } @@ -1385,9 +1388,9 @@ void IMergeTreeDataPart::loadRowsCount() backQuote(column.name), rows_in_column, name, rows_count); } - size_t last_possibly_incomplete_mark_rows = index_granularity.getLastNonFinalMarkRows(); + size_t last_possibly_incomplete_mark_rows = index_granularity->getLastNonFinalMarkRows(); /// All this rows have to be written in column - size_t index_granularity_without_last_mark = index_granularity.getTotalRows() - last_possibly_incomplete_mark_rows; + size_t index_granularity_without_last_mark = index_granularity->getTotalRows() - last_possibly_incomplete_mark_rows; /// We have more rows in column than in index granularity without last possibly incomplete mark if (rows_in_column < index_granularity_without_last_mark) { @@ -1397,7 +1400,7 @@ void IMergeTreeDataPart::loadRowsCount() "and size of single value, " "but index granularity in part {} without last mark has {} rows, which " "is more than in column", - backQuote(column.name), rows_in_column, name, index_granularity.getTotalRows()); + backQuote(column.name), rows_in_column, name, index_granularity->getTotalRows()); } 
/// In last mark we actually written less or equal rows than stored in last mark of index granularity @@ -1445,8 +1448,8 @@ void IMergeTreeDataPart::loadRowsCount() column.name, column_size, sizeof_field); } - size_t last_mark_index_granularity = index_granularity.getLastNonFinalMarkRows(); - size_t rows_approx = index_granularity.getTotalRows(); + size_t last_mark_index_granularity = index_granularity->getLastNonFinalMarkRows(); + size_t rows_approx = index_granularity->getTotalRows(); if (!(rows_count <= rows_approx && rows_approx < rows_count + last_mark_index_granularity)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected size of column {}: " "{} rows, expected {}+-{} rows according to the index", @@ -1509,7 +1512,7 @@ UInt64 IMergeTreeDataPart::readExistingRowsCount() while (current_row < rows_count) { - size_t rows_to_read = index_granularity.getMarkRows(current_mark); + size_t rows_to_read = index_granularity->getMarkRows(current_mark); continue_reading = (current_mark != 0); Columns result; @@ -1957,6 +1960,9 @@ void IMergeTreeDataPart::initializeIndexGranularityInfo() index_granularity_info = MergeTreeIndexGranularityInfo(storage, *mrk_type); else index_granularity_info = MergeTreeIndexGranularityInfo(storage, part_type); + + /// It may be converted to constant index granularity after loading it. + index_granularity = std::make_unique(); } void IMergeTreeDataPart::remove() @@ -2230,9 +2236,9 @@ void IMergeTreeDataPart::checkConsistency(bool require_part_metadata) const "part_state: [{}]", columns.toString(), index_granularity_info.getMarkSizeInBytes(columns.size()), - index_granularity.getMarksCount(), + index_granularity->getMarksCount(), index_granularity_info.describe(), - index_granularity.describe(), + index_granularity->describe(), part_state); e.addMessage(debug_info); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index b41a1d840e1..7646aed9339 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -321,7 +321,7 @@ public: /// Amount of rows between marks /// As index always loaded into memory - MergeTreeIndexGranularity index_granularity; + MergeTreeIndexGranularityPtr index_granularity; /// Index that for each part stores min and max values of a set of columns. This allows quickly excluding /// parts based on conditions on these columns imposed by a query. 
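The new behaviour is gated by two Bool MergeTree settings introduced in this patch, `use_const_adaptive_granularity` (consulted in MergeTask.cpp further below) and `enable_index_granularity_compression` (consulted when loading Wide and Compact parts). The patch contains no SQL-level usage, so the following is only a hedged sketch of how a table might opt in, assuming the settings are exposed like any other MergeTree setting:

``` sql
-- Hypothetical opt-in: the setting names come from this patch, the surrounding DDL is assumed.
CREATE TABLE t
(
    key UInt64,
    value String
)
ENGINE = MergeTree
ORDER BY key
SETTINGS use_const_adaptive_granularity = 1,
         enable_index_granularity_compression = 1;
```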
diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp index dbfdbbdea88..0e70310675f 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp @@ -1,5 +1,11 @@ +#include +#include #include #include +#include "Storages/MergeTree/MergeTreeIndexGranularity.h" +#include "Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h" +#include "Storages/MergeTree/MergeTreeIndexGranularityConstant.h" +#include "Storages/MergeTree/MergeTreeSettings.h" #include namespace DB @@ -11,7 +17,6 @@ namespace ErrorCodes extern const int NO_SUCH_COLUMN_IN_TABLE; } - Block getIndexBlockAndPermute(const Block & block, const Names & names, const IColumn::Permutation * permutation) { Block result; @@ -57,7 +62,7 @@ IMergeTreeDataPartWriter::IMergeTreeDataPartWriter( const StorageMetadataPtr & metadata_snapshot_, const VirtualsDescriptionPtr & virtual_columns_, const MergeTreeWriterSettings & settings_, - const MergeTreeIndexGranularity & index_granularity_) + MergeTreeIndexGranularityPtr index_granularity_) : data_part_name(data_part_name_) , serializations(serializations_) , index_granularity_info(index_granularity_info_) @@ -68,7 +73,7 @@ IMergeTreeDataPartWriter::IMergeTreeDataPartWriter( , settings(settings_) , with_final_mark(settings.can_use_adaptive_granularity) , data_part_storage(data_part_storage_) - , index_granularity(index_granularity_) + , index_granularity(std::move(index_granularity_)) { } @@ -145,7 +150,7 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartCompactWriter( const String & marks_file_extension_, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity); + MergeTreeIndexGranularityPtr computed_index_granularity); MergeTreeDataPartWriterPtr createMergeTreeDataPartWideWriter( const String & data_part_name_, @@ -162,8 +167,7 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWideWriter( const String & marks_file_extension_, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity); - + MergeTreeIndexGranularityPtr computed_index_granularity); MergeTreeDataPartWriterPtr createMergeTreeDataPartWriter( MergeTreeDataPartType part_type, @@ -182,12 +186,26 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWriter( const String & marks_file_extension_, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) + MergeTreeIndexGranularityPtr computed_index_granularity) { if (part_type == MergeTreeDataPartType::Compact) - return createMergeTreeDataPartCompactWriter(data_part_name_, logger_name_, serializations_, data_part_storage_, - index_granularity_info_, storage_settings_, columns_list, column_positions, metadata_snapshot, virtual_columns, indices_to_recalc, stats_to_recalc_, - marks_file_extension_, default_codec_, writer_settings, computed_index_granularity); + return createMergeTreeDataPartCompactWriter( + data_part_name_, + logger_name_, + serializations_, + data_part_storage_, + index_granularity_info_, + storage_settings_, + columns_list, + column_positions, + metadata_snapshot, + virtual_columns, + indices_to_recalc, + stats_to_recalc_, + marks_file_extension_, + default_codec_, + writer_settings, + std::move(computed_index_granularity)); if (part_type == 
MergeTreeDataPartType::Wide) return createMergeTreeDataPartWideWriter( data_part_name_, @@ -204,7 +222,7 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWriter( marks_file_extension_, default_codec_, writer_settings, - computed_index_granularity); + std::move(computed_index_granularity)); throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown part type: {}", part_type.toString()); } diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index d1c76505d7c..244a733dc4e 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -36,7 +36,7 @@ public: const StorageMetadataPtr & metadata_snapshot_, const VirtualsDescriptionPtr & virtual_columns_, const MergeTreeWriterSettings & settings_, - const MergeTreeIndexGranularity & index_granularity_ = {}); + MergeTreeIndexGranularityPtr index_granularity_); virtual ~IMergeTreeDataPartWriter(); @@ -52,7 +52,7 @@ public: PlainMarksByName releaseCachedMarks(); - const MergeTreeIndexGranularity & getIndexGranularity() const { return index_granularity; } + MergeTreeIndexGranularityPtr getIndexGranularity() const { return index_granularity; } protected: SerializationPtr getSerialization(const String & column_name) const; @@ -74,7 +74,7 @@ protected: MutableDataPartStoragePtr data_part_storage; MutableColumns index_columns; - MergeTreeIndexGranularity index_granularity; + MergeTreeIndexGranularityPtr index_granularity; /// Marks that will be saved to cache on finish. PlainMarksByName cached_marks; }; @@ -99,6 +99,6 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWriter( const String & marks_file_extension, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity); + MergeTreeIndexGranularityPtr computed_index_granularity); } diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h index 7dd6d720170..cdaf599b5ea 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -29,7 +29,7 @@ public: virtual void write(const Block & block) = 0; - const MergeTreeIndexGranularity & getIndexGranularity() const + MergeTreeIndexGranularityPtr getIndexGranularity() const { return writer->getIndexGranularity(); } diff --git a/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h b/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h index aff1cf0edb0..050b80b5979 100644 --- a/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h +++ b/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h @@ -51,7 +51,7 @@ public: const MergeTreeIndexGranularityInfo & getIndexGranularityInfo() const override { return data_part->index_granularity_info; } - const MergeTreeIndexGranularity & getIndexGranularity() const override { return data_part->index_granularity; } + const MergeTreeIndexGranularity & getIndexGranularity() const override { return *data_part->index_granularity; } const SerializationInfoByName & getSerializationInfos() const override { return data_part->getSerializationInfos(); } diff --git a/src/Storages/MergeTree/MergeList.cpp b/src/Storages/MergeTree/MergeList.cpp index b80d7fccc91..e88472b9dda 100644 --- a/src/Storages/MergeTree/MergeList.cpp +++ b/src/Storages/MergeTree/MergeList.cpp @@ -52,7 +52,7 @@ MergeListElement::MergeListElement(const StorageID & table_id_, FutureMergedMuta 
total_size_bytes_compressed += source_part->getBytesOnDisk(); total_size_bytes_uncompressed += source_part->getTotalColumnsSize().data_uncompressed; total_size_marks += source_part->getMarksCount(); - total_rows_count += source_part->index_granularity.getTotalRows(); + total_rows_count += source_part->index_granularity->getTotalRows(); } if (!future_part->parts.empty()) diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 193622d7b87..d69559de62d 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -67,6 +68,7 @@ namespace ProfileEvents namespace DB { + namespace Setting { extern const SettingsBool compile_sort_description; @@ -94,6 +96,7 @@ namespace MergeTreeSetting extern const MergeTreeSettingsUInt64 vertical_merge_algorithm_min_rows_to_activate; extern const MergeTreeSettingsBool vertical_merge_remote_filesystem_prefetch; extern const MergeTreeSettingsBool prewarm_mark_cache; + extern const MergeTreeSettingsBool use_const_adaptive_granularity; } namespace ErrorCodes @@ -409,10 +412,11 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const }; auto mutations_snapshot = global_ctx->data->getMutationsSnapshot(params); + const auto & storage_settings = *global_ctx->data->getSettings(); SerializationInfo::Settings info_settings = { - .ratio_of_defaults_for_sparse = (*global_ctx->data->getSettings())[MergeTreeSetting::ratio_of_defaults_for_sparse_serialization], + .ratio_of_defaults_for_sparse = storage_settings[MergeTreeSetting::ratio_of_defaults_for_sparse_serialization], .choose_kind = true, }; @@ -461,6 +465,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const ctx->sum_input_rows_upper_bound = global_ctx->merge_list_element_ptr->total_rows_count; ctx->sum_compressed_bytes_upper_bound = global_ctx->merge_list_element_ptr->total_size_bytes_compressed; + ctx->sum_uncompressed_bytes_upper_bound = global_ctx->merge_list_element_ptr->total_size_bytes_uncompressed; global_ctx->chosen_merge_algorithm = chooseMergeAlgorithm(); global_ctx->merge_list_element_ptr->merge_algorithm.store(global_ctx->chosen_merge_algorithm, std::memory_order_relaxed); @@ -504,8 +509,14 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const throw Exception(ErrorCodes::LOGICAL_ERROR, "Merge algorithm must be chosen"); } - /// If merge is vertical we cannot calculate it - ctx->blocks_are_granules_size = (global_ctx->chosen_merge_algorithm == MergeAlgorithm::Vertical); + bool use_adaptive_granularity = global_ctx->new_data_part->index_granularity_info.mark_type.adaptive; + bool use_const_adaptive_granularity = storage_settings[MergeTreeSetting::use_const_adaptive_granularity]; + + /// If merge is vertical we cannot calculate it. + /// If granularity is constant we don't need to calculate it. 
+ ctx->blocks_are_granules_size = use_adaptive_granularity + && !use_const_adaptive_granularity + && global_ctx->chosen_merge_algorithm == MergeAlgorithm::Vertical; /// Merged stream will be created and available as merged_stream variable createMergedStream(); @@ -547,7 +558,14 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const } } - bool save_marks_in_cache = (*global_ctx->data->getSettings())[MergeTreeSetting::prewarm_mark_cache] && global_ctx->context->getMarkCache(); + auto index_granularity_ptr = createMergeTreeIndexGranularity( + ctx->sum_input_rows_upper_bound, + ctx->sum_uncompressed_bytes_upper_bound, + storage_settings, + global_ctx->new_data_part->index_granularity_info, + ctx->blocks_are_granules_size); + + bool save_marks_in_cache = storage_settings[MergeTreeSetting::prewarm_mark_cache] && global_ctx->context->getMarkCache(); global_ctx->to = std::make_shared( global_ctx->new_data_part, @@ -556,6 +574,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const MergeTreeIndexFactory::instance().getMany(global_ctx->merging_skip_indexes), getStatisticsForColumns(global_ctx->merging_columns, global_ctx->metadata_snapshot), ctx->compression_codec, + std::move(index_granularity_ptr), global_ctx->txn ? global_ctx->txn->tid : Tx::PrehistoricTID, /*reset_columns=*/ true, save_marks_in_cache, @@ -1095,12 +1114,12 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const global_ctx->new_data_part, global_ctx->metadata_snapshot, columns_list, - ctx->compression_codec, column_pipepline.indexes_to_recalc, getStatisticsForColumns(columns_list, global_ctx->metadata_snapshot), + ctx->compression_codec, + global_ctx->to->getIndexGranularity(), &global_ctx->written_offset_columns, - save_marks_in_cache, - global_ctx->to->getIndexGranularity()); + save_marks_in_cache); ctx->column_elems_written = 0; } diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 53792165987..cff4e5e763c 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -243,7 +243,6 @@ private: bool need_remove_expired_values{false}; bool force_ttl{false}; CompressionCodecPtr compression_codec{nullptr}; - size_t sum_input_rows_upper_bound{0}; std::shared_ptr rows_sources_temporary_file; std::optional column_sizes{}; @@ -261,7 +260,9 @@ private: std::function is_cancelled{}; /// Local variables for this stage + size_t sum_input_rows_upper_bound{0}; size_t sum_compressed_bytes_upper_bound{0}; + size_t sum_uncompressed_bytes_upper_bound{0}; bool blocks_are_granules_size{false}; LoggerPtr log{getLogger("MergeTask::PrepareStage")}; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 69979809c31..473f99ab4dd 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -21,6 +21,7 @@ #include #include #include +#include "Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h" #include #include #include @@ -95,6 +96,7 @@ #include #include #include +#include #include #include #include @@ -7237,7 +7239,7 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( /// It's extremely rare that some parts have final marks while others don't. To make it /// straightforward, disable minmax_count projection when `max(pk)' encounters any part with /// no final mark. 
- if (need_primary_key_max_column && !part->index_granularity.hasFinalMark()) + if (need_primary_key_max_column && !part->index_granularity->hasFinalMark()) return {}; real_parts.push_back(part); @@ -8960,10 +8962,15 @@ std::pair MergeTreeData::createE auto compression_codec = getContext()->chooseCompressionCodec(0, 0); const auto & index_factory = MergeTreeIndexFactory::instance(); - MergedBlockOutputStream out(new_data_part, metadata_snapshot, columns, + MergedBlockOutputStream out( + new_data_part, + metadata_snapshot, + columns, index_factory.getMany(metadata_snapshot->getSecondaryIndices()), ColumnsStatistics{}, - compression_codec, txn ? txn->tid : Tx::PrehistoricTID); + compression_codec, + std::make_shared(), + txn ? txn->tid : Tx::PrehistoricTID); bool sync_on_insert = (*settings)[MergeTreeSetting::fsync_after_insert]; diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp index 14c2da82de1..bc10f119c01 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB @@ -15,6 +16,11 @@ namespace ErrorCodes extern const int BAD_SIZE_OF_FILE_IN_DATA_PART; } +namespace MergeTreeSetting +{ + extern MergeTreeSettingsBool enable_index_granularity_compression; +} + MergeTreeDataPartCompact::MergeTreeDataPartCompact( const MergeTreeData & storage_, const String & name_, @@ -62,7 +68,7 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartCompactWriter( const String & marks_file_extension_, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) + MergeTreeIndexGranularityPtr computed_index_granularity) { NamesAndTypesList ordered_columns_list; std::copy_if(columns_list.begin(), columns_list.end(), std::back_inserter(ordered_columns_list), @@ -76,7 +82,7 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartCompactWriter( data_part_name_, logger_name_, serializations_, data_part_storage_, index_granularity_info_, storage_settings_, ordered_columns_list, metadata_snapshot, virtual_columns, indices_to_recalc, stats_to_recalc_, marks_file_extension_, - default_codec_, writer_settings, computed_index_granularity); + default_codec_, writer_settings, std::move(computed_index_granularity)); } @@ -95,8 +101,11 @@ void MergeTreeDataPartCompact::calculateEachColumnSizes(ColumnSizeByName & /*eac } void MergeTreeDataPartCompact::loadIndexGranularityImpl( - MergeTreeIndexGranularity & index_granularity_, const MergeTreeIndexGranularityInfo & index_granularity_info_, - size_t columns_count, const IDataPartStorage & data_part_storage_) + MergeTreeIndexGranularityPtr & index_granularity_ptr, + const MergeTreeIndexGranularityInfo & index_granularity_info_, + size_t columns_count, + const IDataPartStorage & data_part_storage_, + const MergeTreeSettings & storage_settings) { if (!index_granularity_info_.mark_type.adaptive) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeTreeDataPartCompact cannot be created with non-adaptive granularity."); @@ -122,10 +131,14 @@ void MergeTreeDataPartCompact::loadIndexGranularityImpl( marks_reader->ignore(columns_count * sizeof(MarkInCompressedFile)); size_t granularity; readBinaryLittleEndian(granularity, *marks_reader); - index_granularity_.appendMark(granularity); + index_granularity_ptr->appendMark(granularity); } - index_granularity_.setInitialized(); + if 
(storage_settings[MergeTreeSetting::enable_index_granularity_compression]) + { + if (auto new_granularity_ptr = index_granularity_ptr->optimize()) + index_granularity_ptr = std::move(new_granularity_ptr); + } } void MergeTreeDataPartCompact::loadIndexGranularity() @@ -133,7 +146,7 @@ void MergeTreeDataPartCompact::loadIndexGranularity() if (columns.empty()) throw Exception(ErrorCodes::NO_FILE_IN_DATA_PART, "No columns in part {}", name); - loadIndexGranularityImpl(index_granularity, index_granularity_info, columns.size(), getDataPartStorage()); + loadIndexGranularityImpl(index_granularity, index_granularity_info, columns.size(), getDataPartStorage(), *storage.getSettings()); } void MergeTreeDataPartCompact::loadMarksToCache(const Names & column_names, MarkCache * mark_cache) const @@ -152,7 +165,7 @@ void MergeTreeDataPartCompact::loadMarksToCache(const Names & column_names, Mark info_for_read, mark_cache, index_granularity_info.getMarksFilePath(DATA_FILE_NAME), - index_granularity.getMarksCount(), + index_granularity->getMarksCount(), index_granularity_info, /*save_marks_in_cache=*/ true, read_settings, @@ -227,7 +240,7 @@ void MergeTreeDataPartCompact::doCheckConsistency(bool require_part_metadata) co getDataPartStorage().getRelativePath(), std::string(fs::path(getDataPartStorage().getFullPath()) / mrk_file_name)); - UInt64 expected_file_size = index_granularity_info.getMarkSizeInBytes(columns.size()) * index_granularity.getMarksCount(); + UInt64 expected_file_size = index_granularity_info.getMarkSizeInBytes(columns.size()) * index_granularity->getMarksCount(); if (expected_file_size != file_size) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.h b/src/Storages/MergeTree/MergeTreeDataPartCompact.h index 8e279571578..aa87bf53fa7 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.h @@ -60,8 +60,11 @@ public: protected: static void loadIndexGranularityImpl( - MergeTreeIndexGranularity & index_granularity_, const MergeTreeIndexGranularityInfo & index_granularity_info_, - size_t columns_count, const IDataPartStorage & data_part_storage_); + MergeTreeIndexGranularityPtr & index_granularity_, + const MergeTreeIndexGranularityInfo & index_granularity_info_, + size_t columns_count, + const IDataPartStorage & data_part_storage_, + const MergeTreeSettings & storage_settings); void doCheckConsistency(bool require_part_metadata) const override; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index c515d645253..0330068fd25 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB @@ -17,6 +18,11 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +namespace MergeTreeSetting +{ + extern MergeTreeSettingsBool enable_index_granularity_compression; +} + MergeTreeDataPartWide::MergeTreeDataPartWide( const MergeTreeData & storage_, const String & name_, @@ -68,14 +74,14 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWideWriter( const String & marks_file_extension_, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) + MergeTreeIndexGranularityPtr computed_index_granularity) { return std::make_unique( data_part_name_, logger_name_, serializations_, data_part_storage_, 
index_granularity_info_, storage_settings_, columns_list, metadata_snapshot, virtual_columns, indices_to_recalc, stats_to_recalc_, marks_file_extension_, - default_codec_, writer_settings, computed_index_granularity); + default_codec_, writer_settings, std::move(computed_index_granularity)); } @@ -114,8 +120,11 @@ ColumnSize MergeTreeDataPartWide::getColumnSizeImpl( } void MergeTreeDataPartWide::loadIndexGranularityImpl( - MergeTreeIndexGranularity & index_granularity_, MergeTreeIndexGranularityInfo & index_granularity_info_, - const IDataPartStorage & data_part_storage_, const std::string & any_column_file_name) + MergeTreeIndexGranularityPtr & index_granularity_ptr, + MergeTreeIndexGranularityInfo & index_granularity_info_, + const IDataPartStorage & data_part_storage_, + const std::string & any_column_file_name, + const MergeTreeSettings & storage_settings) { index_granularity_info_.changeGranularityIfRequired(data_part_storage_); @@ -127,12 +136,13 @@ void MergeTreeDataPartWide::loadIndexGranularityImpl( std::string(fs::path(data_part_storage_.getFullPath()) / marks_file_path)); size_t marks_file_size = data_part_storage_.getFileSize(marks_file_path); + size_t fixed_granularity = index_granularity_info_.fixed_index_granularity; if (!index_granularity_info_.mark_type.adaptive && !index_granularity_info_.mark_type.compressed) { /// The most easy way - no need to read the file, everything is known from its size. size_t marks_count = marks_file_size / index_granularity_info_.getMarkSizeInBytes(); - index_granularity_.resizeWithFixedGranularity(marks_count, index_granularity_info_.fixed_index_granularity); /// all the same + index_granularity_ptr = std::make_shared(fixed_granularity, fixed_granularity, marks_count, false); } else { @@ -145,6 +155,7 @@ void MergeTreeDataPartWide::loadIndexGranularityImpl( marks_reader = std::make_unique(std::move(marks_file)); size_t marks_count = 0; + while (!marks_reader->eof()) { MarkInCompressedFile mark; @@ -157,15 +168,20 @@ void MergeTreeDataPartWide::loadIndexGranularityImpl( if (index_granularity_info_.mark_type.adaptive) { readBinaryLittleEndian(granularity, *marks_reader); - index_granularity_.appendMark(granularity); + index_granularity_ptr->appendMark(granularity); } } if (!index_granularity_info_.mark_type.adaptive) - index_granularity_.resizeWithFixedGranularity(marks_count, index_granularity_info_.fixed_index_granularity); /// all the same + { + index_granularity_ptr = std::make_shared(fixed_granularity, fixed_granularity, marks_count, false); + } + else if (storage_settings[MergeTreeSetting::enable_index_granularity_compression]) + { + if (auto new_granularity_ptr = index_granularity_ptr->optimize()) + index_granularity_ptr = std::move(new_granularity_ptr); + } } - - index_granularity_.setInitialized(); } void MergeTreeDataPartWide::loadIndexGranularity() @@ -179,7 +195,7 @@ void MergeTreeDataPartWide::loadIndexGranularity() "There are no files for column {} in part {}", columns.front().name, getDataPartStorage().getFullPath()); - loadIndexGranularityImpl(index_granularity, index_granularity_info, getDataPartStorage(), *any_column_filename); + loadIndexGranularityImpl(index_granularity, index_granularity_info, getDataPartStorage(), *any_column_filename, *storage.getSettings()); } void MergeTreeDataPartWide::loadMarksToCache(const Names & column_names, MarkCache * mark_cache) const @@ -209,7 +225,7 @@ void MergeTreeDataPartWide::loadMarksToCache(const Names & column_names, MarkCac info_for_read, mark_cache, 
index_granularity_info.getMarksFilePath(*stream_name), - index_granularity.getMarksCount(), + index_granularity->getMarksCount(), index_granularity_info, /*save_marks_in_cache=*/ true, read_settings, diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.h b/src/Storages/MergeTree/MergeTreeDataPartWide.h index 022a5fb746c..638fac6d674 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.h @@ -55,8 +55,11 @@ public: protected: static void loadIndexGranularityImpl( - MergeTreeIndexGranularity & index_granularity_, MergeTreeIndexGranularityInfo & index_granularity_info_, - const IDataPartStorage & data_part_storage_, const std::string & any_column_file_name); + MergeTreeIndexGranularityPtr & index_granularity_ptr, + MergeTreeIndexGranularityInfo & index_granularity_info_, + const IDataPartStorage & data_part_storage_, + const std::string & any_column_file_name, + const MergeTreeSettings & storage_settings); void doCheckConsistency(bool require_part_metadata) const override; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 67a2c1ee9f1..b4136f41943 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -25,13 +25,13 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( const String & marks_file_extension_, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & settings_, - const MergeTreeIndexGranularity & index_granularity_) + MergeTreeIndexGranularityPtr index_granularity_) : MergeTreeDataPartWriterOnDisk( data_part_name_, logger_name_, serializations_, data_part_storage_, index_granularity_info_, storage_settings_, columns_list_, metadata_snapshot_, virtual_columns_, indices_to_recalc_, stats_to_recalc, marks_file_extension_, - default_codec_, settings_, index_granularity_) + default_codec_, settings_, std::move(index_granularity_)) , plain_file(getDataPartStorage().writeFile( MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION, settings.max_compress_block_size, @@ -200,13 +200,13 @@ void MergeTreeDataPartWriterCompact::write(const Block & block, const IColumn::P header = result_block.cloneEmpty(); columns_buffer.add(result_block.mutateColumns()); - size_t current_mark_rows = index_granularity.getMarkRows(getCurrentMark()); + size_t current_mark_rows = index_granularity->getMarkRows(getCurrentMark()); size_t rows_in_buffer = columns_buffer.size(); if (rows_in_buffer >= current_mark_rows) { Block flushed_block = header.cloneWithColumns(columns_buffer.releaseColumns()); - auto granules_to_write = getGranulesToWrite(index_granularity, flushed_block.rows(), getCurrentMark(), /* last_block = */ false); + auto granules_to_write = getGranulesToWrite(*index_granularity, flushed_block.rows(), getCurrentMark(), /* last_block = */ false); writeDataBlockPrimaryIndexAndSkipIndices(flushed_block, granules_to_write); setCurrentMark(getCurrentMark() + granules_to_write.size()); calculateAndSerializeStatistics(flushed_block); @@ -285,12 +285,11 @@ void MergeTreeDataPartWriterCompact::fillDataChecksums(MergeTreeDataPartChecksum if (columns_buffer.size() != 0) { auto block = header.cloneWithColumns(columns_buffer.releaseColumns()); - auto granules_to_write = getGranulesToWrite(index_granularity, block.rows(), getCurrentMark(), /* last_block = */ true); + auto granules_to_write = getGranulesToWrite(*index_granularity, block.rows(), 
getCurrentMark(), /*last_block=*/ true); if (!granules_to_write.back().is_complete) { /// Correct last mark as it should contain exact amount of rows. - index_granularity.popMark(); - index_granularity.appendMark(granules_to_write.back().rows_to_write); + index_granularity->adjustLastMark(granules_to_write.back().rows_to_write); } writeDataBlockPrimaryIndexAndSkipIndices(block, granules_to_write); } @@ -386,11 +385,11 @@ static void fillIndexGranularityImpl( void MergeTreeDataPartWriterCompact::fillIndexGranularity(size_t index_granularity_for_block, size_t rows_in_block) { size_t index_offset = 0; - if (index_granularity.getMarksCount() > getCurrentMark()) - index_offset = index_granularity.getMarkRows(getCurrentMark()) - columns_buffer.size(); + if (index_granularity->getMarksCount() > getCurrentMark()) + index_offset = index_granularity->getMarkRows(getCurrentMark()) - columns_buffer.size(); fillIndexGranularityImpl( - index_granularity, + *index_granularity, index_offset, index_granularity_for_block, rows_in_block); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h index 20c47fb8314..299fad41ac3 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h @@ -25,7 +25,7 @@ public: const String & marks_file_extension, const CompressionCodecPtr & default_codec, const MergeTreeWriterSettings & settings, - const MergeTreeIndexGranularity & index_granularity); + MergeTreeIndexGranularityPtr index_granularity_); void write(const Block & block, const IColumn::Permutation * permutation) override; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 388737915ab..80df77cc97f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -162,20 +162,20 @@ MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk( const String & marks_file_extension_, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & settings_, - const MergeTreeIndexGranularity & index_granularity_) + MergeTreeIndexGranularityPtr index_granularity_) : IMergeTreeDataPartWriter( data_part_name_, serializations_, data_part_storage_, index_granularity_info_, - storage_settings_, columns_list_, metadata_snapshot_, virtual_columns_, settings_, index_granularity_) + storage_settings_, columns_list_, metadata_snapshot_, virtual_columns_, settings_, std::move(index_granularity_)) , skip_indices(indices_to_recalc_) , stats(stats_to_recalc_) , marks_file_extension(marks_file_extension_) , default_codec(default_codec_) - , compute_granularity(index_granularity.empty()) + , compute_granularity(index_granularity->empty()) , compress_primary_key(settings.compress_primary_key) , execution_stats(skip_indices.size(), stats.size()) , log(getLogger(logger_name_ + " (DataPartWriter)")) { - if (settings.blocks_are_granules_size && !index_granularity.empty()) + if (settings.blocks_are_granules_size && !index_granularity->empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't take information about index granularity from blocks, when non empty index_granularity array specified"); @@ -189,63 +189,15 @@ MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk( initStatistics(); } -// Implementation is split into static functions for ability -/// of making unit tests without creation instance of IMergeTreeDataPartWriter, 
-/// which requires a lot of dependencies and access to filesystem. -static size_t computeIndexGranularityImpl( - const Block & block, - size_t index_granularity_bytes, - size_t fixed_index_granularity_rows, - bool blocks_are_granules, - bool can_use_adaptive_index_granularity) -{ - size_t rows_in_block = block.rows(); - size_t index_granularity_for_block; - - if (!can_use_adaptive_index_granularity) - { - index_granularity_for_block = fixed_index_granularity_rows; - } - else - { - size_t block_size_in_memory = block.bytes(); - if (blocks_are_granules) - { - index_granularity_for_block = rows_in_block; - } - else if (block_size_in_memory >= index_granularity_bytes) - { - size_t granules_in_block = block_size_in_memory / index_granularity_bytes; - index_granularity_for_block = rows_in_block / granules_in_block; - } - else - { - size_t size_of_row_in_bytes = std::max(block_size_in_memory / rows_in_block, 1UL); - index_granularity_for_block = index_granularity_bytes / size_of_row_in_bytes; - } - } - - /// We should be less or equal than fixed index granularity. - /// But if block size is a granule size then do not adjust it. - /// Granularity greater than fixed granularity might come from compact part. - if (!blocks_are_granules) - index_granularity_for_block = std::min(fixed_index_granularity_rows, index_granularity_for_block); - - /// Very rare case when index granularity bytes less than single row. - if (index_granularity_for_block == 0) - index_granularity_for_block = 1; - - return index_granularity_for_block; -} - size_t MergeTreeDataPartWriterOnDisk::computeIndexGranularity(const Block & block) const { - return computeIndexGranularityImpl( - block, - (*storage_settings)[MergeTreeSetting::index_granularity_bytes], - (*storage_settings)[MergeTreeSetting::index_granularity], - settings.blocks_are_granules_size, - settings.can_use_adaptive_granularity); + return computeIndexGranularityForBlock( + block.rows(), + block.bytes(), + (*storage_settings)[MergeTreeSetting::index_granularity_bytes], + (*storage_settings)[MergeTreeSetting::index_granularity], + settings.blocks_are_granules_size, + settings.can_use_adaptive_granularity); } void MergeTreeDataPartWriterOnDisk::initPrimaryIndex() @@ -433,7 +385,7 @@ void MergeTreeDataPartWriterOnDisk::fillPrimaryIndexChecksums(MergeTreeData::Dat { bool write_final_mark = (with_final_mark && data_written); if (write_final_mark && compute_granularity) - index_granularity.appendMark(0); + index_granularity->appendMark(0); if (index_file_hashing_stream) { diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 4a760c20b58..878fcb0307f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -116,7 +116,7 @@ public: const String & marks_file_extension, const CompressionCodecPtr & default_codec, const MergeTreeWriterSettings & settings, - const MergeTreeIndexGranularity & index_granularity); + MergeTreeIndexGranularityPtr index_granularity_); void setWrittenOffsetColumns(WrittenOffsetColumns * written_offset_columns_) { diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 433c7c21613..8e58a07af4d 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -99,13 +99,13 @@ MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( const String & 
marks_file_extension_, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & settings_, - const MergeTreeIndexGranularity & index_granularity_) + MergeTreeIndexGranularityPtr index_granularity_) : MergeTreeDataPartWriterOnDisk( data_part_name_, logger_name_, serializations_, data_part_storage_, index_granularity_info_, storage_settings_, columns_list_, metadata_snapshot_, virtual_columns_, indices_to_recalc_, stats_to_recalc_, marks_file_extension_, - default_codec_, settings_, index_granularity_) + default_codec_, settings_, std::move(index_granularity_)) { if (settings.save_marks_in_cache) { @@ -255,8 +255,8 @@ void MergeTreeDataPartWriterWide::shiftCurrentMark(const Granules & granules_wri if (settings.can_use_adaptive_granularity && settings.blocks_are_granules_size) throw Exception(ErrorCodes::LOGICAL_ERROR, "Incomplete granules are not allowed while blocks are granules size. " "Mark number {} (rows {}), rows written in last mark {}, rows to write in last mark from block {} (from row {}), " - "total marks currently {}", last_granule.mark_number, index_granularity.getMarkRows(last_granule.mark_number), - rows_written_in_last_mark, last_granule.rows_to_write, last_granule.start_row, index_granularity.getMarksCount()); + "total marks currently {}", last_granule.mark_number, index_granularity->getMarkRows(last_granule.mark_number), + rows_written_in_last_mark, last_granule.rows_to_write, last_granule.start_row, index_granularity->getMarksCount()); /// Shift forward except last granule setCurrentMark(getCurrentMark() + granules_written.size() - 1); @@ -288,7 +288,7 @@ void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Perm size_t index_granularity_for_block = computeIndexGranularity(block); if (rows_written_in_last_mark > 0) { - size_t rows_left_in_last_mark = index_granularity.getMarkRows(getCurrentMark()) - rows_written_in_last_mark; + size_t rows_left_in_last_mark = index_granularity->getMarkRows(getCurrentMark()) - rows_written_in_last_mark; /// Previous granularity was much bigger than our new block's /// granularity let's adjust it, because we want add new /// heavy-weight blocks into small old granule. @@ -308,7 +308,7 @@ void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Perm Block block_to_write = block; - auto granules_to_write = getGranulesToWrite(index_granularity, block_to_write.rows(), getCurrentMark(), rows_written_in_last_mark); + auto granules_to_write = getGranulesToWrite(*index_granularity, block_to_write.rows(), getCurrentMark(), rows_written_in_last_mark); auto offset_columns = written_offset_columns ? *written_offset_columns : WrittenOffsetColumns{}; Block primary_key_block; @@ -496,7 +496,7 @@ void MergeTreeDataPartWriterWide::writeColumn( throw Exception(ErrorCodes::LOGICAL_ERROR, "We have to add new mark for column, but already have non written mark. 
" "Current mark {}, total marks {}, offset {}", - getCurrentMark(), index_granularity.getMarksCount(), rows_written_in_last_mark); + getCurrentMark(), index_granularity->getMarksCount(), rows_written_in_last_mark); last_non_written_marks[name] = getCurrentMarksForColumn(name_and_type, column.getPtr(), offset_columns); } @@ -516,7 +516,7 @@ void MergeTreeDataPartWriterWide::writeColumn( throw Exception(ErrorCodes::LOGICAL_ERROR, "No mark was saved for incomplete granule for column {}", backQuoteIfNeed(name)); for (const auto & mark : marks_it->second) - flushMarkToFile(mark, index_granularity.getMarkRows(granule.mark_number)); + flushMarkToFile(mark, index_granularity->getMarkRows(granule.mark_number)); last_non_written_marks.erase(marks_it); } } @@ -563,10 +563,10 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai for (mark_num = 0; !mrk_in->eof(); ++mark_num) { - if (mark_num > index_granularity.getMarksCount()) + if (mark_num > index_granularity->getMarksCount()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Incorrect number of marks in memory {}, on disk (at least) {}", - index_granularity.getMarksCount(), mark_num + 1); + index_granularity->getMarksCount(), mark_num + 1); readBinaryLittleEndian(offset_in_compressed_file, *mrk_in); readBinaryLittleEndian(offset_in_decompressed_block, *mrk_in); @@ -597,10 +597,10 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai throw Exception(ErrorCodes::LOGICAL_ERROR, "Still have {} rows in bin stream, last mark #{}" " index granularity size {}, last rows {}", - column->size(), mark_num, index_granularity.getMarksCount(), index_granularity_rows); + column->size(), mark_num, index_granularity->getMarksCount(), index_granularity_rows); } - if (index_granularity_rows != index_granularity.getMarkRows(mark_num)) + if (index_granularity_rows != index_granularity->getMarkRows(mark_num)) { throw Exception( ErrorCodes::LOGICAL_ERROR, @@ -608,8 +608,8 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai " (compressed offset {}, decompressed offset {}), in-memory {}, on disk {}, total marks {}", getDataPartStorage().getFullPath(), mark_num, offset_in_compressed_file, offset_in_decompressed_block, - index_granularity.getMarkRows(mark_num), index_granularity_rows, - index_granularity.getMarksCount()); + index_granularity->getMarkRows(mark_num), index_granularity_rows, + index_granularity->getMarksCount()); } auto column = type->createColumn(); @@ -644,7 +644,7 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai ErrorCodes::LOGICAL_ERROR, "Incorrect mark rows for mark #{} (compressed offset {}, decompressed offset {}), " "actually in bin file {}, in mrk file {}, total marks {}", mark_num, offset_in_compressed_file, offset_in_decompressed_block, column->size(), - index_granularity.getMarkRows(mark_num), index_granularity.getMarksCount()); + index_granularity->getMarkRows(mark_num), index_granularity->getMarksCount()); } } @@ -652,7 +652,7 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai throw Exception(ErrorCodes::LOGICAL_ERROR, "Still have something in marks stream, last mark #{}" " index granularity size {}, last rows {}", - mark_num, index_granularity.getMarksCount(), index_granularity_rows); + mark_num, index_granularity->getMarksCount(), index_granularity_rows); if (!bin_in.eof()) { auto column = type->createColumn(); @@ -662,7 +662,7 @@ void 
MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai throw Exception(ErrorCodes::LOGICAL_ERROR, "Still have {} rows in bin stream, last mark #{}" " index granularity size {}, last rows {}", - column->size(), mark_num, index_granularity.getMarksCount(), index_granularity_rows); + column->size(), mark_num, index_granularity->getMarksCount(), index_granularity_rows); } } @@ -679,8 +679,8 @@ void MergeTreeDataPartWriterWide::fillDataChecksums(MergeTreeDataPartChecksums & throw Exception(ErrorCodes::LOGICAL_ERROR, "Incomplete granule is not allowed while blocks are granules size even for last granule. " "Mark number {} (rows {}), rows written for last mark {}, total marks {}", - getCurrentMark(), index_granularity.getMarkRows(getCurrentMark()), - rows_written_in_last_mark, index_granularity.getMarksCount()); + getCurrentMark(), index_granularity->getMarkRows(getCurrentMark()), + rows_written_in_last_mark, index_granularity->getMarksCount()); adjustLastMarkIfNeedAndFlushToDisk(rows_written_in_last_mark); } @@ -799,16 +799,16 @@ static void fillIndexGranularityImpl( void MergeTreeDataPartWriterWide::fillIndexGranularity(size_t index_granularity_for_block, size_t rows_in_block) { - if (getCurrentMark() < index_granularity.getMarksCount() && getCurrentMark() != index_granularity.getMarksCount() - 1) + if (getCurrentMark() < index_granularity->getMarksCount() && getCurrentMark() != index_granularity->getMarksCount() - 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to add marks, while current mark {}, but total marks {}", - getCurrentMark(), index_granularity.getMarksCount()); + getCurrentMark(), index_granularity->getMarksCount()); size_t index_offset = 0; if (rows_written_in_last_mark != 0) - index_offset = index_granularity.getLastMarkRows() - rows_written_in_last_mark; + index_offset = index_granularity->getLastMarkRows() - rows_written_in_last_mark; fillIndexGranularityImpl( - index_granularity, + *index_granularity, index_offset, index_granularity_for_block, rows_in_block); @@ -827,27 +827,26 @@ void MergeTreeDataPartWriterWide::adjustLastMarkIfNeedAndFlushToDisk(size_t new_ /// other columns if (compute_granularity && settings.can_use_adaptive_granularity) { - if (getCurrentMark() != index_granularity.getMarksCount() - 1) + if (getCurrentMark() != index_granularity->getMarksCount() - 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "Non last mark {} (with {} rows) having rows offset {}, total marks {}", - getCurrentMark(), index_granularity.getMarkRows(getCurrentMark()), - rows_written_in_last_mark, index_granularity.getMarksCount()); + getCurrentMark(), index_granularity->getMarkRows(getCurrentMark()), + rows_written_in_last_mark, index_granularity->getMarksCount()); - index_granularity.popMark(); - index_granularity.appendMark(new_rows_in_last_mark); + index_granularity->adjustLastMark(new_rows_in_last_mark); } /// Last mark should be filled, otherwise it's a bug if (last_non_written_marks.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "No saved marks for last mark {} having rows offset {}, total marks {}", - getCurrentMark(), rows_written_in_last_mark, index_granularity.getMarksCount()); + getCurrentMark(), rows_written_in_last_mark, index_granularity->getMarksCount()); if (rows_written_in_last_mark == new_rows_in_last_mark) { for (const auto & [name, marks] : last_non_written_marks) { for (const auto & mark : marks) - flushMarkToFile(mark, index_granularity.getMarkRows(getCurrentMark())); + flushMarkToFile(mark, 
index_granularity->getMarkRows(getCurrentMark())); } last_non_written_marks.clear(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h index b594b2d79bb..f2c4b0809bc 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h @@ -35,7 +35,7 @@ public: const String & marks_file_extension, const CompressionCodecPtr & default_codec, const MergeTreeWriterSettings & settings, - const MergeTreeIndexGranularity & index_granularity); + MergeTreeIndexGranularityPtr index_granularity_); void write(const Block & block, const IColumn::Permutation * permutation) override; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 1b3c58000e7..be3a9cdc9b5 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -129,7 +129,7 @@ size_t MergeTreeDataSelectExecutor::getApproximateTotalRowsToRead( { MarkRanges part_ranges = markRangesFromPKRange(part, metadata_snapshot, key_condition, {}, &exact_ranges, settings, log); for (const auto & range : part_ranges) - rows_count += part->index_granularity.getRowsCountInRange(range); + rows_count += part->index_granularity->getRowsCountInRange(range); } UNUSED(exact_ranges); @@ -688,7 +688,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd auto & part = parts[part_index]; RangesInDataPart ranges(part, part_index); - size_t total_marks_count = part->index_granularity.getMarksCountWithoutFinal(); + size_t total_marks_count = part->index_granularity->getMarksCountWithoutFinal(); if (metadata_snapshot->hasPrimaryKey() || part_offset_condition) { @@ -1044,12 +1044,12 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( { MarkRanges res; - size_t marks_count = part->index_granularity.getMarksCount(); + size_t marks_count = part->index_granularity->getMarksCount(); const auto & index = part->getIndex(); if (marks_count == 0) return res; - bool has_final_mark = part->index_granularity.hasFinalMark(); + bool has_final_mark = part->index_granularity->hasFinalMark(); bool key_condition_useful = !key_condition.alwaysUnknownOrTrue(); bool part_offset_condition_useful = part_offset_condition && !part_offset_condition->alwaysUnknownOrTrue(); @@ -1156,16 +1156,16 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( auto check_part_offset_condition = [&]() { - auto begin = part->index_granularity.getMarkStartingRow(range.begin); - auto end = part->index_granularity.getMarkStartingRow(range.end) - 1; + auto begin = part->index_granularity->getMarkStartingRow(range.begin); + auto end = part->index_granularity->getMarkStartingRow(range.end) - 1; if (begin > end) { /// Empty mark (final mark) return BoolMask(false, true); } - part_offset_left[0] = part->index_granularity.getMarkStartingRow(range.begin); - part_offset_right[0] = part->index_granularity.getMarkStartingRow(range.end) - 1; + part_offset_left[0] = part->index_granularity->getMarkStartingRow(range.begin); + part_offset_right[0] = part->index_granularity->getMarkStartingRow(range.end) - 1; part_offset_left[1] = part->name; part_offset_right[1] = part->name; @@ -1377,9 +1377,8 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex( part->index_granularity_info.fixed_index_granularity, part->index_granularity_info.index_granularity_bytes); - size_t marks_count = 
part->getMarksCount(); - size_t final_mark = part->index_granularity.hasFinalMark(); - size_t index_marks_count = (marks_count - final_mark + index_granularity - 1) / index_granularity; + size_t marks_count = part->index_granularity->getMarksCountWithoutFinal(); + size_t index_marks_count = (marks_count + index_granularity - 1) / index_granularity; MarkRanges index_ranges; for (const auto & range : ranges) @@ -1427,8 +1426,7 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex( for (auto row : rows) { - const MergeTreeIndexGranularity & merge_tree_index_granularity = part->index_granularity; - size_t num_marks = merge_tree_index_granularity.countMarksForRows(index_mark * index_granularity, row); + size_t num_marks = part->index_granularity->countMarksForRows(index_mark * index_granularity, row); MarkRange data_range( std::max(ranges[i].begin, (index_mark * index_granularity) + num_marks), @@ -1501,9 +1499,8 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingMergedIndex( part->index_granularity_info.fixed_index_granularity, part->index_granularity_info.index_granularity_bytes); - size_t marks_count = part->getMarksCount(); - size_t final_mark = part->index_granularity.hasFinalMark(); - size_t index_marks_count = (marks_count - final_mark + index_granularity - 1) / index_granularity; + size_t marks_count = part->index_granularity->getMarksCountWithoutFinal(); + size_t index_marks_count = (marks_count + index_granularity - 1) / index_granularity; std::vector> readers; for (const auto & index_helper : indices) @@ -1603,9 +1600,7 @@ void MergeTreeDataSelectExecutor::selectPartsToRead( continue; } - size_t num_granules = part->getMarksCount(); - if (num_granules && part->index_granularity.hasFinalMark()) - --num_granules; + size_t num_granules = part->index_granularity->getMarksCountWithoutFinal(); counters.num_initial_selected_parts += 1; counters.num_initial_selected_granules += num_granules; @@ -1672,9 +1667,7 @@ void MergeTreeDataSelectExecutor::selectPartsToReadWithUUIDFilter( if (part->uuid != UUIDHelpers::Nil && ignored_part_uuids->has(part->uuid)) continue; - size_t num_granules = part->getMarksCount(); - if (num_granules && part->index_granularity.hasFinalMark()) - --num_granules; + size_t num_granules = part->index_granularity->getMarksCountWithoutFinal(); counters.num_initial_selected_parts += 1; counters.num_initial_selected_granules += num_granules; diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index ac29a9244b0..76710f938f9 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -687,6 +687,13 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( auto compression_codec = data.getContext()->chooseCompressionCodec(0, 0); bool save_marks_in_cache = (*data_settings)[MergeTreeSetting::prewarm_mark_cache] && data.getContext()->getMarkCache(); + auto index_granularity_ptr = createMergeTreeIndexGranularity( + block.rows(), + block.bytes(), + *data.getSettings(), + new_data_part->index_granularity_info, + /*blocks_are_granules=*/ false); + auto out = std::make_unique( new_data_part, metadata_snapshot, @@ -694,6 +701,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( indices, statistics, compression_codec, + std::move(index_granularity_ptr), context->getCurrentTransaction() ? 
context->getCurrentTransaction()->tid : Tx::PrehistoricTID, /*reset_columns=*/ false, save_marks_in_cache, @@ -834,6 +842,13 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( auto compression_codec = data.getContext()->chooseCompressionCodec(0, 0); bool save_marks_in_cache = (*data.getSettings())[MergeTreeSetting::prewarm_mark_cache] && data.getContext()->getMarkCache(); + auto index_granularity_ptr = createMergeTreeIndexGranularity( + block.rows(), + block.bytes(), + *data.getSettings(), + new_data_part->index_granularity_info, + /*blocks_are_granules=*/ false); + auto out = std::make_unique( new_data_part, metadata_snapshot, @@ -842,6 +857,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( /// TODO(hanfei): It should be helpful to write statistics for projection result. ColumnsStatistics{}, compression_codec, + std::move(index_granularity_ptr), Tx::PrehistoricTID, /*reset_columns=*/ false, save_marks_in_cache, diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp index d69a00643f0..980abaee01c 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp @@ -1,77 +1,25 @@ +#include #include -#include +#include +#include +#include #include - +#include "Common/Exception.h" +#include "Storages/MergeTree/MergeTreeDataPartType.h" namespace DB { + namespace ErrorCodes { extern const int LOGICAL_ERROR; } -MergeTreeIndexGranularity::MergeTreeIndexGranularity(const std::vector & marks_rows_partial_sums_) - : marks_rows_partial_sums(marks_rows_partial_sums_) +namespace MergeTreeSetting { -} - -/// Rows after mark to next mark -size_t MergeTreeIndexGranularity::getMarkRows(size_t mark_index) const -{ - if (mark_index >= getMarksCount()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to get non existing mark {}, while size is {}", mark_index, getMarksCount()); - if (mark_index == 0) - return marks_rows_partial_sums[0]; - return marks_rows_partial_sums[mark_index] - marks_rows_partial_sums[mark_index - 1]; -} - -size_t MergeTreeIndexGranularity::getMarkStartingRow(size_t mark_index) const -{ - if (mark_index == 0) - return 0; - return marks_rows_partial_sums[mark_index - 1]; -} - -size_t MergeTreeIndexGranularity::getMarksCount() const -{ - return marks_rows_partial_sums.size(); -} - -size_t MergeTreeIndexGranularity::getTotalRows() const -{ - if (marks_rows_partial_sums.empty()) - return 0; - return marks_rows_partial_sums.back(); -} - -void MergeTreeIndexGranularity::appendMark(size_t rows_count) -{ - if (marks_rows_partial_sums.empty()) - marks_rows_partial_sums.push_back(rows_count); - else - marks_rows_partial_sums.push_back(marks_rows_partial_sums.back() + rows_count); -} - -void MergeTreeIndexGranularity::addRowsToLastMark(size_t rows_count) -{ - if (marks_rows_partial_sums.empty()) - marks_rows_partial_sums.push_back(rows_count); - else - marks_rows_partial_sums.back() += rows_count; -} - -void MergeTreeIndexGranularity::popMark() -{ - if (!marks_rows_partial_sums.empty()) - marks_rows_partial_sums.pop_back(); -} - -size_t MergeTreeIndexGranularity::getRowsCountInRange(size_t begin, size_t end) const -{ - size_t subtrahend = 0; - if (begin != 0) - subtrahend = marks_rows_partial_sums[begin - 1]; - return marks_rows_partial_sums[end - 1] - subtrahend; + extern const MergeTreeSettingsUInt64 index_granularity; + extern const MergeTreeSettingsUInt64 index_granularity_bytes; + extern const 
MergeTreeSettingsBool use_const_adaptive_granularity; } size_t MergeTreeIndexGranularity::getRowsCountInRange(const MarkRange & range) const @@ -87,39 +35,111 @@ size_t MergeTreeIndexGranularity::getRowsCountInRanges(const MarkRanges & ranges return total; } -size_t MergeTreeIndexGranularity::countMarksForRows(size_t from_mark, size_t number_of_rows) const +size_t MergeTreeIndexGranularity::getMarksCountWithoutFinal() const { - size_t rows_before_mark = getMarkStartingRow(from_mark); - size_t last_row_pos = rows_before_mark + number_of_rows; - auto it = std::upper_bound(marks_rows_partial_sums.begin(), marks_rows_partial_sums.end(), last_row_pos); - size_t to_mark = it - marks_rows_partial_sums.begin(); - return to_mark - from_mark; + size_t total = getMarksCount(); + if (total == 0) + return total; + return total - hasFinalMark(); } -size_t MergeTreeIndexGranularity::countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const +size_t MergeTreeIndexGranularity::getLastMarkRows() const { - size_t rows_before_mark = getMarkStartingRow(from_mark); - size_t last_row_pos = rows_before_mark + offset_in_rows + number_of_rows; - auto it = std::upper_bound(marks_rows_partial_sums.begin(), marks_rows_partial_sums.end(), last_row_pos); - size_t to_mark = it - marks_rows_partial_sums.begin(); - - return getRowsCountInRange(from_mark, std::max(1UL, to_mark)) - offset_in_rows; + return getMarkRows(getMarksCount() - 1); } -void MergeTreeIndexGranularity::resizeWithFixedGranularity(size_t size, size_t fixed_granularity) +size_t MergeTreeIndexGranularity::getLastNonFinalMarkRows() const { - marks_rows_partial_sums.resize(size); + size_t last_mark_rows = getLastMarkRows(); + if (last_mark_rows != 0) + return last_mark_rows; + return getMarkRows(getMarksCount() - 2); +} - size_t prev = 0; - for (size_t i = 0; i < size; ++i) +void MergeTreeIndexGranularity::addRowsToLastMark(size_t rows_count) +{ + if (hasFinalMark()) { - marks_rows_partial_sums[i] = fixed_granularity + prev; - prev = marks_rows_partial_sums[i]; + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot add rows to final mark"); + } + else if (empty()) + { + appendMark(rows_count); + } + else + { + adjustLastMark(getLastMarkRows() + rows_count); } } -std::string MergeTreeIndexGranularity::describe() const +size_t computeIndexGranularityForBlock( + size_t rows_in_block, + size_t bytes_in_block, + size_t index_granularity_bytes, + size_t fixed_index_granularity_rows, + bool blocks_are_granules, + bool can_use_adaptive_index_granularity) { - return fmt::format("initialized: {}, marks_rows_partial_sums: [{}]", initialized, fmt::join(marks_rows_partial_sums, ", ")); + size_t index_granularity_for_block; + + if (!can_use_adaptive_index_granularity) + { + index_granularity_for_block = fixed_index_granularity_rows; + } + else + { + if (blocks_are_granules) + { + index_granularity_for_block = rows_in_block; + } + else if (bytes_in_block >= index_granularity_bytes) + { + size_t granules_in_block = bytes_in_block / index_granularity_bytes; + index_granularity_for_block = rows_in_block / granules_in_block; + } + else + { + size_t size_of_row_in_bytes = std::max(bytes_in_block / rows_in_block, 1UL); + index_granularity_for_block = index_granularity_bytes / size_of_row_in_bytes; + } + } + + /// We should be less or equal than fixed index granularity. + /// But if block size is a granule size then do not adjust it. + /// Granularity greater than fixed granularity might come from compact part. 
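+    ///
+    /// Worked example (an illustration, assuming the default settings index_granularity = 8192
+    /// and index_granularity_bytes = 10 MiB): a block of 1 MiB with 100000 rows gives
+    /// size_of_row_in_bytes = max(1048576 / 100000, 1) = 10, so index_granularity_for_block
+    /// is 10485760 / 10 = 1048576 rows, which the check below clamps to 8192.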
+ if (!blocks_are_granules) + index_granularity_for_block = std::min(fixed_index_granularity_rows, index_granularity_for_block); + + /// Very rare case when index granularity bytes less than single row. + if (index_granularity_for_block == 0) + index_granularity_for_block = 1; + + return index_granularity_for_block; } + +MergeTreeIndexGranularityPtr createMergeTreeIndexGranularity( + size_t rows_in_block, + size_t bytes_in_block, + const MergeTreeSettings & settings, + const MergeTreeIndexGranularityInfo & info, + bool blocks_are_granules) +{ + bool use_adaptive_granularity = info.mark_type.adaptive; + bool use_const_adaptive_granularity = settings[MergeTreeSetting::use_const_adaptive_granularity]; + bool is_compact_part = info.mark_type.part_type == MergeTreeDataPartType::Compact; + + if (blocks_are_granules || is_compact_part || (use_adaptive_granularity && !use_const_adaptive_granularity)) + return std::make_shared(); + + size_t computed_granularity = computeIndexGranularityForBlock( + rows_in_block, + bytes_in_block, + settings[MergeTreeSetting::index_granularity_bytes], + settings[MergeTreeSetting::index_granularity], + blocks_are_granules, + use_adaptive_granularity); + + return std::make_shared(computed_granularity); +} + } diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.h b/src/Storages/MergeTree/MergeTreeIndexGranularity.h index f66e721ec1e..91b9b1addea 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.h @@ -1,6 +1,7 @@ #pragma once -#include +#include #include +#include "Storages/MergeTree/MergeTreeSettings.h" namespace DB { @@ -13,23 +14,19 @@ namespace DB /// all values in inner vector would have constant stride (default 8192). class MergeTreeIndexGranularity { -private: - std::vector marks_rows_partial_sums; - bool initialized = false; - public: MergeTreeIndexGranularity() = default; - explicit MergeTreeIndexGranularity(const std::vector & marks_rows_partial_sums_); + virtual ~MergeTreeIndexGranularity() = default; /// Return count of rows between marks - size_t getRowsCountInRange(const MarkRange & range) const; + virtual size_t getRowsCountInRange(size_t begin, size_t end) const = 0; /// Return count of rows between marks - size_t getRowsCountInRange(size_t begin, size_t end) const; + size_t getRowsCountInRange(const MarkRange & range) const; /// Return sum of rows between all ranges size_t getRowsCountInRanges(const MarkRanges & ranges) const; /// Return number of marks, starting from `from_marks` that contain `number_of_rows` - size_t countMarksForRows(size_t from_mark, size_t number_of_rows) const; + virtual size_t countMarksForRows(size_t from_mark, size_t number_of_rows) const = 0; /// Return number of rows, starting from `from_mark`, that contains amount of `number_of_rows` /// and possible some offset_in_rows from `from_mark` @@ -37,69 +34,64 @@ public: /// |-----|---------------------------|----|----| /// ^------------------------^-----------^ //// from_mark offset_in_rows number_of_rows - size_t countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const; + virtual size_t countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const = 0; /// Total marks - size_t getMarksCount() const; + virtual size_t getMarksCount() const = 0; /// Total rows - size_t getTotalRows() const; + virtual size_t getTotalRows() const = 0; /// Total number marks without final mark if it exists - size_t getMarksCountWithoutFinal() const { return 
getMarksCount() - hasFinalMark(); }
+    size_t getMarksCountWithoutFinal() const;

     /// Rows after mark to next mark
-    size_t getMarkRows(size_t mark_index) const;
+    virtual size_t getMarkRows(size_t mark_index) const = 0;

     /// Return amount of rows before mark
-    size_t getMarkStartingRow(size_t mark_index) const;
+    virtual size_t getMarkStartingRow(size_t mark_index) const = 0;

     /// Amount of rows after last mark
-    size_t getLastMarkRows() const
-    {
-        size_t last = marks_rows_partial_sums.size() - 1;
-        return getMarkRows(last);
-    }
+    size_t getLastMarkRows() const;

-    size_t getLastNonFinalMarkRows() const
-    {
-        size_t last_mark_rows = getLastMarkRows();
-        if (last_mark_rows != 0)
-            return last_mark_rows;
-        return getMarkRows(marks_rows_partial_sums.size() - 2);
-    }
+    /// Amount of rows after last non-final mark
+    size_t getLastNonFinalMarkRows() const;

-    bool hasFinalMark() const
-    {
-        return getLastMarkRows() == 0;
-    }
+    virtual bool hasFinalMark() const = 0;

-    bool empty() const
-    {
-        return marks_rows_partial_sums.empty();
-    }
+    bool empty() const { return getMarksCount() == 0; }

-    bool isInitialized() const
-    {
-        return initialized;
-    }
-
-    void setInitialized()
-    {
-        initialized = true;
-    }

     /// Add new mark with rows_count
-    void appendMark(size_t rows_count);
+    virtual void appendMark(size_t rows_count) = 0;

     /// Extends last mark by rows_count.
+    virtual void adjustLastMark(size_t rows_count) = 0;
     void addRowsToLastMark(size_t rows_count);

-    /// Drops last mark if any exists.
-    void popMark();
+    virtual void shrinkToFitInMemory() = 0;

-    /// Add `size` of marks with `fixed_granularity` rows
-    void resizeWithFixedGranularity(size_t size, size_t fixed_granularity);
+    virtual std::shared_ptr<MergeTreeIndexGranularity> optimize() const = 0;

-    std::string describe() const;
+    virtual std::string describe() const = 0;
 };

+using MergeTreeIndexGranularityPtr = std::shared_ptr<MergeTreeIndexGranularity>;
+
+size_t computeIndexGranularityForBlock(
+    size_t rows_in_block,
+    size_t bytes_in_block,
+    size_t index_granularity_bytes,
+    size_t fixed_index_granularity_rows,
+    bool blocks_are_granules,
+    bool can_use_adaptive_index_granularity);
+
+struct MergeTreeSettings;
+struct MergeTreeIndexGranularityInfo;
+
+MergeTreeIndexGranularityPtr createMergeTreeIndexGranularity(
+    size_t rows_in_block,
+    size_t bytes_in_block,
+    const MergeTreeSettings & settings,
+    const MergeTreeIndexGranularityInfo & info,
+    bool blocks_are_granules);
+
 }
diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.cpp
new file mode 100644
index 00000000000..738a5a1d018
--- /dev/null
+++ b/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.cpp
@@ -0,0 +1,151 @@
+#include
+#include
+#include
+#include "Common/Logger.h"
+#include "Common/logger_useful.h"
+#include "Storages/MergeTree/MergeTreeIndexGranularityConstant.h"
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+MergeTreeIndexGranularityAdaptive::MergeTreeIndexGranularityAdaptive(const std::vector<size_t> & marks_rows_partial_sums_)
+    : marks_rows_partial_sums(marks_rows_partial_sums_)
+{
+}
+
+/// Rows after mark to next mark
+size_t MergeTreeIndexGranularityAdaptive::getMarkRows(size_t mark_index) const
+{
+    if (mark_index >= getMarksCount())
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to get non existing mark {}, while size is {}", mark_index, getMarksCount());
+
+    if (mark_index == 0)
+        return marks_rows_partial_sums[0];
+
+    return marks_rows_partial_sums[mark_index] - marks_rows_partial_sums[mark_index
- 1]; +} + +size_t MergeTreeIndexGranularityAdaptive::getMarkStartingRow(size_t mark_index) const +{ + if (mark_index > getMarksCount()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to get non existing mark {}, while size is {}", mark_index, getMarksCount()); + + if (mark_index == 0) + return 0; + + return marks_rows_partial_sums[mark_index - 1]; +} + +bool MergeTreeIndexGranularityAdaptive::hasFinalMark() const +{ + if (marks_rows_partial_sums.empty()) + return false; + return marks_rows_partial_sums.back() == 0; +} + +size_t MergeTreeIndexGranularityAdaptive::getMarksCount() const +{ + return marks_rows_partial_sums.size(); +} + +size_t MergeTreeIndexGranularityAdaptive::getTotalRows() const +{ + if (marks_rows_partial_sums.empty()) + return 0; + return marks_rows_partial_sums.back(); +} + +void MergeTreeIndexGranularityAdaptive::appendMark(size_t rows_count) +{ + if (hasFinalMark()) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot append mark after final"); + } + else if (marks_rows_partial_sums.empty()) + { + marks_rows_partial_sums.push_back(rows_count); + } + else + { + marks_rows_partial_sums.push_back(marks_rows_partial_sums.back() + rows_count); + } +} + +void MergeTreeIndexGranularityAdaptive::adjustLastMark(size_t rows_count) +{ + if (hasFinalMark()) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot adjust final mark"); + } + else if (marks_rows_partial_sums.empty()) + { + marks_rows_partial_sums.push_back(rows_count); + } + else + { + marks_rows_partial_sums.pop_back(); + appendMark(rows_count); + } +} + +size_t MergeTreeIndexGranularityAdaptive::getRowsCountInRange(size_t begin, size_t end) const +{ + size_t subtrahend = 0; + if (begin != 0) + subtrahend = marks_rows_partial_sums[begin - 1]; + return marks_rows_partial_sums[end - 1] - subtrahend; +} + +size_t MergeTreeIndexGranularityAdaptive::countMarksForRows(size_t from_mark, size_t number_of_rows) const +{ + size_t rows_before_mark = getMarkStartingRow(from_mark); + size_t last_row_pos = rows_before_mark + number_of_rows; + auto it = std::upper_bound(marks_rows_partial_sums.begin(), marks_rows_partial_sums.end(), last_row_pos); + size_t to_mark = it - marks_rows_partial_sums.begin(); + return to_mark - from_mark; +} + +size_t MergeTreeIndexGranularityAdaptive::countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const +{ + size_t rows_before_mark = getMarkStartingRow(from_mark); + size_t last_row_pos = rows_before_mark + offset_in_rows + number_of_rows; + auto it = std::upper_bound(marks_rows_partial_sums.begin(), marks_rows_partial_sums.end(), last_row_pos); + size_t to_mark = it - marks_rows_partial_sums.begin(); + + return getRowsCountInRange(from_mark, std::max(1UL, to_mark)) - offset_in_rows; +} + +void MergeTreeIndexGranularityAdaptive::shrinkToFitInMemory() +{ + marks_rows_partial_sums.shrink_to_fit(); +} + +std::shared_ptr MergeTreeIndexGranularityAdaptive::optimize() const +{ + size_t marks_count = getMarksCountWithoutFinal(); + if (marks_count == 0) + return nullptr; + + size_t first_mark = getMarkRows(0); + for (size_t i = 1; i < marks_count - 1; ++i) + { + if (getMarkRows(i) != first_mark) + return nullptr; + } + + size_t last_mark = getMarkRows(marks_count - 1); + return std::make_shared(first_mark, last_mark, marks_count, hasFinalMark()); +} + +std::string MergeTreeIndexGranularityAdaptive::describe() const +{ + return fmt::format("Adaptive(marks_rows_partial_sums: [{}])", fmt::join(marks_rows_partial_sums, ", ")); +} + +} diff --git 
a/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h b/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h new file mode 100644 index 00000000000..e4f8a4a554b --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h @@ -0,0 +1,36 @@ +#pragma once +#include + +namespace DB +{ + +class MergeTreeIndexGranularityAdaptive : public MergeTreeIndexGranularity +{ +public: + MergeTreeIndexGranularityAdaptive() = default; + explicit MergeTreeIndexGranularityAdaptive(const std::vector & marks_rows_partial_sums_); + + size_t getRowsCountInRange(size_t begin, size_t end) const override; + size_t countMarksForRows(size_t from_mark, size_t number_of_rows) const override; + size_t countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const override; + + size_t getMarksCount() const override; + size_t getTotalRows() const override; + + size_t getMarkRows(size_t mark_index) const override; + size_t getMarkStartingRow(size_t mark_index) const override; + bool hasFinalMark() const override; + + void appendMark(size_t rows_count) override; + void adjustLastMark(size_t rows_count) override; + void shrinkToFitInMemory() override; + std::shared_ptr optimize() const override; + + std::string describe() const override; + +private: + std::vector marks_rows_partial_sums; +}; + +} + diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.cpp new file mode 100644 index 00000000000..9db8ce5d199 --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.cpp @@ -0,0 +1,150 @@ +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +MergeTreeIndexGranularityConstant::MergeTreeIndexGranularityConstant(size_t constant_granularity_) + : constant_granularity(constant_granularity_) + , last_mark_granularity(constant_granularity_) +{ +} + +MergeTreeIndexGranularityConstant::MergeTreeIndexGranularityConstant(size_t constant_granularity_, size_t last_mark_granularity_, size_t num_marks_without_final_, bool has_final_mark_) + : constant_granularity(constant_granularity_) + , last_mark_granularity(last_mark_granularity_) + , num_marks_without_final(num_marks_without_final_) + , has_final_mark(has_final_mark_) +{ +} + +/// Rows after mark to next mark +size_t MergeTreeIndexGranularityConstant::getMarkRows(size_t mark_index) const +{ + if (mark_index >= getMarksCount()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to get non existing mark {}, while size is {}", mark_index, getMarksCount()); + + if (mark_index + 1 < num_marks_without_final) + return constant_granularity; + + if (mark_index + 1 == num_marks_without_final) + return last_mark_granularity; + + return 0; // Final mark. 
+}
+
+size_t MergeTreeIndexGranularityConstant::getMarkStartingRow(size_t mark_index) const
+{
+    if (mark_index > getMarksCount())
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to get non existing mark {}, while size is {}", mark_index, getMarksCount());
+
+    size_t total_rows = 0;
+    if (mark_index >= num_marks_without_final && mark_index != 0)
+    {
+        total_rows += last_mark_granularity;
+        mark_index = num_marks_without_final - 1;
+    }
+
+    total_rows += constant_granularity * mark_index;
+    return total_rows;
+}
+
+size_t MergeTreeIndexGranularityConstant::getMarksCount() const
+{
+    return num_marks_without_final + has_final_mark;
+}
+
+size_t MergeTreeIndexGranularityConstant::getTotalRows() const
+{
+    if (num_marks_without_final == 0)
+        return 0;
+
+    return constant_granularity * (num_marks_without_final - 1) + last_mark_granularity;
+}
+
+void MergeTreeIndexGranularityConstant::appendMark(size_t rows_count)
+{
+    if (has_final_mark)
+    {
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot append mark after final");
+    }
+    else if (rows_count == 0)
+    {
+        has_final_mark = true;
+    }
+    else if (rows_count != constant_granularity)
+    {
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot append mark with {} rows. Granularity is constant ({})", rows_count, constant_granularity);
+    }
+    else
+    {
+        ++num_marks_without_final;
+    }
+}
+
+void MergeTreeIndexGranularityConstant::adjustLastMark(size_t rows_count)
+{
+    if (has_final_mark)
+    {
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot adjust final mark");
+    }
+    else
+    {
+        if (num_marks_without_final == 0)
+            ++num_marks_without_final;
+
+        last_mark_granularity = rows_count;
+    }
+}
+
+size_t MergeTreeIndexGranularityConstant::getRowsCountInRange(size_t begin, size_t end) const
+{
+    size_t total_rows = 0;
+    if (end >= num_marks_without_final && end != 0)
+    {
+        total_rows += last_mark_granularity;
+        end = num_marks_without_final - 1;
+    }
+
+    total_rows += constant_granularity * (end - begin);
+    return total_rows;
+}
+
+size_t MergeTreeIndexGranularityConstant::countMarksForRows(size_t from_mark, size_t number_of_rows) const
+{
+    size_t rows_before_mark = getMarkStartingRow(from_mark);
+    size_t last_row_pos = rows_before_mark + number_of_rows;
+
+    if (last_row_pos >= (num_marks_without_final - 1) * constant_granularity)
+        return num_marks_without_final - from_mark;
+
+    return (last_row_pos + constant_granularity - 1) / constant_granularity - from_mark;
+}
+
+size_t MergeTreeIndexGranularityConstant::countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const
+{
+    size_t rows_before_mark = getMarkStartingRow(from_mark);
+    size_t last_row_pos = rows_before_mark + offset_in_rows + number_of_rows;
+
+    /// Closed-form analogue of the std::upper_bound over partial sums of rows that
+    /// MergeTreeIndexGranularityAdaptive::countRowsForRows performs: to_mark is the
+    /// number of marks whose cumulative rows count does not exceed last_row_pos.
+    size_t to_mark;
+    if (last_row_pos >= getTotalRows())
+        to_mark = num_marks_without_final;
+    else if (last_row_pos >= (num_marks_without_final - 1) * constant_granularity)
+        to_mark = num_marks_without_final - 1;
+    else
+        to_mark = last_row_pos / constant_granularity;
+
+    return getRowsCountInRange(from_mark, std::max(1UL, to_mark)) - offset_in_rows;
+}
+
+std::string MergeTreeIndexGranularityConstant::describe() const
+{
+    return fmt::format(
+        "Constant(constant_granularity: {}, last_mark_granularity: {}, num_marks_without_final: {}, has_final_mark: {})",
+        constant_granularity, last_mark_granularity, num_marks_without_final, has_final_mark);
+}
+
+}
diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.h b/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.h
new file mode 100644
index 00000000000..36705a9e3d7
--- /dev/null
+++ 
b/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.h @@ -0,0 +1,41 @@ +#pragma once +#include + +namespace DB +{ + +class MergeTreeIndexGranularityConstant : public MergeTreeIndexGranularity +{ +private: + size_t constant_granularity; + size_t last_mark_granularity; + + size_t num_marks_without_final = 0; + bool has_final_mark = false; + +public: + MergeTreeIndexGranularityConstant() = default; + explicit MergeTreeIndexGranularityConstant(size_t constant_granularity_); + MergeTreeIndexGranularityConstant(size_t constant_granularity_, size_t last_mark_granularity_, size_t num_marks_without_final_, bool has_final_mark_); + + size_t getRowsCountInRange(size_t begin, size_t end) const override; + size_t countMarksForRows(size_t from_mark, size_t number_of_rows) const override; + size_t countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const override; + + size_t getMarksCount() const override; + size_t getTotalRows() const override; + + size_t getMarkRows(size_t mark_index) const override; + size_t getMarkStartingRow(size_t mark_index) const override; + bool hasFinalMark() const override { return has_final_mark; } + + void appendMark(size_t rows_count) override; + void adjustLastMark(size_t rows_count) override; + void shrinkToFitInMemory() override {} + std::shared_ptr optimize() const override { return nullptr; } + + std::string describe() const override; +}; + +} + diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h index b302d6b1a4b..62632b683ae 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h @@ -4,12 +4,12 @@ #include #include #include -#include namespace DB { class MergeTreeData; +class IDataPartStorage; /** Various types of mark files are stored in files with various extensions: diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp index 32d6c41bdcb..d54f6806154 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -4,6 +4,7 @@ #include #include #include +#include "Common/Logger.h" #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeReadTask.cpp b/src/Storages/MergeTree/MergeTreeReadTask.cpp index 72fddb93a6d..a2303ee0899 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.cpp +++ b/src/Storages/MergeTree/MergeTreeReadTask.cpp @@ -151,7 +151,7 @@ UInt64 MergeTreeReadTask::estimateNumRows() const return rows_to_read; const auto & index_granularity = info->data_part->index_granularity; - return index_granularity.countRowsForRows(range_readers.main.currentMark(), rows_to_read, range_readers.main.numReadRowsInCurrentGranule()); + return index_granularity->countRowsForRows(range_readers.main.currentMark(), rows_to_read, range_readers.main.numReadRowsInCurrentGranule()); } MergeTreeReadTask::BlockAndProgress MergeTreeReadTask::read() diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 653973e9db7..fc7ef041c7c 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -229,7 +229,7 @@ try if (!isCancelled() && current_row < data_part->rows_count) { - size_t rows_to_read = data_part->index_granularity.getMarkRows(current_mark); + size_t rows_to_read = data_part->index_granularity->getMarkRows(current_mark); bool 
continue_reading = (current_mark != 0);

         const auto & sample = reader->getColumns();
diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp
index 883191d59ab..6746248bad8 100644
--- a/src/Storages/MergeTree/MergeTreeSettings.cpp
+++ b/src/Storages/MergeTree/MergeTreeSettings.cpp
@@ -185,6 +185,8 @@ namespace ErrorCodes
     DECLARE(UInt64, min_merge_bytes_to_use_direct_io, 10ULL * 1024 * 1024 * 1024, "Minimal amount of bytes to enable O_DIRECT in merge (0 - disabled).", 0) \
     DECLARE(UInt64, index_granularity_bytes, 10 * 1024 * 1024, "Approximate amount of bytes in single granule (0 - disabled).", 0) \
     DECLARE(UInt64, min_index_granularity_bytes, 1024, "Minimum amount of bytes in single granule.", 1024) \
+    DECLARE(Bool, use_const_adaptive_granularity, false, "Always use constant granularity for whole part. It allows to compress in memory values of index granularity. It can be useful in extremely large workloads with thin tables.", 0) \
+    DECLARE(Bool, enable_index_granularity_compression, true, "Compress in memory values of index granularity if it is possible.", 0) \
     DECLARE(Int64, merge_with_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with delete TTL can be repeated.", 0) \
     DECLARE(Int64, merge_with_recompression_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with recompression TTL can be repeated.", 0) \
     DECLARE(Bool, ttl_only_drop_parts, false, "Only drop altogether the expired parts and not partially prune them.", 0) \
diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp
index 77c34aae30a..1eb967fabf8 100644
--- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp
+++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp
@@ -1,8 +1,10 @@
+#include
 #include
 #include
 #include
 #include
 #include
+#include "Common/Logger.h"
 #include
 #include
@@ -15,6 +17,10 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
 }

+namespace MergeTreeSetting
+{
+    extern MergeTreeSettingsBool enable_index_granularity_compression;
+}

 MergedBlockOutputStream::MergedBlockOutputStream(
     const MergeTreeMutableDataPartPtr & data_part,
@@ -23,12 +29,12 @@ MergedBlockOutputStream::MergedBlockOutputStream(
     const MergeTreeIndices & skip_indices,
     const ColumnsStatistics & statistics,
     CompressionCodecPtr default_codec_,
+    MergeTreeIndexGranularityPtr index_granularity_ptr,
     TransactionID tid,
     bool reset_columns_,
     bool save_marks_in_cache,
     bool blocks_are_granules_size,
-    const WriteSettings & write_settings_,
-    const MergeTreeIndexGranularity & computed_index_granularity)
+    const WriteSettings & write_settings_)
     : IMergedBlockOutputStream(data_part->storage.getSettings(), data_part->getDataPartStoragePtr(), metadata_snapshot_, columns_list_, reset_columns_)
     , columns_list(columns_list_)
     , default_codec(default_codec_)
@@ -53,11 +59,22 @@ MergedBlockOutputStream::MergedBlockOutputStream(
         data_part->storeVersionMetadata();

     writer = createMergeTreeDataPartWriter(data_part->getType(),
-        data_part->name, data_part->storage.getLogName(), data_part->getSerializations(),
-        data_part_storage, data_part->index_granularity_info,
-        storage_settings,
-        columns_list, data_part->getColumnPositions(), metadata_snapshot, data_part->storage.getVirtualsPtr(),
-        skip_indices, statistics, data_part->getMarksFileExtension(), default_codec, writer_settings, computed_index_granularity);
+        data_part->name,
+        data_part->storage.getLogName(),
+        data_part->getSerializations(),
+        data_part_storage,
+        data_part->index_granularity_info,
+        storage_settings,
+        columns_list,
+        data_part->getColumnPositions(),
+        metadata_snapshot,
+        data_part->storage.getVirtualsPtr(),
+        skip_indices,
+        statistics,
+        
data_part->getMarksFileExtension(), + default_codec, + writer_settings, + std::move(index_granularity_ptr)); } /// If data is pre-sorted. @@ -209,6 +226,12 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( new_part->index_granularity = writer->getIndexGranularity(); new_part->calculateColumnsAndSecondaryIndicesSizesOnDisk(); + if ((*new_part->storage.getSettings())[MergeTreeSetting::enable_index_granularity_compression]) + { + if (auto new_index_granularity = new_part->index_granularity->optimize()) + new_part->index_granularity = std::move(new_index_granularity); + } + /// In mutation, existing_rows_count is already calculated in PartMergerWriter /// In merge situation, lightweight deleted rows was physically deleted, existing_rows_count equals rows_count if (!new_part->existing_rows_count.has_value()) diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.h b/src/Storages/MergeTree/MergedBlockOutputStream.h index 060778866e0..94bdb2c491b 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.h +++ b/src/Storages/MergeTree/MergedBlockOutputStream.h @@ -22,12 +22,12 @@ public: const MergeTreeIndices & skip_indices, const ColumnsStatistics & statistics, CompressionCodecPtr default_codec_, + MergeTreeIndexGranularityPtr index_granularity_ptr, TransactionID tid, bool reset_columns_ = false, bool save_marks_in_cache = false, bool blocks_are_granules_size = false, - const WriteSettings & write_settings = {}, - const MergeTreeIndexGranularity & computed_index_granularity = {}); + const WriteSettings & write_settings = {}); Block getHeader() const { return metadata_snapshot->getSampleBlock(); } diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index bed539dfe02..4163bceeab7 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -15,25 +15,25 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const NamesAndTypesList & columns_list_, - CompressionCodecPtr default_codec, const MergeTreeIndices & indices_to_recalc, - const ColumnsStatistics & stats_to_recalc_, - WrittenOffsetColumns * offset_columns_, - bool save_marks_in_cache, - const MergeTreeIndexGranularity & index_granularity, - const MergeTreeIndexGranularityInfo * index_granularity_info) + const ColumnsStatistics & stats_to_recalc, + CompressionCodecPtr default_codec, + MergeTreeIndexGranularityPtr index_granularity_ptr, + WrittenOffsetColumns * offset_columns, + bool save_marks_in_cache) : IMergedBlockOutputStream(data_part->storage.getSettings(), data_part->getDataPartStoragePtr(), metadata_snapshot_, columns_list_, /*reset_columns=*/ true) { const auto & global_settings = data_part->storage.getContext()->getSettingsRef(); + /// Granularity is never recomputed while writing only columns. MergeTreeWriterSettings writer_settings( global_settings, data_part->storage.getContext()->getWriteSettings(), storage_settings, - index_granularity_info ? 
index_granularity_info->mark_type.adaptive : data_part->storage.canUseAdaptiveGranularity(), - /* rewrite_primary_key = */ false, + data_part->index_granularity_info.mark_type.adaptive, + /*rewrite_primary_key=*/ false, save_marks_in_cache, - /* blocks_are_granules_size = */ false); + /*blocks_are_granules_size=*/ false); writer = createMergeTreeDataPartWriter( data_part->getType(), @@ -45,17 +45,17 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( metadata_snapshot_, data_part->storage.getVirtualsPtr(), indices_to_recalc, - stats_to_recalc_, + stats_to_recalc, data_part->getMarksFileExtension(), default_codec, writer_settings, - index_granularity); + std::move(index_granularity_ptr)); auto * writer_on_disk = dynamic_cast(writer.get()); if (!writer_on_disk) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergedColumnOnlyOutputStream supports only parts stored on disk"); - writer_on_disk->setWrittenOffsetColumns(offset_columns_); + writer_on_disk->setWrittenOffsetColumns(offset_columns); } void MergedColumnOnlyOutputStream::write(const Block & block) diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h index f6bf9e37a58..0338273e96c 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h @@ -18,13 +18,12 @@ public: const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const NamesAndTypesList & columns_list_, - CompressionCodecPtr default_codec_, - const MergeTreeIndices & indices_to_recalc_, - const ColumnsStatistics & stats_to_recalc_, - WrittenOffsetColumns * offset_columns_ = nullptr, - bool save_marks_in_cache = false, - const MergeTreeIndexGranularity & index_granularity = {}, - const MergeTreeIndexGranularityInfo * index_granularity_info_ = nullptr); + const MergeTreeIndices & indices_to_recalc, + const ColumnsStatistics & stats_to_recalc, + CompressionCodecPtr default_codec, + MergeTreeIndexGranularityPtr index_granularity_ptr, + WrittenOffsetColumns * offset_columns = nullptr, + bool save_marks_in_cache = false); void write(const Block & block) override; diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 753b0c5d2fe..97c3de40132 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -35,6 +35,7 @@ #include #include #include +#include "Storages/MergeTree/MergeTreeIndexGranularity.h" #include @@ -1597,7 +1598,6 @@ private: ctx->minmax_idx = std::make_shared(); - MergeTreeIndexGranularity computed_granularity; bool has_delete = false; for (auto & command_for_interpreter : ctx->for_interpreter) @@ -1610,9 +1610,21 @@ private: } } + MergeTreeIndexGranularityPtr index_granularity_ptr; /// Reuse source part granularity if mutation does not change number of rows if (!has_delete && ctx->execute_ttl_type == ExecuteTTLType::NONE) - computed_granularity = ctx->source_part->index_granularity; + { + index_granularity_ptr = ctx->source_part->index_granularity; + } + else + { + index_granularity_ptr = createMergeTreeIndexGranularity( + ctx->new_data_part->rows_count, + ctx->new_data_part->getBytesUncompressedOnDisk(), + *ctx->data->getSettings(), + ctx->new_data_part->index_granularity_info, + /*blocks_are_granules=*/ false); + } ctx->out = std::make_shared( ctx->new_data_part, @@ -1621,12 +1633,12 @@ private: skip_indices, stats_to_rewrite, ctx->compression_codec, + std::move(index_granularity_ptr), ctx->txn ? 
ctx->txn->tid : Tx::PrehistoricTID, /*reset_columns=*/ true, /*save_marks_in_cache=*/ false, /*blocks_are_granules_size=*/ false, - ctx->context->getWriteSettings(), - computed_granularity); + ctx->context->getWriteSettings()); ctx->mutating_pipeline = QueryPipelineBuilder::getPipeline(std::move(*builder)); ctx->mutating_pipeline.setProgressCallback(ctx->progress_callback); @@ -1848,14 +1860,10 @@ private: ctx->new_data_part, ctx->metadata_snapshot, ctx->updated_header.getNamesAndTypesList(), - ctx->compression_codec, std::vector(ctx->indices_to_recalc.begin(), ctx->indices_to_recalc.end()), ColumnsStatistics(ctx->stats_to_recalc.begin(), ctx->stats_to_recalc.end()), - nullptr, - /*save_marks_in_cache=*/ false, - ctx->source_part->index_granularity, - &ctx->source_part->index_granularity_info - ); + ctx->compression_codec, + ctx->source_part->index_granularity); ctx->mutating_pipeline = QueryPipelineBuilder::getPipeline(std::move(*builder)); ctx->mutating_pipeline.setProgressCallback(ctx->progress_callback); diff --git a/src/Storages/MergeTree/RangesInDataPart.cpp b/src/Storages/MergeTree/RangesInDataPart.cpp index 50e0781b4e6..8256c20d81d 100644 --- a/src/Storages/MergeTree/RangesInDataPart.cpp +++ b/src/Storages/MergeTree/RangesInDataPart.cpp @@ -99,7 +99,7 @@ size_t RangesInDataPart::getMarksCount() const size_t RangesInDataPart::getRowsCount() const { - return data_part->index_granularity.getRowsCountInRanges(ranges); + return data_part->index_granularity->getRowsCountInRanges(ranges); } diff --git a/src/Storages/StorageMergeTreeIndex.cpp b/src/Storages/StorageMergeTreeIndex.cpp index 1d641add275..35e966b5489 100644 --- a/src/Storages/StorageMergeTreeIndex.cpp +++ b/src/Storages/StorageMergeTreeIndex.cpp @@ -63,7 +63,7 @@ protected: marks_loader = createMarksLoader(part, MergeTreeDataPartCompact::DATA_FILE_NAME, part->getColumns().size()); size_t num_columns = header.columns(); - size_t num_rows = index_granularity.getMarksCount(); + size_t num_rows = index_granularity->getMarksCount(); const auto & part_name_column = StorageMergeTreeIndex::part_name_column; const auto & mark_number_column = StorageMergeTreeIndex::mark_number_column; @@ -115,7 +115,7 @@ protected: data.resize(num_rows); for (size_t i = 0; i < num_rows; ++i) - data[i] = index_granularity.getMarkRows(i); + data[i] = index_granularity->getMarkRows(i); result_columns[pos] = std::move(column); } @@ -159,7 +159,7 @@ private: { size_t col_idx = 0; bool has_marks_in_part = false; - size_t num_rows = part->index_granularity.getMarksCount(); + size_t num_rows = part->index_granularity->getMarksCount(); if (isWidePart(part)) { From c0e239a14a6d0527a9354fa8aa05220397ac6d4f Mon Sep 17 00:00:00 2001 From: justindeguzman Date: Mon, 11 Nov 2024 10:58:45 -0800 Subject: [PATCH 157/267] [Docs] Update links to use official Docker image --- docs/en/getting-started/install.md | 2 +- docs/ru/getting-started/install.md | 2 +- docs/zh/getting-started/install.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index 6209ef3c8ee..62071ddb722 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -16,7 +16,7 @@ You have four options for getting up and running with ClickHouse: - **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, - built by, maintained and supported by the creators of ClickHouse - **[Quick Install](#quick-install):** an easy-to-download binary for testing and 
developing with ClickHouse - **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, modern ARM (ARMv8.2-A up), or PowerPC64LE CPU architecture -- **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** use the official Docker image in Docker Hub +- **[Docker Image](https://hub.docker.com/_/clickhouse):** use the official Docker image in Docker Hub ## ClickHouse Cloud diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index 083ddc8c39c..4b4b018697e 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -154,7 +154,7 @@ sudo "clickhouse-client-$LATEST_VERSION/install/doinst.sh" ### Из Docker образа {#from-docker-image} -Для запуска ClickHouse в Docker нужно следовать инструкции на [Docker Hub](https://hub.docker.com/r/clickhouse/clickhouse-server/). Внутри образов используются официальные `deb`-пакеты. +Для запуска ClickHouse в Docker нужно следовать инструкции на [Docker Hub](https://hub.docker.com/_/clickhouse). Внутри образов используются официальные `deb`-пакеты. ### Из единого бинарного файла {#from-single-binary} diff --git a/docs/zh/getting-started/install.md b/docs/zh/getting-started/install.md index 7e4fb6826e4..8a9c4cd1c60 100644 --- a/docs/zh/getting-started/install.md +++ b/docs/zh/getting-started/install.md @@ -132,7 +132,7 @@ sudo "clickhouse-client-$LATEST_VERSION/install/doinst.sh" ### `Docker`安装包 {#from-docker-image} -要在Docker中运行ClickHouse,请遵循[Docker Hub](https://hub.docker.com/r/clickhouse/clickhouse-server/)上的指南。它是官方的`deb`安装包。 +要在Docker中运行ClickHouse,请遵循[Docker Hub](https://hub.docker.com/_/clickhouse)上的指南。它是官方的`deb`安装包。 ### 其他环境安装包 {#from-other} From 06debdc479bab58f2d1d7fd4b3764e65a8c9fa01 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Fri, 8 Nov 2024 17:48:17 +0100 Subject: [PATCH 158/267] result with versioning --- ci/docker/stateless-test/Dockerfile | 6 +- ci/jobs/build_clickhouse.py | 1 - ci/jobs/functional_stateless_tests.py | 11 +- ci/jobs/scripts/clickhouse_proc.py | 11 + .../setup_hdfs_minicluster.sh | 19 ++ ci/praktika/__main__.py | 7 + ci/praktika/_environment.py | 14 +- ci/praktika/_settings.py | 128 ---------- ci/praktika/digest.py | 38 +-- ci/praktika/hook_cache.py | 11 +- ci/praktika/hook_html.py | 71 ++---- ci/praktika/json.html | 11 +- ci/praktika/mangle.py | 36 +-- ci/praktika/native_jobs.py | 14 +- ci/praktika/result.py | 240 +++++++++++++++++- ci/praktika/runner.py | 18 +- ci/praktika/runtime.py | 6 + ci/praktika/s3.py | 172 ++----------- ci/praktika/settings.py | 156 +++++++++++- ci/praktika/utils.py | 2 - ci/praktika/validator.py | 8 +- ci/workflows/pull_request.py | 1 + 22 files changed, 551 insertions(+), 430 deletions(-) create mode 100755 ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh delete mode 100644 ci/praktika/_settings.py diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile index 4abd8204f1d..760fceeebbf 100644 --- a/ci/docker/stateless-test/Dockerfile +++ b/ci/docker/stateless-test/Dockerfile @@ -100,8 +100,12 @@ ENV PATH="/wd/tests:/tmp/praktika/input:$PATH" RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \ && tar -xvf hadoop-3.3.1.tar.gz \ - && rm -rf hadoop-3.3.1.tar.gz + && rm -rf hadoop-3.3.1.tar.gz \ + && chmod 777 /hadoop-3.3.1 RUN npm install -g azurite@3.30.0 \ && npm install -g tslib && npm install -g node + +RUN addgroup --gid 1001 clickhouse && adduser --uid 1001 
--gid 1001 --disabled-password clickhouse +USER clickhouse \ No newline at end of file diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 1e6d2c648a7..3bdc23d383c 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -127,7 +127,6 @@ def main(): Shell.check(f"ls -l {build_dir}/programs/") res = results[-1].is_ok() - Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py index 0481086d80a..390a6336b45 100644 --- a/ci/jobs/functional_stateless_tests.py +++ b/ci/jobs/functional_stateless_tests.py @@ -27,11 +27,12 @@ def parse_args(): default="", ) parser.add_argument("--param", help="Optional job start stage", default=None) + parser.add_argument("--test", help="Optional test name pattern", default="") return parser.parse_args() def run_stateless_test( - no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int + no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int, test="" ): assert not (no_parallel and no_sequiential) test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" @@ -43,7 +44,7 @@ def run_stateless_test( --no-drop-if-fail --capture-client-stacktrace --queries /repo/tests/queries --test-runs 1 --hung-check \ {'--no-parallel' if no_parallel else ''} {'--no-sequential' if no_sequiential else ''} \ --print-time --jobs {nproc} --report-coverage --report-logs-stats {aux} \ - --queries ./tests/queries -- '' | ts '%Y-%m-%d %H:%M:%S' \ + --queries ./tests/queries -- '{test}' | ts '%Y-%m-%d %H:%M:%S' \ | tee -a \"{test_output_file}\"" if Path(test_output_file).exists(): Path(test_output_file).unlink() @@ -119,11 +120,14 @@ def main(): stop_watch_ = Utils.Stopwatch() step_name = "Start ClickHouse Server" print(step_name) + hdfs_log = "/tmp/praktika/output/hdfs_mini.log" minio_log = "/tmp/praktika/output/minio.log" + res = res and CH.start_hdfs(log_file_path=hdfs_log) res = res and CH.start_minio(log_file_path=minio_log) - logs_to_attach += [minio_log] + logs_to_attach += [minio_log, hdfs_log] time.sleep(10) Shell.check("ps -ef | grep minio", verbose=True) + Shell.check("ps -ef | grep hdfs", verbose=True) res = res and Shell.check( "aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True ) @@ -153,6 +157,7 @@ def main(): no_sequiential=no_sequential, batch_num=batch_num, batch_total=total_batches, + test=args.test, ) results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run()) results[-1].set_timing(stopwatch=stop_watch_) diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py index c43283e75e0..8f9bef57083 100644 --- a/ci/jobs/scripts/clickhouse_proc.py +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -44,6 +44,17 @@ class ClickHouseProc: self.minio_proc = None + def start_hdfs(self, log_file_path): + command = ["./ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh"] + with open(log_file_path, "w") as log_file: + process = subprocess.Popen( + command, stdout=log_file, stderr=subprocess.STDOUT + ) + print( + f"Started setup_hdfs_minicluster.sh asynchronously with PID {process.pid}" + ) + return True + def start_minio(self, log_file_path): command = ["tests/docker_scripts/setup_minio.sh", "stateless", "./tests"] with open(log_file_path, "w") as log_file: diff --git a/ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh b/ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh new file mode 100755 index 00000000000..b810b27fe2b --- 
/dev/null +++ b/ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# shellcheck disable=SC2024 + +set -e -x -a -u + +ls -lha + +cd /hadoop-3.3.1 + +export JAVA_HOME=/usr +mkdir -p target/test/data + +bin/mapred minicluster -format -nomr -nnport 12222 & + +while ! nc -z localhost 12222; do + sleep 1 +done + +lsof -i :12222 diff --git a/ci/praktika/__main__.py b/ci/praktika/__main__.py index fbb9f92909a..3dfdc26d69d 100644 --- a/ci/praktika/__main__.py +++ b/ci/praktika/__main__.py @@ -37,6 +37,12 @@ def create_parser(): type=str, default=None, ) + run_parser.add_argument( + "--test", + help="Custom parameter to pass into a job script, it's up to job script how to use it, for local test", + type=str, + default="", + ) run_parser.add_argument( "--pr", help="PR number. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run in that PR", @@ -106,6 +112,7 @@ if __name__ == "__main__": local_run=not args.ci, no_docker=args.no_docker, param=args.param, + test=args.test, pr=args.pr, branch=args.branch, sha=args.sha, diff --git a/ci/praktika/_environment.py b/ci/praktika/_environment.py index 1c6b547ddde..734a4be3176 100644 --- a/ci/praktika/_environment.py +++ b/ci/praktika/_environment.py @@ -6,7 +6,7 @@ from types import SimpleNamespace from typing import Any, Dict, List, Type from praktika import Workflow -from praktika._settings import _Settings +from praktika.settings import Settings from praktika.utils import MetaClasses, T @@ -35,7 +35,7 @@ class _Environment(MetaClasses.Serializable): @classmethod def file_name_static(cls, _name=""): - return f"{_Settings.TEMP_DIR}/{cls.name}.json" + return f"{Settings.TEMP_DIR}/{cls.name}.json" @classmethod def from_dict(cls: Type[T], obj: Dict[str, Any]) -> T: @@ -66,12 +66,12 @@ class _Environment(MetaClasses.Serializable): @staticmethod def get_needs_statuses(): - if Path(_Settings.WORKFLOW_STATUS_FILE).is_file(): - with open(_Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f: + if Path(Settings.WORKFLOW_STATUS_FILE).is_file(): + with open(Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f: return json.load(f) else: print( - f"ERROR: Status file [{_Settings.WORKFLOW_STATUS_FILE}] does not exist" + f"ERROR: Status file [{Settings.WORKFLOW_STATUS_FILE}] does not exist" ) raise RuntimeError() @@ -171,7 +171,7 @@ class _Environment(MetaClasses.Serializable): # TODO: find a better place for the function. 
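The `while ! nc -z localhost 12222` loop in setup_hdfs_minicluster.sh above is a plain TCP readiness probe: keep retrying until the NameNode port accepts connections. An equivalent Python sketch (illustrative, not part of the patch):

import socket
import time

def wait_for_port(host: str, port: int, timeout: float = 60.0) -> bool:
    """Poll until a TCP listener accepts connections, like `while ! nc -z host port`."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with socket.create_connection((host, port), timeout=1):
                return True
        except OSError:
            time.sleep(1)
    return False

# Usage, mirroring the script: block until the minicluster port is up.
# wait_for_port("localhost", 12222)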
This file should not import praktika.settings # as it's requires reading users config, that's why imports nested inside the function - def get_report_url(self, settings): + def get_report_url(self, settings, latest=False): import urllib path = settings.HTML_S3_PATH @@ -179,7 +179,7 @@ class _Environment(MetaClasses.Serializable): if bucket in path: path = path.replace(bucket, endpoint) break - REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" + REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={'latest' if latest else self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" return REPORT_URL def is_local_run(self): diff --git a/ci/praktika/_settings.py b/ci/praktika/_settings.py deleted file mode 100644 index 17da1519e37..00000000000 --- a/ci/praktika/_settings.py +++ /dev/null @@ -1,128 +0,0 @@ -import dataclasses -from typing import Dict, Iterable, List, Optional - - -@dataclasses.dataclass -class _Settings: - ###################################### - # Pipeline generation settings # - ###################################### - MAIN_BRANCH = "main" - CI_PATH = "./ci" - WORKFLOW_PATH_PREFIX: str = "./.github/workflows" - WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows" - SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings" - CI_CONFIG_JOB_NAME = "Config Workflow" - DOCKER_BUILD_JOB_NAME = "Docker Builds" - FINISH_WORKFLOW_JOB_NAME = "Finish Workflow" - READY_FOR_MERGE_STATUS_NAME = "Ready for Merge" - CI_CONFIG_RUNS_ON: Optional[List[str]] = None - DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None - VALIDATE_FILE_PATHS: bool = True - - ###################################### - # Runtime Settings # - ###################################### - MAX_RETRIES_S3 = 3 - MAX_RETRIES_GH = 3 - - ###################################### - # S3 (artifact storage) settings # - ###################################### - S3_ARTIFACT_PATH: str = "" - - ###################################### - # CI workspace settings # - ###################################### - TEMP_DIR: str = "/tmp/praktika" - OUTPUT_DIR: str = f"{TEMP_DIR}/output" - INPUT_DIR: str = f"{TEMP_DIR}/input" - PYTHON_INTERPRETER: str = "python3" - PYTHON_PACKET_MANAGER: str = "pip3" - PYTHON_VERSION: str = "3.9" - INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False - INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt" - ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json" - RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log" - - SECRET_GH_APP_ID: str = "GH_APP_ID" - SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY" - - ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh" - WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json" - - ###################################### - # CI Cache settings # - ###################################### - CACHE_VERSION: int = 1 - CACHE_DIGEST_LEN: int = 20 - CACHE_S3_PATH: str = "" - CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache" - - ###################################### - # Report settings # - ###################################### - HTML_S3_PATH: str = "" - HTML_PAGE_FILE: str = "./praktika/json.html" - TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"]) - S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None - - DOCKERHUB_USERNAME: str = "" - DOCKERHUB_SECRET: str = "" - DOCKER_WD: str = "/wd" - - 
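The module being deleted here is folded into ci/praktika/settings.py (see the settings.py hunk at the end of this patch), and the user-override mechanism previously split between mangle._get_user_settings and the old settings.py (both removed in this patch) stays conceptually the same. A condensed Python sketch of that pattern, with illustrative setting names standing in for the full list:

import dataclasses
import importlib.util
from pathlib import Path

@dataclasses.dataclass
class _Settings:
    # two representative defaults; the real class declares many more
    TEMP_DIR: str = "/tmp/praktika"
    MAX_RETRIES_S3: int = 3

_USER_DEFINED_SETTINGS = ["TEMP_DIR", "MAX_RETRIES_S3"]

def apply_user_settings(settings: _Settings, directory: str) -> _Settings:
    """Import every *.py in `directory` and copy recognized attributes onto `settings`."""
    for py_file in Path(directory).glob("*.py"):
        spec = importlib.util.spec_from_file_location(py_file.stem, py_file)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        for name in _USER_DEFINED_SETTINGS:
            if hasattr(module, name):
                setattr(settings, name, getattr(module, name))
    return settings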
###################################### - # CI DB Settings # - ###################################### - SECRET_CI_DB_URL: str = "CI_DB_URL" - SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD" - CI_DB_DB_NAME = "" - CI_DB_TABLE_NAME = "" - CI_DB_INSERT_TIMEOUT_SEC = 5 - - DISABLE_MERGE_COMMIT = True - - -_USER_DEFINED_SETTINGS = [ - "S3_ARTIFACT_PATH", - "CACHE_S3_PATH", - "HTML_S3_PATH", - "S3_BUCKET_TO_HTTP_ENDPOINT", - "TEXT_CONTENT_EXTENSIONS", - "TEMP_DIR", - "OUTPUT_DIR", - "INPUT_DIR", - "CI_CONFIG_RUNS_ON", - "DOCKER_BUILD_RUNS_ON", - "CI_CONFIG_JOB_NAME", - "PYTHON_INTERPRETER", - "PYTHON_VERSION", - "PYTHON_PACKET_MANAGER", - "INSTALL_PYTHON_FOR_NATIVE_JOBS", - "INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS", - "MAX_RETRIES_S3", - "MAX_RETRIES_GH", - "VALIDATE_FILE_PATHS", - "DOCKERHUB_USERNAME", - "DOCKERHUB_SECRET", - "READY_FOR_MERGE_STATUS_NAME", - "SECRET_CI_DB_URL", - "SECRET_CI_DB_PASSWORD", - "CI_DB_DB_NAME", - "CI_DB_TABLE_NAME", - "CI_DB_INSERT_TIMEOUT_SEC", - "SECRET_GH_APP_PEM_KEY", - "SECRET_GH_APP_ID", - "MAIN_BRANCH", - "DISABLE_MERGE_COMMIT", -] - - -class GHRunners: - ubuntu = "ubuntu-latest" - - -if __name__ == "__main__": - for setting in _USER_DEFINED_SETTINGS: - print(_Settings().__getattribute__(setting)) - # print(dataclasses.asdict(_Settings())) diff --git a/ci/praktika/digest.py b/ci/praktika/digest.py index a1f2eecf9b6..6b7e5eec07b 100644 --- a/ci/praktika/digest.py +++ b/ci/praktika/digest.py @@ -23,7 +23,7 @@ class Digest: hash_string = hash_obj.hexdigest() return hash_string - def calc_job_digest(self, job_config: Job.Config): + def calc_job_digest(self, job_config: Job.Config, docker_digests): config = job_config.digest_config if not config: return "f" * Settings.CACHE_DIGEST_LEN @@ -34,28 +34,28 @@ class Digest: print( f"calc digest for job [{job_config.name}]: hash_key [{cache_key}] - from cache" ) - return self.digest_cache[cache_key] - - included_files = Utils.traverse_paths( - job_config.digest_config.include_paths, - job_config.digest_config.exclude_paths, - sorted=True, - ) - print( - f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files" - ) - - # Calculate MD5 hash - res = "" - if not included_files: - res = "f" * Settings.CACHE_DIGEST_LEN - print(f"NOTE: empty digest config [{config}] - return dummy digest") + digest = self.digest_cache[cache_key] else: + included_files = Utils.traverse_paths( + job_config.digest_config.include_paths, + job_config.digest_config.exclude_paths, + sorted=True, + ) + print( + f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files" + ) + hash_md5 = hashlib.md5() for i, file_path in enumerate(included_files): hash_md5 = self._calc_file_digest(file_path, hash_md5) - digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] - self.digest_cache[cache_key] = digest + digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + self.digest_cache[cache_key] = digest + + if job_config.run_in_docker: + # respect docker digest in the job digest + docker_digest = docker_digests[job_config.run_in_docker.split("+")[0]] + digest = "-".join([docker_digest, digest]) + return digest def calc_docker_digest( diff --git a/ci/praktika/hook_cache.py b/ci/praktika/hook_cache.py index 5cfedec0144..e001e936a71 100644 --- a/ci/praktika/hook_cache.py +++ b/ci/praktika/hook_cache.py @@ -1,6 +1,5 @@ from praktika._environment import _Environment from praktika.cache import Cache -from praktika.mangle import _get_workflows from praktika.runtime import 
RunConfig from praktika.settings import Settings from praktika.utils import Utils @@ -10,6 +9,7 @@ class CacheRunnerHooks: @classmethod def configure(cls, workflow): workflow_config = RunConfig.from_fs(workflow.name) + docker_digests = workflow_config.digest_dockers cache = Cache() print(f"Workflow Configure, workflow [{workflow.name}]") assert ( @@ -18,11 +18,13 @@ class CacheRunnerHooks: artifact_digest_map = {} job_digest_map = {} for job in workflow.jobs: + digest = cache.digest.calc_job_digest( + job_config=job, docker_digests=docker_digests + ) if not job.digest_config: print( f"NOTE: job [{job.name}] has no Config.digest_config - skip cache check, always run" ) - digest = cache.digest.calc_job_digest(job_config=job) job_digest_map[job.name] = digest if job.provides: # assign the job digest also to the artifacts it provides @@ -48,7 +50,6 @@ class CacheRunnerHooks: ), f"BUG, Workflow with enabled cache must have job digests after configuration, wf [{workflow.name}]" print("Check remote cache") - job_to_cache_record = {} for job_name, job_digest in workflow_config.digest_jobs.items(): record = cache.fetch_success(job_name=job_name, job_digest=job_digest) if record: @@ -58,7 +59,7 @@ class CacheRunnerHooks: ) workflow_config.cache_success.append(job_name) workflow_config.cache_success_base64.append(Utils.to_base64(job_name)) - job_to_cache_record[job_name] = record + workflow_config.cache_jobs[job_name] = record print("Check artifacts to reuse") for job in workflow.jobs: @@ -66,7 +67,7 @@ class CacheRunnerHooks: if job.provides: for artifact_name in job.provides: workflow_config.cache_artifacts[artifact_name] = ( - job_to_cache_record[job.name] + workflow_config.cache_jobs[job.name] ) print(f"Write config to GH's job output") diff --git a/ci/praktika/hook_html.py b/ci/praktika/hook_html.py index ca2692d1b22..e2faefb2fa9 100644 --- a/ci/praktika/hook_html.py +++ b/ci/praktika/hook_html.py @@ -6,7 +6,7 @@ from typing import List from praktika._environment import _Environment from praktika.gh import GH from praktika.parser import WorkflowConfigParser -from praktika.result import Result, ResultInfo +from praktika.result import Result, ResultInfo, _ResultS3 from praktika.runtime import RunConfig from praktika.s3 import S3 from praktika.settings import Settings @@ -119,6 +119,7 @@ class HtmlRunnerHooks: # generate pending Results for all jobs in the workflow if _workflow.enable_cache: skip_jobs = RunConfig.from_fs(_workflow.name).cache_success + job_cache_records = RunConfig.from_fs(_workflow.name).cache_jobs else: skip_jobs = [] @@ -128,21 +129,14 @@ class HtmlRunnerHooks: if job.name not in skip_jobs: result = Result.generate_pending(job.name) else: - result = Result.generate_skipped(job.name) + result = Result.generate_skipped(job.name, job_cache_records[job.name]) results.append(result) summary_result = Result.generate_pending(_workflow.name, results=results) summary_result.links.append(env.CHANGE_URL) summary_result.links.append(env.RUN_URL) summary_result.start_time = Utils.timestamp() - # clean the previous latest results in PR if any - if env.PR_NUMBER: - S3.clean_latest_result() - S3.copy_result_to_s3( - summary_result, - unlock=False, - ) - + assert _ResultS3.copy_result_to_s3_with_version(summary_result, version=0) page_url = env.get_report_url(settings=Settings) print(f"CI Status page url [{page_url}]") @@ -150,7 +144,7 @@ class HtmlRunnerHooks: name=_workflow.name, status=Result.Status.PENDING, description="", - url=env.get_report_url(settings=Settings), + 
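Taken together, the digest.py and hook_cache.py changes above make a dockerized job's cache key depend on its image: the job digest is an md5 over the job's declared input files, truncated to CACHE_DIGEST_LEN, with the docker image digest prepended so an image rebuild also invalidates the job cache. A condensed sketch (illustrative; the include/exclude path traversal of the real Digest class is omitted):

import hashlib
from pathlib import Path

CACHE_DIGEST_LEN = 20  # stands in for Settings.CACHE_DIGEST_LEN

def job_digest(input_files, docker_digest=None):
    md5 = hashlib.md5()
    for f in sorted(input_files):
        md5.update(Path(f).read_bytes())
    digest = md5.hexdigest()[:CACHE_DIGEST_LEN]
    # dockerized jobs: respect the image digest in the job digest
    return "-".join([docker_digest, digest]) if docker_digest else digest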
url=env.get_report_url(settings=Settings, latest=True), ) res2 = GH.post_pr_comment( comment_body=f"Workflow [[{_workflow.name}]({page_url})], commit [{_Environment.get().SHA[:8]}]", @@ -167,14 +161,8 @@ class HtmlRunnerHooks: @classmethod def pre_run(cls, _workflow, _job): result = Result.from_fs(_job.name) - S3.copy_result_from_s3( - Result.file_name_static(_workflow.name), - ) - workflow_result = Result.from_fs(_workflow.name) - workflow_result.update_sub_result(result) - S3.copy_result_to_s3( - workflow_result, - unlock=True, + _ResultS3.update_workflow_results( + workflow_name=_workflow.name, new_sub_results=result ) @classmethod @@ -184,14 +172,13 @@ class HtmlRunnerHooks: @classmethod def post_run(cls, _workflow, _job, info_errors): result = Result.from_fs(_job.name) - env = _Environment.get() - S3.copy_result_from_s3( - Result.file_name_static(_workflow.name), - lock=True, - ) - workflow_result = Result.from_fs(_workflow.name) - print(f"Workflow info [{workflow_result.info}], info_errors [{info_errors}]") + _ResultS3.upload_result_files_to_s3(result) + _ResultS3.copy_result_to_s3(result) + env = _Environment.get() + + new_sub_results = [result] + new_result_info = "" env_info = env.REPORT_INFO if env_info: print( @@ -203,14 +190,8 @@ class HtmlRunnerHooks: info_str = f"{_job.name}:\n" info_str += "\n".join(info_errors) print("Update workflow results with new info") - workflow_result.set_info(info_str) + new_result_info = info_str - old_status = workflow_result.status - - S3.upload_result_files_to_s3(result) - workflow_result.update_sub_result(result) - - skipped_job_results = [] if not result.is_ok(): print( "Current job failed - find dependee jobs in the workflow and set their statuses to skipped" @@ -223,7 +204,7 @@ class HtmlRunnerHooks: print( f"NOTE: Set job [{dependee_job.name}] status to [{Result.Status.SKIPPED}] due to current failure" ) - skipped_job_results.append( + new_sub_results.append( Result( name=dependee_job.name, status=Result.Status.SKIPPED, @@ -231,20 +212,18 @@ class HtmlRunnerHooks: + f" [{_job.name}]", ) ) - for skipped_job_result in skipped_job_results: - workflow_result.update_sub_result(skipped_job_result) - S3.copy_result_to_s3( - workflow_result, - unlock=True, + updated_status = _ResultS3.update_workflow_results( + new_info=new_result_info, + new_sub_results=new_sub_results, + workflow_name=_workflow.name, ) - if workflow_result.status != old_status: - print( - f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}]" - ) + + if updated_status: + print(f"Update GH commit status [{result.name}]: [{updated_status}]") GH.post_commit_status( - name=workflow_result.name, - status=GH.convert_to_gh_status(workflow_result.status), + name=_workflow.name, + status=GH.convert_to_gh_status(updated_status), description="", - url=env.get_report_url(settings=Settings), + url=env.get_report_url(settings=Settings, latest=True), ) diff --git a/ci/praktika/json.html b/ci/praktika/json.html index 4e15a67ba76..544fd6e68d4 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -342,7 +342,7 @@ const milliseconds = Math.floor((duration % 1) * 1000); const formattedSeconds = String(seconds); - const formattedMilliseconds = String(milliseconds).padStart(3, '0'); + const formattedMilliseconds = String(milliseconds).padStart(2, '0').slice(-2); return `${formattedSeconds}.${formattedMilliseconds}`; } @@ -600,8 +600,7 @@ td.classList.add('time-column'); td.textContent = value ? 
formatDuration(value) : ''; } else if (column === 'info') { - // For info and other columns, just display the value - td.textContent = value || ''; + td.textContent = value.includes('\n') ? '↵' : (value || ''); td.classList.add('info-column'); } @@ -675,7 +674,8 @@ } if (targetData) { - infoElement.style.display = 'none'; + //infoElement.style.display = 'none'; + infoElement.innerHTML = (targetData.info || '').replace(/\n/g, '
'); addStatusToStatus(targetData.status, targetData.start_time, targetData.duration) @@ -804,7 +804,8 @@ // Check if all required parameters are present to load JSON if (PR && sha && root_name) { - loadResultsJSON(PR, sha, nameParams); + const shaToLoad = (sha === 'latest') ? commitsArray[commitsArray.length - 1] : sha; + loadResultsJSON(PR, shaToLoad, nameParams); } else { document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0'; } diff --git a/ci/praktika/mangle.py b/ci/praktika/mangle.py index b16d52fbbbf..f94b11adad5 100644 --- a/ci/praktika/mangle.py +++ b/ci/praktika/mangle.py @@ -1,11 +1,10 @@ import copy import importlib.util from pathlib import Path -from typing import Any, Dict from praktika import Job -from praktika._settings import _USER_DEFINED_SETTINGS, _Settings -from praktika.utils import ContextManager, Utils +from praktika.settings import Settings +from praktika.utils import Utils def _get_workflows(name=None, file=None): @@ -14,13 +13,13 @@ def _get_workflows(name=None, file=None): """ res = [] - directory = Path(_Settings.WORKFLOWS_DIRECTORY) + directory = Path(Settings.WORKFLOWS_DIRECTORY) for py_file in directory.glob("*.py"): if file and file not in str(py_file): continue module_name = py_file.name.removeprefix(".py") spec = importlib.util.spec_from_file_location( - module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}" + module_name, f"{Settings.WORKFLOWS_DIRECTORY}/{module_name}" ) assert spec foo = importlib.util.module_from_spec(spec) @@ -106,30 +105,3 @@ def _update_workflow_with_native_jobs(workflow): for job in workflow.jobs: aux_job.requires.append(job.name) workflow.jobs.append(aux_job) - - -def _get_user_settings() -> Dict[str, Any]: - """ - Gets user's settings - """ - res = {} # type: Dict[str, Any] - - directory = Path(_Settings.SETTINGS_DIRECTORY) - for py_file in directory.glob("*.py"): - module_name = py_file.name.removeprefix(".py") - spec = importlib.util.spec_from_file_location( - module_name, f"{_Settings.SETTINGS_DIRECTORY}/{module_name}" - ) - assert spec - foo = importlib.util.module_from_spec(spec) - assert spec.loader - spec.loader.exec_module(foo) - for setting in _USER_DEFINED_SETTINGS: - try: - value = getattr(foo, setting) - res[setting] = value - print(f"Apply user defined setting [{setting} = {value}]") - except Exception as e: - pass - - return res diff --git a/ci/praktika/native_jobs.py b/ci/praktika/native_jobs.py index 58af211988b..52bf6c6e204 100644 --- a/ci/praktika/native_jobs.py +++ b/ci/praktika/native_jobs.py @@ -10,9 +10,8 @@ from praktika.gh import GH from praktika.hook_cache import CacheRunnerHooks from praktika.hook_html import HtmlRunnerHooks from praktika.mangle import _get_workflows -from praktika.result import Result, ResultInfo +from praktika.result import Result, ResultInfo, _ResultS3 from praktika.runtime import RunConfig -from praktika.s3 import S3 from praktika.settings import Settings from praktika.utils import Shell, Utils @@ -225,6 +224,7 @@ def _config_workflow(workflow: Workflow.Config, job_name): cache_success=[], cache_success_base64=[], cache_artifacts={}, + cache_jobs={}, ).dump() # checks: @@ -310,9 +310,8 @@ def _finish_workflow(workflow, job_name): print(env.get_needs_statuses()) print("Check Workflow results") - S3.copy_result_from_s3( + _ResultS3.copy_result_from_s3( Result.file_name_static(workflow.name), - lock=False, ) workflow_result = Result.from_fs(workflow.name) @@ -345,7 +344,7 @@ def _finish_workflow(workflow, job_name): 
failed_results.append(result.name) if failed_results: - ready_for_merge_description = f"failed: {', '.join(failed_results)}" + ready_for_merge_description = f"Failed: {', '.join(failed_results)}" if not GH.post_commit_status( name=Settings.READY_FOR_MERGE_STATUS_NAME + f" [{workflow.name}]", @@ -357,10 +356,9 @@ def _finish_workflow(workflow, job_name): env.add_info(ResultInfo.GH_STATUS_ERROR) if update_final_report: - S3.copy_result_to_s3( + _ResultS3.copy_result_to_s3( workflow_result, - unlock=False, - ) # no lock - no unlock + ) Result.from_fs(job_name).set_status(Result.Status.SUCCESS) diff --git a/ci/praktika/result.py b/ci/praktika/result.py index 842deacbcbd..8164b1d1295 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -2,10 +2,12 @@ import dataclasses import datetime import sys from pathlib import Path -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union from praktika._environment import _Environment -from praktika._settings import _Settings +from praktika.cache import Cache +from praktika.s3 import S3 +from praktika.settings import Settings from praktika.utils import ContextManager, MetaClasses, Shell, Utils @@ -55,7 +57,7 @@ class Result(MetaClasses.Serializable): stopwatch: Utils.Stopwatch = None, status="", files=None, - info="", + info: Union[List[str], str] = "", with_info_from_results=True, ): if isinstance(status, bool): @@ -149,7 +151,7 @@ class Result(MetaClasses.Serializable): @classmethod def file_name_static(cls, name): - return f"{_Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json" + return f"{Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json" @classmethod def from_dict(cls, obj: Dict[str, Any]) -> "Result": @@ -232,7 +234,7 @@ class Result(MetaClasses.Serializable): ) @classmethod - def generate_skipped(cls, name, results=None): + def generate_skipped(cls, name, cache_record: Cache.CacheRecord, results=None): return Result( name=name, status=Result.Status.SKIPPED, @@ -241,7 +243,7 @@ class Result(MetaClasses.Serializable): results=results or [], files=[], links=[], - info="from cache", + info=f"from cache: sha [{cache_record.sha}], pr/branch [{cache_record.pr_number or cache_record.branch}]", ) @classmethod @@ -275,7 +277,7 @@ class Result(MetaClasses.Serializable): # Set log file path if logging is enabled log_file = ( - f"{_Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log" + f"{Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log" if with_log else None ) @@ -321,14 +323,31 @@ class Result(MetaClasses.Serializable): self.dump() if not self.is_ok(): print("ERROR: Job Failed") - for result in self.results: - if not result.is_ok(): - print("Failed checks:") - print(" | ", result) + print(self.to_stdout_formatted()) sys.exit(1) else: print("ok") + def to_stdout_formatted(self, indent="", res=""): + if self.is_ok(): + return res + + res += f"{indent}Task [{self.name}] failed.\n" + fail_info = "" + sub_indent = indent + " " + + if not self.results: + if not self.is_ok(): + fail_info += f"{sub_indent}{self.name}:\n" + for line in self.info.splitlines(): + fail_info += f"{sub_indent}{sub_indent}{line}\n" + return res + fail_info + + for sub_result in self.results: + res = sub_result.to_stdout_formatted(sub_indent, res) + + return res + class ResultInfo: SETUP_ENV_JOB_FAILED = ( @@ -351,3 +370,202 @@ class ResultInfo: ) S3_ERROR = "S3 call failure" + + +class _ResultS3: + + @classmethod + def copy_result_to_s3(cls, result, unlock=False): + result.dump() + env = _Environment.get() + 
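Result.to_stdout_formatted above walks the result tree recursively, printing only failing nodes and dumping leaf info with indentation. A self-contained sketch of the same idea, with plain dicts standing in for Result objects (field names are illustrative):

def to_stdout_formatted(result, indent="", res=""):
    # print only failing nodes; leaves dump their info, nesting adds indentation
    if result["is_ok"]:
        return res
    res += f"{indent}Task [{result['name']}] failed.\n"
    sub_indent = indent + "  "
    if not result["results"]:
        for line in result["info"].splitlines():
            res += f"{sub_indent}{line}\n"
        return res
    for sub in result["results"]:
        res = to_stdout_formatted(sub, sub_indent, res)
    return res

tree = {"name": "job", "is_ok": False, "info": "", "results": [
    {"name": "step 1", "is_ok": True, "info": "", "results": []},
    {"name": "step 2", "is_ok": False, "info": "assertion failed", "results": []},
]}
print(to_stdout_formatted(tree), end="")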
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}" + s3_path_full = f"{s3_path}/{Path(result.file_name()).name}" + url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name()) + # if unlock: + # if not cls.unlock(s3_path_full): + # print(f"ERROR: File [{s3_path_full}] unlock failure") + # assert False # TODO: investigate + return url + + @classmethod + def copy_result_from_s3(cls, local_path, lock=False): + env = _Environment.get() + file_name = Path(local_path).name + s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}" + # if lock: + # cls.lock(s3_path) + if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path): + print(f"ERROR: failed to cp file [{s3_path}] from s3") + raise + + @classmethod + def copy_result_from_s3_with_version(cls, local_path): + env = _Environment.get() + file_name = Path(local_path).name + local_dir = Path(local_path).parent + file_name_pattern = f"{file_name}_*" + for file_path in local_dir.glob(file_name_pattern): + file_path.unlink() + s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/" + if not S3.copy_file_from_s3_matching_pattern( + s3_path=s3_path, local_path=local_dir, include=file_name_pattern + ): + print(f"ERROR: failed to cp file [{s3_path}] from s3") + raise + result_files = [] + for file_path in local_dir.glob(file_name_pattern): + result_files.append(file_path) + assert result_files, "No result files found" + result_files.sort() + version = int(result_files[-1].name.split("_")[-1]) + Shell.check(f"cp {result_files[-1]} {local_path}", strict=True, verbose=True) + return version + + @classmethod + def copy_result_to_s3_with_version(cls, result, version): + result.dump() + filename = Path(result.file_name()).name + file_name_versioned = f"{filename}_{str(version).zfill(3)}" + env = _Environment.get() + s3_path_versioned = ( + f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name_versioned}" + ) + s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/" + if version == 0: + S3.clean_s3_directory(s3_path=s3_path) + if not S3.put( + s3_path=s3_path_versioned, + local_path=result.file_name(), + if_none_matched=True, + ): + print("Failed to put versioned Result") + return False + if not S3.put(s3_path=s3_path, local_path=result.file_name()): + print("Failed to put non-versioned Result") + return True + + # @classmethod + # def lock(cls, s3_path, level=0): + # env = _Environment.get() + # s3_path_lock = s3_path + f".lock" + # file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}" + # assert Shell.check( + # f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True + # ), "Never" + # + # i = 20 + # meta = S3.head_object(s3_path_lock) + # while meta: + # locked_by_job = meta.get("Metadata", {"job": ""}).get("job", "") + # if locked_by_job: + # decoded_bytes = base64.b64decode(locked_by_job) + # locked_by_job = decoded_bytes.decode("utf-8") + # print( + # f"WARNING: Failed to acquire lock, meta [{meta}], job [{locked_by_job}] - wait" + # ) + # i -= 5 + # if i < 0: + # info = f"ERROR: lock acquire failure - unlock forcefully" + # print(info) + # env.add_info(info) + # break + # time.sleep(5) + # + # metadata = {"job": Utils.to_base64(env.JOB_NAME)} + # S3.put( + # s3_path=s3_path_lock, + # local_path=file_path_lock, + # metadata=metadata, + # if_none_matched=True, + # ) + # time.sleep(1) + # obj = S3.head_object(s3_path_lock) + # if not obj or not obj.has_tags(tags=metadata): + # print(f"WARNING: locked by another job [{obj}]") + # env.add_info("S3 lock file failure") + # 
cls.lock(s3_path, level=level + 1) + # print("INFO: lock acquired") + # + # @classmethod + # def unlock(cls, s3_path): + # s3_path_lock = s3_path + ".lock" + # env = _Environment.get() + # obj = S3.head_object(s3_path_lock) + # if not obj: + # print("ERROR: lock file is removed") + # assert False # investigate + # elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}): + # print("ERROR: lock file was acquired by another job") + # assert False # investigate + # + # if not S3.delete(s3_path_lock): + # print(f"ERROR: File [{s3_path_lock}] delete failure") + # print("INFO: lock released") + # return True + + @classmethod + def upload_result_files_to_s3(cls, result): + if result.results: + for result_ in result.results: + cls.upload_result_files_to_s3(result_) + for file in result.files: + if not Path(file).is_file(): + print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload") + result.info += f"\nWARNING: Result file [{file}] was not found" + file_link = S3._upload_file_to_s3(file, upload_to_s3=False) + else: + is_text = False + for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS: + if file.endswith(text_file_suffix): + print( + f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object" + ) + is_text = True + break + file_link = S3._upload_file_to_s3( + file, + upload_to_s3=True, + text=is_text, + s3_subprefix=Utils.normalize_string(result.name), + ) + result.links.append(file_link) + if result.files: + print( + f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list" + ) + result.files = [] + result.dump() + + @classmethod + def update_workflow_results(cls, workflow_name, new_info="", new_sub_results=None): + assert new_info or new_sub_results + + attempt = 1 + prev_status = "" + new_status = "" + done = False + while attempt < 10: + version = cls.copy_result_from_s3_with_version( + Result.file_name_static(workflow_name) + ) + workflow_result = Result.from_fs(workflow_name) + prev_status = workflow_result.status + if new_info: + workflow_result.set_info(new_info) + if new_sub_results: + if isinstance(new_sub_results, Result): + new_sub_results = [new_sub_results] + for result_ in new_sub_results: + workflow_result.update_sub_result(result_) + new_status = workflow_result.status + if cls.copy_result_to_s3_with_version(workflow_result, version=version + 1): + done = True + break + print(f"Attempt [{attempt}] to upload workflow result failed") + attempt += 1 + assert done + + if prev_status != new_status: + return new_status + else: + return None diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 1ac8748d1c0..38112dd5684 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -52,6 +52,7 @@ class Runner: cache_success=[], cache_success_base64=[], cache_artifacts={}, + cache_jobs={}, ) for docker in workflow.dockers: workflow_config.digest_dockers[docker.name] = Digest().calc_docker_digest( @@ -123,7 +124,7 @@ class Runner: return 0 - def _run(self, workflow, job, docker="", no_docker=False, param=None): + def _run(self, workflow, job, docker="", no_docker=False, param=None, test=""): # re-set envs for local run env = _Environment.get() env.JOB_NAME = job.name @@ -162,6 +163,9 @@ class Runner: if param: print(f"Custom --param [{param}] will be passed to job's script") cmd += f" --param {param}" + if test: + print(f"Custom --test [{test}] will be passed to job's script") + cmd += f" --test {test}" print(f"--- Run command [{cmd}]") 
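update_workflow_results above replaces the old lock-file scheme with optimistic concurrency: read the highest result_<name>.json_NNN, apply the update, and write version NNN+1 via S3.put(..., if_none_matched=True), which maps to `aws s3api put-object --if-none-match "*"` (see the s3.py hunk below) and fails if another writer already created that version. A dependency-free sketch of the protocol, with a dict standing in for the bucket:

# Sketch only: `store` models the S3 bucket; put_if_absent models the
# create-only put that --if-none-match "*" provides.
store = {}

def put_if_absent(key, value):
    if key in store:
        return False  # another writer won the race for this version
    store[key] = value
    return True

def latest_version(prefix):
    versions = [int(k.rsplit("_", 1)[1]) for k in store if k.startswith(prefix + "_")]
    return max(versions, default=-1)

def update_with_retries(prefix, update, attempts=10):
    for _ in range(attempts):
        version = latest_version(prefix)
        current = store.get(f"{prefix}_{version:03d}", {})
        if put_if_absent(f"{prefix}_{version + 1:03d}", update(dict(current))):
            return
    raise RuntimeError("lost the version race too many times")

put_if_absent("result_workflow_000", {"status": "pending"})
update_with_retries("result_workflow", lambda r: {**r, "status": "success"})
assert store["result_workflow_001"]["status"] == "success"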
with TeePopen(cmd, timeout=job.timeout) as process: @@ -240,10 +244,6 @@ class Runner: result.set_files(files=[Settings.RUN_LOG]) result.update_duration().dump() - if result.info and result.status != Result.Status.SUCCESS: - # provide job info to workflow level - info_errors.append(result.info) - if run_exit_code == 0: providing_artifacts = [] if job.provides and workflow.artifacts: @@ -310,6 +310,7 @@ class Runner: local_run=False, no_docker=False, param=None, + test="", pr=None, sha=None, branch=None, @@ -358,7 +359,12 @@ class Runner: print(f"=== Run script [{job.name}], workflow [{workflow.name}] ===") try: run_code = self._run( - workflow, job, docker=docker, no_docker=no_docker, param=param + workflow, + job, + docker=docker, + no_docker=no_docker, + param=param, + test=test, ) res = run_code == 0 if not res: diff --git a/ci/praktika/runtime.py b/ci/praktika/runtime.py index a87b67c2c79..07c24e0498c 100644 --- a/ci/praktika/runtime.py +++ b/ci/praktika/runtime.py @@ -15,17 +15,23 @@ class RunConfig(MetaClasses.Serializable): # there are might be issue with special characters in job names if used directly in yaml syntax - create base64 encoded list to avoid this cache_success_base64: List[str] cache_artifacts: Dict[str, Cache.CacheRecord] + cache_jobs: Dict[str, Cache.CacheRecord] sha: str @classmethod def from_dict(cls, obj): cache_artifacts = obj["cache_artifacts"] + cache_jobs = obj["cache_jobs"] cache_artifacts_deserialized = {} + cache_jobs_deserialized = {} for artifact_name, cache_artifact in cache_artifacts.items(): cache_artifacts_deserialized[artifact_name] = Cache.CacheRecord.from_dict( cache_artifact ) obj["cache_artifacts"] = cache_artifacts_deserialized + for job_name, cache_job in cache_jobs.items(): + cache_jobs_deserialized[job_name] = Cache.CacheRecord.from_dict(cache_job) + obj["cache_jobs"] = cache_jobs_deserialized return RunConfig(**obj) @classmethod diff --git a/ci/praktika/s3.py b/ci/praktika/s3.py index 04a08622dcd..82034b57b80 100644 --- a/ci/praktika/s3.py +++ b/ci/praktika/s3.py @@ -1,12 +1,11 @@ import dataclasses import json -import time from pathlib import Path from typing import Dict from praktika._environment import _Environment from praktika.settings import Settings -from praktika.utils import Shell, Utils +from praktika.utils import Shell class S3: @@ -59,16 +58,15 @@ class S3: return f"https://{s3_full_path}".replace(bucket, endpoint) @classmethod - def put(cls, s3_path, local_path, text=False, metadata=None): + def put(cls, s3_path, local_path, text=False, metadata=None, if_none_matched=False): assert Path(local_path).exists(), f"Path [{local_path}] does not exist" assert Path(s3_path), f"Invalid S3 Path [{s3_path}]" assert Path( local_path ).is_file(), f"Path [{local_path}] is not file. 
Only files are supported" s3_full_path = s3_path - if not s3_full_path.endswith(file_name): - s3_full_path = f"{s3_path}/{Path(local_path).name}" + if s3_full_path.endswith("/"): + s3_full_path = f"{s3_path}{Path(local_path).name}" s3_full_path = str(s3_full_path).removeprefix("s3://") bucket, key = s3_full_path.split("/", maxsplit=1) @@ -76,6 +74,8 @@ command = ( f"aws s3api put-object --bucket {bucket} --key {key} --body {local_path}" ) + if if_none_matched: + command += f' --if-none-match "*"' if metadata: for k, v in metadata.items(): command += f" --metadata {k}={v}" @@ -84,7 +84,7 @@ cmd += " --content-type text/plain" res = cls.run_command_with_retries(command) - assert res + return res @classmethod def run_command_with_retries(cls, command, retries=Settings.MAX_RETRIES_S3): @@ -101,6 +101,14 @@ elif "does not exist" in stderr: print("ERROR: requested file does not exist") break + elif "Unknown options" in stderr: + print("ERROR: Invalid AWS CLI command or CLI client version:") + print(f" | aws error: {stderr}") + break + elif "PreconditionFailed" in stderr: + print("ERROR: AWS API Call Precondition Failed") + print(f" | aws error: {stderr}") + break if ret_code != 0: print( f"ERROR: aws s3 cp failed, stdout/stderr err: [{stderr}], out [{stdout}]" ) res = ret_code == 0 return res - @classmethod - def get_link(cls, s3_path, local_path): - s3_full_path = f"{s3_path}/{Path(local_path).name}" - bucket = s3_path.split("/")[0] - endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket] - return f"https://{s3_full_path}".replace(bucket, endpoint) - @classmethod def copy_file_from_s3(cls, s3_path, local_path): assert Path(s3_path), f"Invalid S3 Path [{s3_path}]" @@ -128,6 +129,19 @@ res = cls.run_command_with_retries(cmd) return res + @classmethod + def copy_file_from_s3_matching_pattern( + cls, s3_path, local_path, include, exclude="*" + ): + assert Path(s3_path), f"Invalid S3 Path [{s3_path}]" + assert Path( + local_path + ).is_dir(), f"Path [{local_path}] does not exist or not a directory" + assert s3_path.endswith("/"), f"s3 path is invalid [{s3_path}]" + cmd = f'aws s3 cp s3://{s3_path} {local_path} --exclude "{exclude}" --include "{include}" --recursive' + res = cls.run_command_with_retries(cmd) + return res + @classmethod def head_object(cls, s3_path): s3_path = str(s3_path).removeprefix("s3://") @@ -148,103 +162,6 @@ verbose=True, ) - # TODO: apparently should be placed into separate file to be used only inside praktika - # keeping this module clean from importing Settings, Environment and etc, making it easy for use externally - @classmethod - def copy_result_to_s3(cls, result, unlock=True): - result.dump() - env = _Environment.get() - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}" - s3_path_full = f"{s3_path}/{Path(result.file_name()).name}" - url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name()) - if env.PR_NUMBER: - print("Duplicate Result for latest commit alias in PR") - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True)}" - url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name()) - if unlock: - if not cls.unlock(s3_path_full): - print(f"ERROR: File [{s3_path_full}] unlock failure") - assert False # TODO: investigate - return url - - @classmethod - def copy_result_from_s3(cls, local_path, lock=True): - env = _Environment.get() - file_name = Path(local_path).name - s3_path = 
f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}" - if lock: - cls.lock(s3_path) - if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path): - print(f"ERROR: failed to cp file [{s3_path}] from s3") - raise - - @classmethod - def lock(cls, s3_path, level=0): - assert level < 3, "Never" - env = _Environment.get() - s3_path_lock = s3_path + f".lock" - file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}" - assert Shell.check( - f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True - ), "Never" - - i = 20 - meta = S3.head_object(s3_path_lock) - while meta: - print(f"WARNING: Failed to acquire lock, meta [{meta}] - wait") - i -= 5 - if i < 0: - info = f"ERROR: lock acquire failure - unlock forcefully" - print(info) - env.add_info(info) - break - time.sleep(5) - - metadata = {"job": Utils.to_base64(env.JOB_NAME)} - S3.put( - s3_path=s3_path_lock, - local_path=file_path_lock, - metadata=metadata, - ) - time.sleep(1) - obj = S3.head_object(s3_path_lock) - if not obj or not obj.has_tags(tags=metadata): - print(f"WARNING: locked by another job [{obj}]") - env.add_info("S3 lock file failure") - cls.lock(s3_path, level=level + 1) - print("INFO: lock acquired") - - @classmethod - def unlock(cls, s3_path): - s3_path_lock = s3_path + ".lock" - env = _Environment.get() - obj = S3.head_object(s3_path_lock) - if not obj: - print("ERROR: lock file is removed") - assert False # investigate - elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}): - print("ERROR: lock file was acquired by another job") - assert False # investigate - - if not S3.delete(s3_path_lock): - print(f"ERROR: File [{s3_path_lock}] delete failure") - print("INFO: lock released") - return True - - @classmethod - def get_result_link(cls, result): - env = _Environment.get() - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True if env.PR_NUMBER else False)}" - return S3.get_link(s3_path=s3_path, local_path=result.file_name()) - - @classmethod - def clean_latest_result(cls): - env = _Environment.get() - env.SHA = "latest" - assert env.PR_NUMBER - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}" - S3.clean_s3_directory(s3_path=s3_path) - @classmethod def _upload_file_to_s3( cls, local_file_path, upload_to_s3: bool, text: bool = False, s3_subprefix="" @@ -260,36 +177,3 @@ class S3: ) return html_link return f"file://{Path(local_file_path).absolute()}" - - @classmethod - def upload_result_files_to_s3(cls, result): - if result.results: - for result_ in result.results: - cls.upload_result_files_to_s3(result_) - for file in result.files: - if not Path(file).is_file(): - print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload") - result.info += f"\nWARNING: Result file [{file}] was not found" - file_link = cls._upload_file_to_s3(file, upload_to_s3=False) - else: - is_text = False - for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS: - if file.endswith(text_file_suffix): - print( - f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object" - ) - is_text = True - break - file_link = cls._upload_file_to_s3( - file, - upload_to_s3=True, - text=is_text, - s3_subprefix=Utils.normalize_string(result.name), - ) - result.links.append(file_link) - if result.files: - print( - f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list" - ) - result.files = [] - result.dump() diff --git a/ci/praktika/settings.py b/ci/praktika/settings.py index 
1a4068d9398..b281a95370c 100644 --- a/ci/praktika/settings.py +++ b/ci/praktika/settings.py @@ -1,8 +1,152 @@ -from praktika._settings import _Settings -from praktika.mangle import _get_user_settings +import dataclasses +import importlib.util +from pathlib import Path +from typing import Dict, Iterable, List, Optional -Settings = _Settings() -user_settings = _get_user_settings() -for setting, value in user_settings.items(): - Settings.__setattr__(setting, value) +@dataclasses.dataclass +class _Settings: + ###################################### + # Pipeline generation settings # + ###################################### + MAIN_BRANCH = "main" + CI_PATH = "./ci" + WORKFLOW_PATH_PREFIX: str = "./.github/workflows" + WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows" + SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings" + CI_CONFIG_JOB_NAME = "Config Workflow" + DOCKER_BUILD_JOB_NAME = "Docker Builds" + FINISH_WORKFLOW_JOB_NAME = "Finish Workflow" + READY_FOR_MERGE_STATUS_NAME = "Ready for Merge" + CI_CONFIG_RUNS_ON: Optional[List[str]] = None + DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None + VALIDATE_FILE_PATHS: bool = True + + ###################################### + # Runtime Settings # + ###################################### + MAX_RETRIES_S3 = 3 + MAX_RETRIES_GH = 3 + + ###################################### + # S3 (artifact storage) settings # + ###################################### + S3_ARTIFACT_PATH: str = "" + + ###################################### + # CI workspace settings # + ###################################### + TEMP_DIR: str = "/tmp/praktika" + OUTPUT_DIR: str = f"{TEMP_DIR}/output" + INPUT_DIR: str = f"{TEMP_DIR}/input" + PYTHON_INTERPRETER: str = "python3" + PYTHON_PACKET_MANAGER: str = "pip3" + PYTHON_VERSION: str = "3.9" + INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False + INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt" + ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json" + RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log" + + SECRET_GH_APP_ID: str = "GH_APP_ID" + SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY" + + ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh" + WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json" + + ###################################### + # CI Cache settings # + ###################################### + CACHE_VERSION: int = 1 + CACHE_DIGEST_LEN: int = 20 + CACHE_S3_PATH: str = "" + CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache" + + ###################################### + # Report settings # + ###################################### + HTML_S3_PATH: str = "" + HTML_PAGE_FILE: str = "./praktika/json.html" + TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"]) + S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None + + DOCKERHUB_USERNAME: str = "" + DOCKERHUB_SECRET: str = "" + DOCKER_WD: str = "/wd" + + ###################################### + # CI DB Settings # + ###################################### + SECRET_CI_DB_URL: str = "CI_DB_URL" + SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD" + CI_DB_DB_NAME = "" + CI_DB_TABLE_NAME = "" + CI_DB_INSERT_TIMEOUT_SEC = 5 + + DISABLE_MERGE_COMMIT = True + + +_USER_DEFINED_SETTINGS = [ + "S3_ARTIFACT_PATH", + "CACHE_S3_PATH", + "HTML_S3_PATH", + "S3_BUCKET_TO_HTTP_ENDPOINT", + "TEXT_CONTENT_EXTENSIONS", + "TEMP_DIR", + "OUTPUT_DIR", + "INPUT_DIR", + "CI_CONFIG_RUNS_ON", + "DOCKER_BUILD_RUNS_ON", + "CI_CONFIG_JOB_NAME", + "PYTHON_INTERPRETER", + "PYTHON_VERSION", + "PYTHON_PACKET_MANAGER", + "INSTALL_PYTHON_FOR_NATIVE_JOBS", + "INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS", 
+ "MAX_RETRIES_S3", + "MAX_RETRIES_GH", + "VALIDATE_FILE_PATHS", + "DOCKERHUB_USERNAME", + "DOCKERHUB_SECRET", + "READY_FOR_MERGE_STATUS_NAME", + "SECRET_CI_DB_URL", + "SECRET_CI_DB_PASSWORD", + "CI_DB_DB_NAME", + "CI_DB_TABLE_NAME", + "CI_DB_INSERT_TIMEOUT_SEC", + "SECRET_GH_APP_PEM_KEY", + "SECRET_GH_APP_ID", + "MAIN_BRANCH", + "DISABLE_MERGE_COMMIT", +] + + +def _get_settings() -> _Settings: + res = _Settings() + + directory = Path(_Settings.SETTINGS_DIRECTORY) + for py_file in directory.glob("*.py"): + module_name = py_file.name.removeprefix(".py") + spec = importlib.util.spec_from_file_location( + module_name, f"{_Settings.SETTINGS_DIRECTORY}/{module_name}" + ) + assert spec + foo = importlib.util.module_from_spec(spec) + assert spec.loader + spec.loader.exec_module(foo) + for setting in _USER_DEFINED_SETTINGS: + try: + value = getattr(foo, setting) + res.__setattr__(setting, value) + # print(f"- read user defined setting [{setting} = {value}]") + except Exception as e: + # print(f"Exception while read user settings: {e}") + pass + + return res + + +class GHRunners: + ubuntu = "ubuntu-latest" + + +Settings = _get_settings() diff --git a/ci/praktika/utils.py b/ci/praktika/utils.py index 62eb13b3e19..2bcc94f2559 100644 --- a/ci/praktika/utils.py +++ b/ci/praktika/utils.py @@ -17,8 +17,6 @@ from threading import Thread from types import SimpleNamespace from typing import Any, Dict, Iterator, List, Optional, Type, TypeVar, Union -from praktika._settings import _Settings - T = TypeVar("T", bound="Serializable") diff --git a/ci/praktika/validator.py b/ci/praktika/validator.py index d612881b819..0bb722903e5 100644 --- a/ci/praktika/validator.py +++ b/ci/praktika/validator.py @@ -4,10 +4,8 @@ from itertools import chain from pathlib import Path from praktika import Workflow -from praktika._settings import GHRunners from praktika.mangle import _get_workflows -from praktika.settings import Settings -from praktika.utils import ContextManager +from praktika.settings import GHRunners, Settings class Validator: @@ -168,9 +166,7 @@ class Validator: "\n echo requests==2.32.3 >> ./ci/requirements.txt" ) message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt" - cls.evaluate_check( - path.is_file(), message, job.name, workflow.name - ) + cls.evaluate_check(path.is_file(), message, job.name, workflow.name) @classmethod def validate_dockers(cls, workflow: Workflow.Config): diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index 0d505ae27c4..707babb1250 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -68,6 +68,7 @@ stateless_tests_jobs = Job.Config( name=JobNames.STATELESS, runs_on=[RunnerLabels.BUILDER], command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", + # many tests expect to see "/var/lib/clickhouse" in various output lines - add mount for now, consider creating this dir in docker file run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", digest_config=Job.CacheDigestConfig( include_paths=[ From c5817d528c63c0a28bf373cbd46e072dcb8e254b Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 11 Nov 2024 20:38:33 +0000 Subject: [PATCH 159/267] add test for const adaptive granularity --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 4 +- .../MergeTree/IMergeTreeDataPartWriter.cpp | 7 +-- src/Storages/MergeTree/MergeTreeData.cpp | 3 +- .../MergeTree/MergeTreeDataPartCompact.cpp | 2 
+- .../MergeTree/MergeTreeDataPartWide.cpp | 3 +- .../MergeTree/MergeTreeDataPartWriterWide.cpp | 7 ++- .../MergeTree/MergeTreeIndexGranularity.cpp | 4 +- .../MergeTree/MergeTreeIndexGranularity.h | 3 +- .../MergeTreeIndexGranularityAdaptive.cpp | 6 +-- .../MergeTreeIndexGranularityAdaptive.h | 1 + .../MergeTreeIndexGranularityConstant.cpp | 2 - .../MergeTreeIndexGranularityConstant.h | 1 + src/Storages/MergeTree/MergeTreeSettings.cpp | 4 +- .../MergeTree/MergedBlockOutputStream.cpp | 6 +-- ...const_adaptive_index_granularity.reference | 54 +++++++++++++++++++ ...03262_const_adaptive_index_granularity.sql | 53 ++++++++++++++++++ 16 files changed, 129 insertions(+), 31 deletions(-) create mode 100644 tests/queries/0_stateless/03262_const_adaptive_index_granularity.reference create mode 100644 tests/queries/0_stateless/03262_const_adaptive_index_granularity.sql diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 5657dbebeb1..03e7ad7aee0 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -3,7 +3,6 @@ #include #include -#include #include #include #include @@ -30,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -39,8 +39,6 @@ #include #include #include -#include "Storages/MergeTree/MergeTreeIndexGranularity.h" -#include "Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h" #include diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp index 0e70310675f..e0070dc2349 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp @@ -1,11 +1,6 @@ -#include -#include #include #include -#include "Storages/MergeTree/MergeTreeIndexGranularity.h" -#include "Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h" -#include "Storages/MergeTree/MergeTreeIndexGranularityConstant.h" -#include "Storages/MergeTree/MergeTreeSettings.h" +#include #include namespace DB diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index aaae97d7d3e..d745b428061 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -21,7 +21,6 @@ #include #include #include -#include "Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h" #include #include #include @@ -84,6 +83,7 @@ #include #include #include +#include #include #include @@ -96,7 +96,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp index bc10f119c01..d07806a65c9 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include namespace DB diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index 0330068fd25..8852551bee9 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -3,9 +3,10 @@ #include #include #include +#include +#include #include #include -#include namespace DB diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index ba01fe889fb..11e83cba036 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ 
b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -273,7 +273,12 @@ void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Perm /// but not in case of vertical part of vertical merge) if (compute_granularity) { - size_t index_granularity_for_block = computeIndexGranularity(block_to_write); + size_t index_granularity_for_block; + if (auto constant_granularity = index_granularity->getConstantGranularity()) + index_granularity_for_block = *constant_granularity; + else + index_granularity_for_block = computeIndexGranularity(block_to_write); + if (rows_written_in_last_mark > 0) { size_t rows_left_in_last_mark = index_granularity->getMarkRows(getCurrentMark()) - rows_written_in_last_mark; diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp index 9696beac74e..edd74c4a55d 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp @@ -1,11 +1,9 @@ -#include #include #include #include #include +#include #include -#include "Common/Exception.h" -#include "Storages/MergeTree/MergeTreeDataPartType.h" namespace DB { diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.h b/src/Storages/MergeTree/MergeTreeIndexGranularity.h index 3fd59ce349d..0b1a7ee5bbc 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.h @@ -1,7 +1,6 @@ #pragma once #include #include -#include "Storages/MergeTree/MergeTreeSettings.h" namespace DB { @@ -18,6 +17,8 @@ public: MergeTreeIndexGranularity() = default; virtual ~MergeTreeIndexGranularity() = default; + virtual std::optional<size_t> getConstantGranularity() const = 0; + /// Return count of rows between marks virtual size_t getRowsCountInRange(size_t begin, size_t end) const = 0; /// Return count of rows between marks diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.cpp index 358547fe6fe..79f6bb5c050 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.cpp @@ -1,9 +1,5 @@ -#include #include -#include -#include "Common/Logger.h" -#include "Common/logger_useful.h" -#include "Storages/MergeTree/MergeTreeIndexGranularityConstant.h" +#include namespace DB diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h b/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h index a9b5711dd34..ca9ef4c69ef 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityAdaptive.h @@ -10,6 +10,7 @@ public: MergeTreeIndexGranularityAdaptive() = default; explicit MergeTreeIndexGranularityAdaptive(const std::vector<size_t> & marks_rows_partial_sums_); + std::optional<size_t> getConstantGranularity() const override { return {}; } size_t getRowsCountInRange(size_t begin, size_t end) const override; size_t countMarksForRows(size_t from_mark, size_t number_of_rows) const override; size_t countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const override; diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.cpp index 9db8ce5d199..9b0ec25234b 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.cpp @@ -1,6 +1,4 @@ #include -#include -#include namespace DB 
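A note on the new virtual method above: the writer asks the granularity object once per block whether the whole part uses one fixed granularity, and only falls back to the per-block computation when it does not. Below is a minimal standalone sketch of that dispatch, not ClickHouse code: the IGranularity, AdaptiveGranularity and ConstantGranularity names are hypothetical stand-ins for the MergeTreeIndexGranularity* classes.

#include <cstddef>
#include <iostream>
#include <optional>
#include <vector>

struct IGranularity
{
    virtual ~IGranularity() = default;
    /// Non-empty only when every mark (except possibly the final one) covers the same number of rows.
    virtual std::optional<size_t> getConstantGranularity() const = 0;
};

struct AdaptiveGranularity : IGranularity
{
    std::vector<size_t> rows_per_mark; /// Per-mark row counts, so no single constant exists.
    std::optional<size_t> getConstantGranularity() const override { return {}; }
};

struct ConstantGranularity : IGranularity
{
    size_t granularity;
    explicit ConstantGranularity(size_t granularity_) : granularity(granularity_) {}
    std::optional<size_t> getConstantGranularity() const override { return granularity; }
};

/// Stand-in for the relatively expensive per-block computation done by the writer.
size_t computeIndexGranularityForBlock() { return 8192; }

size_t granularityForBlock(const IGranularity & index_granularity)
{
    /// Mirrors the branch added to MergeTreeDataPartWriterWide::write above.
    if (auto constant_granularity = index_granularity.getConstantGranularity())
        return *constant_granularity; /// constant part: no per-block work
    return computeIndexGranularityForBlock(); /// adaptive part: compute as before
}

int main()
{
    ConstantGranularity constant(1000);
    AdaptiveGranularity adaptive;
    std::cout << granularityForBlock(constant) << '\n'; /// prints 1000
    std::cout << granularityForBlock(adaptive) << '\n'; /// prints 8192
}

Returning an optional keeps the contract explicit: a constant part answers with its granularity, an adaptive part answers with an empty optional, and the caller pays one virtual call per block either way.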
diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.h b/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.h index 3fd5263c5ab..aeb704e5954 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityConstant.h @@ -18,6 +18,7 @@ public: explicit MergeTreeIndexGranularityConstant(size_t constant_granularity_); MergeTreeIndexGranularityConstant(size_t constant_granularity_, size_t last_mark_granularity_, size_t num_marks_without_final_, bool has_final_mark_); + std::optional<size_t> getConstantGranularity() const override { return constant_granularity; } size_t getRowsCountInRange(size_t begin, size_t end) const override; size_t countMarksForRows(size_t from_mark, size_t number_of_rows) const override; size_t countRowsForRows(size_t from_mark, size_t number_of_rows, size_t offset_in_rows) const override; diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 7d826246ed2..28ae933c3de 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -187,8 +187,8 @@ namespace ErrorCodes DECLARE(UInt64, min_merge_bytes_to_use_direct_io, 10ULL * 1024 * 1024 * 1024, "Minimal amount of bytes to enable O_DIRECT in merge (0 - disabled).", 0) \ DECLARE(UInt64, index_granularity_bytes, 10 * 1024 * 1024, "Approximate amount of bytes in single granule (0 - disabled).", 0) \ DECLARE(UInt64, min_index_granularity_bytes, 1024, "Minimum amount of bytes in single granule.", 1024) \ - DECLARE(Bool, use_const_adaptive_granularity, false, "KEK KEK KEK KEK KEK KEK", 0) \ - DECLARE(Bool, enable_index_granularity_compression, true, "KEK KEK KEK KEK KEK KEK", 0) \ + DECLARE(Bool, use_const_adaptive_granularity, false, "Always use constant granularity for the whole part. It allows compressing index granularity values in memory. 
It can be useful in extremely large workloads with thin tables.", 0) \ + DECLARE(Bool, enable_index_granularity_compression, true, "Compress index granularity values in memory when possible", 0) \ DECLARE(Int64, merge_with_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with delete TTL can be repeated.", 0) \ DECLARE(Int64, merge_with_recompression_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with recompression TTL can be repeated.", 0) \ DECLARE(Bool, ttl_only_drop_parts, false, "Only drop altogether the expired parts and not partially prune them.", 0) \ diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index a585a95f036..7a3baee6a2d 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -1,11 +1,9 @@ -#include #include +#include #include #include #include #include -#include "Common/Logger.h" -#include #include @@ -19,7 +17,7 @@ namespace ErrorCodes namespace MergeTreeSetting { - extern MergeTreeSettingsBool enable_index_granularity_compression; + extern const MergeTreeSettingsBool enable_index_granularity_compression; } MergedBlockOutputStream::MergedBlockOutputStream( diff --git a/tests/queries/0_stateless/03262_const_adaptive_index_granularity.reference b/tests/queries/0_stateless/03262_const_adaptive_index_granularity.reference new file mode 100644 index 00000000000..8f0ff59fdd1 --- /dev/null +++ b/tests/queries/0_stateless/03262_const_adaptive_index_granularity.reference @@ -0,0 +1,54 @@ +adaptive non-const, before merge +all_1_1_0 0 10 0 +all_1_1_0 1 5 10 +all_1_1_0 2 0 14 +all_2_2_0 0 2 15 +all_2_2_0 1 2 17 +all_2_2_0 2 2 19 +all_2_2_0 3 2 21 +all_2_2_0 4 2 23 +all_2_2_0 5 2 25 +all_2_2_0 6 2 27 +all_2_2_0 7 1 29 +all_2_2_0 8 0 29 +all_1_1_0 24 +all_2_2_0 72 +adaptive non-const, after merge +all_1_2_1 0 10 0 +all_1_2_1 1 5 10 +all_1_2_1 2 2 15 +all_1_2_1 3 2 17 +all_1_2_1 4 2 19 +all_1_2_1 5 2 21 +all_1_2_1 6 2 23 +all_1_2_1 7 2 25 +all_1_2_1 8 2 27 +all_1_2_1 9 1 29 +all_1_2_1 10 0 29 +all_1_2_1 88 +adaptive const, before merge +all_1_1_0 0 10 0 +all_1_1_0 1 5 10 +all_1_1_0 2 0 14 +all_2_2_0 0 2 15 +all_2_2_0 1 2 17 +all_2_2_0 2 2 19 +all_2_2_0 3 2 21 +all_2_2_0 4 2 23 +all_2_2_0 5 2 25 +all_2_2_0 6 2 27 +all_2_2_0 7 1 29 +all_2_2_0 8 0 29 +all_1_1_0 25 +all_2_2_0 25 +adaptive const, after merge +all_1_2_1 0 4 0 +all_1_2_1 1 4 4 +all_1_2_1 2 4 8 +all_1_2_1 3 4 12 +all_1_2_1 4 4 16 +all_1_2_1 5 4 20 +all_1_2_1 6 4 24 +all_1_2_1 7 2 28 +all_1_2_1 8 0 29 +all_1_2_1 25 diff --git a/tests/queries/0_stateless/03262_const_adaptive_index_granularity.sql b/tests/queries/0_stateless/03262_const_adaptive_index_granularity.sql new file mode 100644 index 00000000000..7445f66dc1a --- /dev/null +++ b/tests/queries/0_stateless/03262_const_adaptive_index_granularity.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS t_index_granularity; + +CREATE TABLE t_index_granularity (id UInt64, s String) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, + index_granularity = 10, + index_granularity_bytes = 4096, + merge_max_block_size = 10, + merge_max_block_size_bytes = 4096, + enable_index_granularity_compression = 1, + use_const_adaptive_granularity = 0, + enable_vertical_merge_algorithm = 0; + +INSERT INTO t_index_granularity SELECT number, 'a' FROM numbers(15); +INSERT INTO t_index_granularity SELECT number, repeat('a', 2048) FROM numbers(15, 15); + +SELECT 'adaptive non-const, before merge'; +SELECT * FROM 
mergeTreeIndex(currentDatabase(), t_index_granularity) ORDER BY ALL; +SELECT name, index_granularity_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_granularity' AND active; + +OPTIMIZE TABLE t_index_granularity FINAL; + +SELECT 'adaptive non-const, after merge'; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_index_granularity) ORDER BY ALL; +SELECT name, index_granularity_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_granularity' AND active; + +DROP TABLE t_index_granularity; + +CREATE TABLE t_index_granularity (id UInt64, s String) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, + index_granularity = 10, + index_granularity_bytes = 4096, + merge_max_block_size = 10, + merge_max_block_size_bytes = 4096, + enable_index_granularity_compression = 1, + use_const_adaptive_granularity = 1, + enable_vertical_merge_algorithm = 0; + +INSERT INTO t_index_granularity SELECT number, 'a' FROM numbers(15); +INSERT INTO t_index_granularity SELECT number, repeat('a', 2048) FROM numbers(15, 15); + +SELECT 'adaptive const, before merge'; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_index_granularity) ORDER BY ALL; +SELECT name, index_granularity_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_granularity' AND active; + +OPTIMIZE TABLE t_index_granularity FINAL; + +SELECT 'adaptive const, after merge'; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_index_granularity) ORDER BY ALL; +SELECT name, index_granularity_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_granularity' AND active; + +DROP TABLE t_index_granularity; From 7310376413e34ad2c958c5ccde9cddd75e0a5aed Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 12 Nov 2024 01:23:01 +0100 Subject: [PATCH 160/267] Remove ridiculous code bloat --- .../AggregateFunctionDeltaSumTimestamp.cpp | 69 ++++++++++++++---- src/AggregateFunctions/Helpers.h | 70 +------------------ 2 files changed, 58 insertions(+), 81 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp index dc1adead87c..0c5b752b539 100644 --- a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp +++ b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp @@ -22,6 +22,13 @@ namespace ErrorCodes namespace { +/** Due to a lack of proper code review, this code was contributed with a multiplication of template instantiations + * over all pairs of data types, and we deeply regret that. + * + * We cannot remove all combinations, because the binary representation of serialized data has to remain the same, + * but we can partially heal the wound by treating unsigned and signed data types in the same way. 
+ */ + template <typename ValueType, typename TimestampType> struct AggregationFunctionDeltaSumTimestampData { @@ -37,23 +44,22 @@ template <typename ValueType, typename TimestampType> class AggregationFunctionDeltaSumTimestamp final : public IAggregateFunctionDataHelper< AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>, - AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType> - > + AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>> { public: AggregationFunctionDeltaSumTimestamp(const DataTypes & arguments, const Array & params) : IAggregateFunctionDataHelper< AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>, - AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType> - >{arguments, params, createResultType()} - {} + AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{arguments, params, createResultType()} + { + } AggregationFunctionDeltaSumTimestamp() : IAggregateFunctionDataHelper< AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>, - AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType> - >{} - {} + AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{} + { + } bool allocatesMemoryInArena() const override { return false; } @@ -63,8 +69,8 @@ public: void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { - auto value = assert_cast<const ColumnVector<ValueType> &>(*columns[0]).getData()[row_num]; - auto ts = assert_cast<const ColumnVector<TimestampType> &>(*columns[1]).getData()[row_num]; + auto value = unalignedLoad<ValueType>(columns[0]->getRawData().data() + row_num * sizeof(ValueType)); + auto ts = unalignedLoad<TimestampType>(columns[1]->getRawData().data() + row_num * sizeof(TimestampType)); auto & data = this->data(place); @@ -172,10 +178,49 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - assert_cast<ColumnVector<ValueType> &>(to).getData().push_back(this->data(place).sum); + static_cast<ColumnFixedSizeHelper &>(to).template insertRawData<sizeof(ValueType)>( + reinterpret_cast<const char *>(&this->data(place).sum)); } }; + + +template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs> +static IAggregateFunction * createWithTwoTypesSecond(const IDataType & second_type, TArgs && ... args) +{ + WhichDataType which(second_type); + + if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate<FirstType, UInt32>(args...); + if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate<FirstType, UInt64>(args...); + if (which.idx == TypeIndex::Int32) return new AggregateFunctionTemplate<FirstType, Int32>(args...); + if (which.idx == TypeIndex::Int64) return new AggregateFunctionTemplate<FirstType, Int64>(args...); + if (which.idx == TypeIndex::Float32) return new AggregateFunctionTemplate<FirstType, Float32>(args...); + if (which.idx == TypeIndex::Float64) return new AggregateFunctionTemplate<FirstType, Float64>(args...); + if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(args...); + if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(args...); + + return nullptr; +} + +template