diff --git a/.gitmodules b/.gitmodules index 19f93ee8270..eb21c4bfd00 100644 --- a/.gitmodules +++ b/.gitmodules @@ -37,7 +37,7 @@ url = https://github.com/ClickHouse-Extras/mariadb-connector-c.git [submodule "contrib/jemalloc"] path = contrib/jemalloc - url = https://github.com/jemalloc/jemalloc.git + url = https://github.com/ClickHouse-Extras/jemalloc.git [submodule "contrib/unixodbc"] path = contrib/unixodbc url = https://github.com/ClickHouse-Extras/UnixODBC.git diff --git a/CMakeLists.txt b/CMakeLists.txt index cf97b2c40ff..14f1fcb4a64 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -80,6 +80,11 @@ endif () include (cmake/find/ccache.cmake) +option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling" OFF) +if (ENABLE_CHECK_HEAVY_BUILDS) + set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --rss=10000000 --cpu=600) +endif () + if (NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "None") set (CMAKE_BUILD_TYPE "RelWithDebInfo") message (STATUS "CMAKE_BUILD_TYPE is not set, set to default = ${CMAKE_BUILD_TYPE}") @@ -404,7 +409,6 @@ include (cmake/find/amqpcpp.cmake) include (cmake/find/capnp.cmake) include (cmake/find/llvm.cmake) include (cmake/find/termcap.cmake) # for external static llvm -include (cmake/find/opencl.cmake) include (cmake/find/h3.cmake) include (cmake/find/libxml2.cmake) include (cmake/find/brotli.cmake) @@ -450,13 +454,6 @@ include (cmake/find/mysqlclient.cmake) # When testing for memory leaks with Valgrind, don't link tcmalloc or jemalloc. -if (USE_OPENCL) - if (OS_DARWIN) - set(OPENCL_LINKER_FLAGS "-framework OpenCL") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OPENCL_LINKER_FLAGS}") - endif () -endif () - include (cmake/print_flags.cmake) if (TARGET global-group) diff --git a/base/common/CMakeLists.txt b/base/common/CMakeLists.txt index 903168a0dd4..9e4462c821a 100644 --- a/base/common/CMakeLists.txt +++ b/base/common/CMakeLists.txt @@ -18,6 +18,7 @@ set (SRCS terminalColors.cpp errnoToString.cpp getResource.cpp + StringRef.cpp ) if (ENABLE_REPLXX) diff --git a/base/common/StringRef.cpp b/base/common/StringRef.cpp new file mode 100644 index 00000000000..87877360d83 --- /dev/null +++ b/base/common/StringRef.cpp @@ -0,0 +1,13 @@ +#include + +#include "StringRef.h" + + +std::ostream & operator<<(std::ostream & os, const StringRef & str) +{ + if (str.data) + os.write(str.data, str.size); + + return os; +} + diff --git a/base/common/StringRef.h b/base/common/StringRef.h index 410e13ba7d8..05d4eda7656 100644 --- a/base/common/StringRef.h +++ b/base/common/StringRef.h @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include @@ -322,10 +322,4 @@ inline bool operator==(StringRef lhs, const char * rhs) return true; } -inline std::ostream & operator<<(std::ostream & os, const StringRef & str) -{ - if (str.data) - os.write(str.data, str.size); - - return os; -} +std::ostream & operator<<(std::ostream & os, const StringRef & str); diff --git a/base/common/coverage.cpp b/base/common/coverage.cpp index 9f3c5ca653a..043f97f9593 100644 --- a/base/common/coverage.cpp +++ b/base/common/coverage.cpp @@ -3,12 +3,11 @@ #if WITH_COVERAGE # include - # include # if defined(__clang__) -extern "C" void __llvm_profile_dump(); +extern "C" void __llvm_profile_dump(); // NOLINT # elif defined(__GNUC__) || defined(__GNUG__) extern "C" void __gcov_exit(); # endif @@ -23,7 +22,7 @@ void dumpCoverageReportIfPossible() std::lock_guard lock(mutex); # if defined(__clang__) - __llvm_profile_dump(); + 
__llvm_profile_dump(); // NOLINT # elif defined(__GNUC__) || defined(__GNUG__) __gcov_exit(); # endif diff --git a/base/common/phdr_cache.cpp b/base/common/phdr_cache.cpp index f362fb64285..4f6a066adab 100644 --- a/base/common/phdr_cache.cpp +++ b/base/common/phdr_cache.cpp @@ -14,7 +14,7 @@ # pragma clang diagnostic ignored "-Wunused-macros" #endif -#define __msan_unpoison(X, Y) +#define __msan_unpoison(X, Y) // NOLINT #if defined(__has_feature) # if __has_feature(memory_sanitizer) # undef __msan_unpoison @@ -84,7 +84,7 @@ extern "C" #ifdef ADDRESS_SANITIZER void __lsan_ignore_object(const void *); #else -void __lsan_ignore_object(const void *) {} +void __lsan_ignore_object(const void *) {} // NOLINT #endif } diff --git a/base/common/wide_integer.h b/base/common/wide_integer.h index 2aeac072b3f..61d88bdcaf3 100644 --- a/base/common/wide_integer.h +++ b/base/common/wide_integer.h @@ -54,8 +54,8 @@ template class integer { public: - using base_type = uint8_t; - using signed_base_type = int8_t; + using base_type = uint64_t; + using signed_base_type = int64_t; // ctors integer() = default; @@ -127,7 +127,7 @@ private: friend class std::numeric_limits>; friend class std::numeric_limits>; - base_type m_arr[_impl::arr_size]; + base_type items[_impl::item_count]; }; template diff --git a/base/common/wide_integer_impl.h b/base/common/wide_integer_impl.h index 26bd6704bdc..5673ac46c4a 100644 --- a/base/common/wide_integer_impl.h +++ b/base/common/wide_integer_impl.h @@ -3,10 +3,6 @@ #include "throwError.h" -#ifndef CHAR_BIT -#define CHAR_BIT 8 -#endif - namespace wide { @@ -74,7 +70,7 @@ public: { using T = wide::integer; T res{}; - res.m_arr[T::_impl::big(0)] = std::numeric_limits::signed_base_type>::min(); + res.items[T::_impl::big(0)] = std::numeric_limits::signed_base_type>::min(); return res; } return 0; @@ -84,12 +80,12 @@ public: { using T = wide::integer; T res{}; - res.m_arr[T::_impl::big(0)] = is_same::value + res.items[T::_impl::big(0)] = is_same::value ? std::numeric_limits::signed_base_type>::max() : std::numeric_limits::base_type>::max(); - for (int i = 1; i < wide::integer::_impl::arr_size; ++i) + for (unsigned i = 1; i < wide::integer::_impl::item_count; ++i) { - res.m_arr[T::_impl::big(i)] = std::numeric_limits::base_type>::max(); + res.items[T::_impl::big(i)] = std::numeric_limits::base_type>::max(); } return res; } @@ -147,28 +143,45 @@ namespace wide template struct integer::_impl { - static_assert(Bits % CHAR_BIT == 0, "=)"); - - // utils - static const int base_bits = sizeof(base_type) * CHAR_BIT; - static const int arr_size = Bits / base_bits; static constexpr size_t _Bits = Bits; - static constexpr bool _is_wide_integer = true; + static constexpr const unsigned byte_count = Bits / 8; + static constexpr const unsigned item_count = byte_count / sizeof(base_type); + static constexpr const unsigned base_bits = sizeof(base_type) * 8; - // The original implementation is big-endian. We need little one. 
+ static_assert(Bits % base_bits == 0); + + /// Simple iteration in both directions static constexpr unsigned little(unsigned idx) { return idx; } - static constexpr unsigned big(unsigned idx) { return arr_size - 1 - idx; } + static constexpr unsigned big(unsigned idx) { return item_count - 1 - idx; } static constexpr unsigned any(unsigned idx) { return idx; } + template + constexpr static bool is_negative(const T & n) noexcept + { + if constexpr (std::is_signed_v) + return n < 0; + else + return false; + } + template constexpr static bool is_negative(const integer & n) noexcept { if constexpr (std::is_same_v) - return static_cast(n.m_arr[big(0)]) < 0; + return static_cast(n.items[big(0)]) < 0; else return false; } + template + constexpr static auto make_positive(const T & n) noexcept + { + if constexpr (std::is_signed_v) + return n < 0 ? -n : n; + else + return n; + } + template constexpr static integer make_positive(const integer & n) noexcept { @@ -189,21 +202,24 @@ struct integer::_impl template constexpr static void wide_integer_from_bultin(integer & self, Integral rhs) noexcept { - auto r = _impl::to_Integral(rhs); + self.items[0] = _impl::to_Integral(rhs); + if constexpr (std::is_same_v) + self.items[1] = rhs >> base_bits; - int r_idx = 0; - for (; static_cast(r_idx) < sizeof(Integral) && r_idx < arr_size; ++r_idx) + constexpr const unsigned start = (sizeof(Integral) == 16) ? 2 : 1; + + if constexpr (std::is_signed_v) { - base_type & curr = self.m_arr[little(r_idx)]; - base_type curr_rhs = (r >> (r_idx * CHAR_BIT)) & std::numeric_limits::max(); - curr = curr_rhs; + if (rhs < 0) + { + for (unsigned i = start; i < item_count; ++i) + self.items[i] = -1; + return; + } } - for (; r_idx < arr_size; ++r_idx) - { - base_type & curr = self.m_arr[little(r_idx)]; - curr = r < 0 ? std::numeric_limits::max() : 0; - } + for (unsigned i = start; i < item_count; ++i) + self.items[i] = 0; } constexpr static void wide_integer_from_bultin(integer & self, double rhs) noexcept @@ -234,170 +250,142 @@ struct integer::_impl constexpr static void wide_integer_from_wide_integer(integer & self, const integer & rhs) noexcept { - // int Bits_to_copy = std::min(arr_size, rhs.arr_size); - auto rhs_arr_size = integer::_impl::arr_size; - int base_elems_to_copy = _impl::arr_size < rhs_arr_size ? _impl::arr_size : rhs_arr_size; - for (int i = 0; i < base_elems_to_copy; ++i) + constexpr const unsigned min_bits = (Bits < Bits2) ? Bits : Bits2; + constexpr const unsigned to_copy = min_bits / base_bits; + + for (unsigned i = 0; i < to_copy; ++i) + self.items[i] = rhs.items[i]; + + if constexpr (Bits > Bits2) { - self.m_arr[little(i)] = rhs.m_arr[little(i)]; - } - for (int i = 0; i < arr_size - base_elems_to_copy; ++i) - { - self.m_arr[big(i)] = is_negative(rhs) ? 
std::numeric_limits::max() : 0; + if constexpr (std::is_signed_v) + { + if (rhs < 0) + { + for (unsigned i = to_copy; i < item_count; ++i) + self.items[i] = -1; + return; + } + } + + for (unsigned i = to_copy; i < item_count; ++i) + self.items[i] = 0; } } template constexpr static bool should_keep_size() { - return sizeof(T) * CHAR_BIT <= Bits; + return sizeof(T) <= byte_count; } - constexpr static integer shift_left(const integer & rhs, int n) noexcept + constexpr static integer shift_left(const integer & rhs, unsigned n) noexcept { - if (static_cast(n) >= base_bits * arr_size) - return 0; - if (n <= 0) - return rhs; + integer lhs; + unsigned items_shift = n / base_bits; - integer lhs = rhs; - int bit_shift = n % base_bits; - unsigned n_bytes = n / base_bits; - if (bit_shift) + if (unsigned bit_shift = n % base_bits) { - lhs.m_arr[big(0)] <<= bit_shift; - for (int i = 1; i < arr_size; ++i) + unsigned overflow_shift = base_bits - bit_shift; + + lhs.items[big(0)] = rhs.items[big(items_shift)] << bit_shift; + for (unsigned i = 1; i < item_count - items_shift; ++i) { - lhs.m_arr[big(i - 1)] |= lhs.m_arr[big(i)] >> (base_bits - bit_shift); - lhs.m_arr[big(i)] <<= bit_shift; + lhs.items[big(i - 1)] |= rhs.items[big(items_shift + i)] >> overflow_shift; + lhs.items[big(i)] = rhs.items[big(items_shift + i)] << bit_shift; } } - if (n_bytes) - { - for (unsigned i = 0; i < arr_size - n_bytes; ++i) - { - lhs.m_arr[big(i)] = lhs.m_arr[big(i + n_bytes)]; - } - for (unsigned i = arr_size - n_bytes; i < arr_size; ++i) - lhs.m_arr[big(i)] = 0; - } - return lhs; - } - - constexpr static integer shift_left(const integer & rhs, int n) noexcept - { - return integer(shift_left(integer(rhs), n)); - } - - constexpr static integer shift_right(const integer & rhs, int n) noexcept - { - if (static_cast(n) >= base_bits * arr_size) - return 0; - if (n <= 0) - return rhs; - - integer lhs = rhs; - int bit_shift = n % base_bits; - unsigned n_bytes = n / base_bits; - if (bit_shift) - { - lhs.m_arr[little(0)] >>= bit_shift; - for (int i = 1; i < arr_size; ++i) - { - lhs.m_arr[little(i - 1)] |= lhs.m_arr[little(i)] << (base_bits - bit_shift); - lhs.m_arr[little(i)] >>= bit_shift; - } - } - if (n_bytes) - { - for (unsigned i = 0; i < arr_size - n_bytes; ++i) - { - lhs.m_arr[little(i)] = lhs.m_arr[little(i + n_bytes)]; - } - for (unsigned i = arr_size - n_bytes; i < arr_size; ++i) - lhs.m_arr[little(i)] = 0; - } - return lhs; - } - - constexpr static integer shift_right(const integer & rhs, int n) noexcept - { - if (static_cast(n) >= base_bits * arr_size) - return 0; - if (n <= 0) - return rhs; - - bool is_neg = is_negative(rhs); - if (!is_neg) - return shift_right(integer(rhs), n); - - integer lhs = rhs; - int bit_shift = n % base_bits; - unsigned n_bytes = n / base_bits; - if (bit_shift) - { - lhs = shift_right(integer(lhs), bit_shift); - lhs.m_arr[big(0)] |= std::numeric_limits::max() << (base_bits - bit_shift); - } - if (n_bytes) - { - for (unsigned i = 0; i < arr_size - n_bytes; ++i) - { - lhs.m_arr[little(i)] = lhs.m_arr[little(i + n_bytes)]; - } - for (unsigned i = arr_size - n_bytes; i < arr_size; ++i) - { - lhs.m_arr[little(i)] = std::numeric_limits::max(); - } - } - return lhs; - } - - template - constexpr static integer - operator_plus_T(const integer & lhs, T rhs) noexcept(std::is_same_v) - { - if (rhs < 0) - return _operator_minus_T(lhs, -rhs); else - return _operator_plus_T(lhs, rhs); + { + for (unsigned i = 0; i < item_count - items_shift; ++i) + lhs.items[big(i)] = rhs.items[big(items_shift + i)]; + } + + for 
(unsigned i = 0; i < items_shift; ++i) + lhs.items[little(i)] = 0; + return lhs; + } + + constexpr static integer shift_right(const integer & rhs, unsigned n) noexcept + { + integer lhs; + unsigned items_shift = n / base_bits; + unsigned bit_shift = n % base_bits; + + if (bit_shift) + { + unsigned overflow_shift = base_bits - bit_shift; + + lhs.items[little(0)] = rhs.items[little(items_shift)] >> bit_shift; + for (unsigned i = 1; i < item_count - items_shift; ++i) + { + lhs.items[little(i - 1)] |= rhs.items[little(items_shift + i)] << overflow_shift; + lhs.items[little(i)] = rhs.items[little(items_shift + i)] >> bit_shift; + } + } + else + { + for (unsigned i = 0; i < item_count - items_shift; ++i) + lhs.items[little(i)] = rhs.items[little(items_shift + i)]; + } + + if (is_negative(rhs)) + { + if (bit_shift) + lhs.items[big(items_shift)] |= std::numeric_limits::max() << (base_bits - bit_shift); + + for (unsigned i = item_count - items_shift; i < items_shift; ++i) + lhs.items[little(i)] = std::numeric_limits::max(); + } + else + { + for (unsigned i = item_count - items_shift; i < items_shift; ++i) + lhs.items[little(i)] = 0; + } + + return lhs; } private: template - constexpr static integer - _operator_minus_T(const integer & lhs, T rhs) noexcept(std::is_same_v) + constexpr static base_type get_item(const T & x, unsigned number) { - integer res = lhs; + if constexpr (IsWideInteger::value) + { + if (number < T::_impl::item_count) + return x.items[number]; + return 0; + } + else + { + if (number * sizeof(base_type) < sizeof(T)) + return x >> (number * base_bits); // & std::numeric_limits::max() + return 0; + } + } + + template + constexpr static integer + op_minus(const integer & lhs, T rhs) + { + integer res; bool is_underflow = false; - int r_idx = 0; - for (; static_cast(r_idx) < sizeof(T) && r_idx < arr_size; ++r_idx) + for (unsigned i = 0; i < item_count; ++i) { - base_type & res_i = res.m_arr[little(r_idx)]; - base_type curr_rhs = (rhs >> (r_idx * CHAR_BIT)) & std::numeric_limits::max(); + base_type lhs_item = lhs.items[little(i)]; + base_type rhs_item = get_item(rhs, i); if (is_underflow) { - --res_i; - is_underflow = res_i == std::numeric_limits::max(); + is_underflow = (lhs_item == 0); + --lhs_item; } - if (res_i < curr_rhs) + if (lhs_item < rhs_item) is_underflow = true; - res_i -= curr_rhs; - } - if (is_underflow && r_idx < arr_size) - { - --res.m_arr[little(r_idx)]; - for (int i = arr_size - 1 - r_idx - 1; i >= 0; --i) - { - if (res.m_arr[big(i + 1)] == std::numeric_limits::max()) - --res.m_arr[big(i)]; - else - break; - } + res.items[little(i)] = lhs_item - rhs_item; } return res; @@ -405,37 +393,69 @@ private: template constexpr static integer - _operator_plus_T(const integer & lhs, T rhs) noexcept(std::is_same_v) + op_plus(const integer & lhs, T rhs) { - integer res = lhs; + integer res; bool is_overflow = false; - int r_idx = 0; - for (; static_cast(r_idx) < sizeof(T) && r_idx < arr_size; ++r_idx) + for (unsigned i = 0; i < item_count; ++i) { - base_type & res_i = res.m_arr[little(r_idx)]; - base_type curr_rhs = (rhs >> (r_idx * CHAR_BIT)) & std::numeric_limits::max(); + base_type lhs_item = lhs.items[little(i)]; + base_type rhs_item = get_item(rhs, i); if (is_overflow) { - ++res_i; - is_overflow = res_i == 0; + ++lhs_item; + is_overflow = (lhs_item == 0); } - res_i += curr_rhs; - if (res_i < curr_rhs) + base_type & res_item = res.items[little(i)]; + res_item = lhs_item + rhs_item; + + if (res_item < rhs_item) is_overflow = true; } - if (is_overflow && r_idx < arr_size) + 
return res; + } + + template + constexpr static auto op_multiply(const integer & lhs, const T & rhs) + { + integer res{}; +#if 1 + integer lhs2 = op_plus(lhs, shift_left(lhs, 1)); + integer lhs3 = op_plus(lhs2, shift_left(lhs, 2)); +#endif + for (unsigned i = 0; i < item_count; ++i) { - ++res.m_arr[little(r_idx)]; - for (int i = arr_size - 1 - r_idx - 1; i >= 0; --i) + base_type rhs_item = get_item(rhs, i); + unsigned pos = i * base_bits; + + while (rhs_item) { - if (res.m_arr[big(i + 1)] == 0) - ++res.m_arr[big(i)]; - else - break; +#if 1 /// optimization + if ((rhs_item & 0x7) == 0x7) + { + res = op_plus(res, shift_left(lhs3, pos)); + rhs_item >>= 3; + pos += 3; + continue; + } + + if ((rhs_item & 0x3) == 0x3) + { + res = op_plus(res, shift_left(lhs2, pos)); + rhs_item >>= 2; + pos += 2; + continue; + } +#endif + if (rhs_item & 1) + res = op_plus(res, shift_left(lhs, pos)); + + rhs_item >>= 1; + ++pos; } } @@ -445,17 +465,17 @@ private: public: constexpr static integer operator_unary_tilda(const integer & lhs) noexcept { - integer res{}; + integer res; - for (int i = 0; i < arr_size; ++i) - res.m_arr[any(i)] = ~lhs.m_arr[any(i)]; + for (unsigned i = 0; i < item_count; ++i) + res.items[any(i)] = ~lhs.items[any(i)]; return res; } constexpr static integer operator_unary_minus(const integer & lhs) noexcept(std::is_same_v) { - return operator_plus_T(operator_unary_tilda(lhs), 1); + return op_plus(operator_unary_tilda(lhs), 1); } template @@ -463,15 +483,14 @@ public: { if constexpr (should_keep_size()) { - integer t = rhs; - if (is_negative(t)) - return _operator_minus_wide_integer(lhs, operator_unary_minus(t)); + if (is_negative(rhs)) + return op_minus(lhs, -rhs); else - return _operator_plus_wide_integer(lhs, t); + return op_plus(lhs, rhs); } else { - static_assert(T::_impl::_is_wide_integer, ""); + static_assert(IsWideInteger::value); return std::common_type_t, integer>::_impl::operator_plus( integer(lhs), rhs); } @@ -482,100 +501,44 @@ public: { if constexpr (should_keep_size()) { - integer t = rhs; - if (is_negative(t)) - return _operator_plus_wide_integer(lhs, operator_unary_minus(t)); + if (is_negative(rhs)) + return op_plus(lhs, -rhs); else - return _operator_minus_wide_integer(lhs, t); + return op_minus(lhs, rhs); } else { - static_assert(T::_impl::_is_wide_integer, ""); + static_assert(IsWideInteger::value); return std::common_type_t, integer>::_impl::operator_minus( integer(lhs), rhs); } } -private: - constexpr static integer _operator_minus_wide_integer( - const integer & lhs, const integer & rhs) noexcept(std::is_same_v) - { - integer res = lhs; - - bool is_underflow = false; - for (int idx = 0; idx < arr_size; ++idx) - { - base_type & res_i = res.m_arr[little(idx)]; - const base_type rhs_i = rhs.m_arr[little(idx)]; - - if (is_underflow) - { - --res_i; - is_underflow = res_i == std::numeric_limits::max(); - } - - if (res_i < rhs_i) - is_underflow = true; - - res_i -= rhs_i; - } - - return res; - } - - constexpr static integer _operator_plus_wide_integer( - const integer & lhs, const integer & rhs) noexcept(std::is_same_v) - { - integer res = lhs; - - bool is_overflow = false; - for (int idx = 0; idx < arr_size; ++idx) - { - base_type & res_i = res.m_arr[little(idx)]; - const base_type rhs_i = rhs.m_arr[little(idx)]; - - if (is_overflow) - { - ++res_i; - is_overflow = res_i == 0; - } - - res_i += rhs_i; - - if (res_i < rhs_i) - is_overflow = true; - } - - return res; - } - -public: template constexpr static auto operator_star(const integer & lhs, const T & rhs) { if constexpr 
(should_keep_size()) { - const integer a = make_positive(lhs); - integer t = make_positive(integer(rhs)); + integer res; - integer res = 0; - - for (size_t i = 0; i < arr_size * base_bits; ++i) + if constexpr (std::is_signed_v) { - if (t.m_arr[little(0)] & 1) - res = operator_plus(res, shift_left(a, i)); - - t = shift_right(t, 1); + res = op_multiply((is_negative(lhs) ? make_positive(lhs) : lhs), + (is_negative(rhs) ? make_positive(rhs) : rhs)); + } + else + { + res = op_multiply(lhs, (is_negative(rhs) ? make_positive(rhs) : rhs)); } - if (std::is_same_v && is_negative(integer(rhs)) != is_negative(lhs)) + if (std::is_same_v && is_negative(lhs) != is_negative(rhs)) res = operator_unary_minus(res); return res; } else { - static_assert(T::_impl::_is_wide_integer, ""); + static_assert(IsWideInteger::value); return std::common_type_t, T>::_impl::operator_star(T(lhs), rhs); } } @@ -585,25 +548,22 @@ public: { if constexpr (should_keep_size()) { - // static_assert(Signed == std::is_signed::value, - // "warning: operator_more: comparison of integers of different signs"); + if (std::numeric_limits::is_signed && (is_negative(lhs) != is_negative(rhs))) + return is_negative(rhs); - integer t = rhs; - - if (std::numeric_limits::is_signed && (is_negative(lhs) != is_negative(t))) - return is_negative(t); - - for (int i = 0; i < arr_size; ++i) + for (unsigned i = 0; i < item_count; ++i) { - if (lhs.m_arr[big(i)] != t.m_arr[big(i)]) - return lhs.m_arr[big(i)] > t.m_arr[big(i)]; + base_type rhs_item = get_item(rhs, big(i)); + + if (lhs.items[big(i)] != rhs_item) + return lhs.items[big(i)] > rhs_item; } return false; } else { - static_assert(T::_impl::_is_wide_integer, ""); + static_assert(IsWideInteger::value); return std::common_type_t, T>::_impl::operator_more(T(lhs), rhs); } } @@ -613,23 +573,22 @@ public: { if constexpr (should_keep_size()) { - // static_assert(Signed == std::is_signed::value, - // "warning: operator_less: comparison of integers of different signs"); - - integer t = rhs; - - if (std::numeric_limits::is_signed && (is_negative(lhs) != is_negative(t))) + if (std::numeric_limits::is_signed && (is_negative(lhs) != is_negative(rhs))) return is_negative(lhs); - for (int i = 0; i < arr_size; ++i) - if (lhs.m_arr[big(i)] != t.m_arr[big(i)]) - return lhs.m_arr[big(i)] < t.m_arr[big(i)]; + for (unsigned i = 0; i < item_count; ++i) + { + base_type rhs_item = get_item(rhs, big(i)); + + if (lhs.items[big(i)] != rhs_item) + return lhs.items[big(i)] < rhs_item; + } return false; } else { - static_assert(T::_impl::_is_wide_integer, ""); + static_assert(IsWideInteger::value); return std::common_type_t, T>::_impl::operator_less(T(lhs), rhs); } } @@ -639,17 +598,19 @@ public: { if constexpr (should_keep_size()) { - integer t = rhs; + for (unsigned i = 0; i < item_count; ++i) + { + base_type rhs_item = get_item(rhs, any(i)); - for (int i = 0; i < arr_size; ++i) - if (lhs.m_arr[any(i)] != t.m_arr[any(i)]) + if (lhs.items[any(i)] != rhs_item) return false; + } return true; } else { - static_assert(T::_impl::_is_wide_integer, ""); + static_assert(IsWideInteger::value); return std::common_type_t, T>::_impl::operator_eq(T(lhs), rhs); } } @@ -659,16 +620,15 @@ public: { if constexpr (should_keep_size()) { - integer t = rhs; - integer res = lhs; + integer res; - for (int i = 0; i < arr_size; ++i) - res.m_arr[any(i)] |= t.m_arr[any(i)]; + for (unsigned i = 0; i < item_count; ++i) + res.items[little(i)] = lhs.items[little(i)] | get_item(rhs, i); return res; } else { - static_assert(T::_impl::_is_wide_integer, ""); + 
static_assert(IsWideInteger::value); return std::common_type_t, T>::_impl::operator_pipe(T(lhs), rhs); } } @@ -678,43 +638,48 @@ public: { if constexpr (should_keep_size()) { - integer t = rhs; - integer res = lhs; + integer res; - for (int i = 0; i < arr_size; ++i) - res.m_arr[any(i)] &= t.m_arr[any(i)]; + for (unsigned i = 0; i < item_count; ++i) + res.items[little(i)] = lhs.items[little(i)] & get_item(rhs, i); return res; } else { - static_assert(T::_impl::_is_wide_integer, ""); + static_assert(IsWideInteger::value); return std::common_type_t, T>::_impl::operator_amp(T(lhs), rhs); } } private: template - constexpr static void divide(const T & lhserator, const T & denominator, T & quotient, T & remainder) + constexpr static bool is_zero(const T & x) { bool is_zero = true; - for (auto c : denominator.m_arr) + for (auto item : x.items) { - if (c != 0) + if (item != 0) { is_zero = false; break; } } + return is_zero; + } - if (is_zero) + /// returns quotient as result and remainder in numerator. + template + constexpr static T divide(T & numerator, T && denominator) + { + if (is_zero(denominator)) throwError("divide by zero"); - T n = lhserator; - T d = denominator; + T & n = numerator; + T & d = denominator; T x = 1; - T answer = 0; + T quotient = 0; - while (!operator_more(d, n) && operator_eq(operator_amp(shift_right(d, base_bits * arr_size - 1), 1), 0)) + while (!operator_more(d, n) && operator_eq(operator_amp(shift_right(d, base_bits * item_count - 1), 1), 0)) { x = shift_left(x, 1); d = shift_left(d, 1); @@ -725,15 +690,14 @@ private: if (!operator_more(d, n)) { n = operator_minus(n, d); - answer = operator_pipe(answer, x); + quotient = operator_pipe(quotient, x); } x = shift_right(x, 1); d = shift_right(d, 1); } - quotient = answer; - remainder = n; + return quotient; } public: @@ -742,18 +706,16 @@ public: { if constexpr (should_keep_size()) { - integer o = rhs; - integer quotient{}, remainder{}; - divide(make_positive(lhs), make_positive(o), quotient, remainder); + integer numerator = make_positive(lhs); + integer quotient = divide(numerator, make_positive(integer(rhs))); - if (std::is_same_v && is_negative(o) != is_negative(lhs)) + if (std::is_same_v && is_negative(rhs) != is_negative(lhs)) quotient = operator_unary_minus(quotient); - return quotient; } else { - static_assert(T::_impl::_is_wide_integer, ""); + static_assert(IsWideInteger::value); return std::common_type_t, integer>::operator_slash(T(lhs), rhs); } } @@ -763,18 +725,16 @@ public: { if constexpr (should_keep_size()) { - integer o = rhs; - integer quotient{}, remainder{}; - divide(make_positive(lhs), make_positive(o), quotient, remainder); + integer remainder = make_positive(lhs); + divide(remainder, make_positive(integer(rhs))); if (std::is_same_v && is_negative(lhs)) remainder = operator_unary_minus(remainder); - return remainder; } else { - static_assert(T::_impl::_is_wide_integer, ""); + static_assert(IsWideInteger::value); return std::common_type_t, integer>::operator_percent(T(lhs), rhs); } } @@ -788,13 +748,13 @@ public: integer t(rhs); integer res = lhs; - for (int i = 0; i < arr_size; ++i) - res.m_arr[any(i)] ^= t.m_arr[any(i)]; + for (unsigned i = 0; i < item_count; ++i) + res.items[any(i)] ^= t.items[any(i)]; return res; } else { - static_assert(T::_impl::_is_wide_integer, ""); + static_assert(IsWideInteger::value); return T::operator_circumflex(T(lhs), rhs); } } @@ -815,20 +775,20 @@ public: { if (*c >= '0' && *c <= '9') { - res = operator_star(res, 16U); - res = operator_plus_T(res, *c - '0'); + res = 
op_multiply(res, 16U); + res = op_plus(res, *c - '0'); ++c; } else if (*c >= 'a' && *c <= 'f') { - res = operator_star(res, 16U); - res = operator_plus_T(res, *c - 'a' + 10U); + res = op_multiply(res, 16U); + res = op_plus(res, *c - 'a' + 10U); ++c; } else if (*c >= 'A' && *c <= 'F') { // tolower must be used, but it is not constexpr - res = operator_star(res, 16U); - res = operator_plus_T(res, *c - 'A' + 10U); + res = op_multiply(res, 16U); + res = op_plus(res, *c - 'A' + 10U); ++c; } else @@ -842,8 +802,8 @@ public: if (*c < '0' || *c > '9') throwError("invalid char from"); - res = operator_star(res, 10U); - res = operator_plus_T(res, *c - '0'); + res = op_multiply(res, 10U); + res = op_plus(res, *c - '0'); ++c; } } @@ -860,7 +820,7 @@ public: template template constexpr integer::integer(T rhs) noexcept - : m_arr{} + : items{} { if constexpr (IsWideInteger::value) _impl::wide_integer_from_wide_integer(*this, rhs); @@ -871,7 +831,7 @@ constexpr integer::integer(T rhs) noexcept template template constexpr integer::integer(std::initializer_list il) noexcept - : m_arr{} + : items{} { if (il.size() == 1) { @@ -967,14 +927,25 @@ constexpr integer & integer::operator^=(const T & rh template constexpr integer & integer::operator<<=(int n) noexcept { - *this = _impl::shift_left(*this, n); + if (static_cast(n) >= Bits) + *this = 0; + else if (n > 0) + *this = _impl::shift_left(*this, n); return *this; } template constexpr integer & integer::operator>>=(int n) noexcept { - *this = _impl::shift_right(*this, n); + if (static_cast(n) >= Bits) + { + if (is_negative(*this)) + *this = -1; + else + *this = 0; + } + else if (n > 0) + *this = _impl::shift_right(*this, n); return *this; } @@ -1018,13 +989,16 @@ template template constexpr integer::operator T() const noexcept { - static_assert(std::numeric_limits::is_integer, ""); - T res = 0; - for (size_t r_idx = 0; r_idx < _impl::arr_size && r_idx < sizeof(T); ++r_idx) + if constexpr (std::is_same_v) { - res |= (T(m_arr[_impl::little(r_idx)]) << (_impl::base_bits * r_idx)); + static_assert(Bits >= 128); + return (__int128(items[1]) << 64) | items[0]; + } + else + { + static_assert(std::numeric_limits::is_integer); + return items[0]; } - return res; } template @@ -1038,12 +1012,12 @@ constexpr integer::operator long double() const noexcept tmp = -tmp; long double res = 0; - for (size_t idx = 0; idx < _impl::arr_size; ++idx) + for (unsigned i = 0; i < _impl::item_count; ++i) { long double t = res; res *= std::numeric_limits::max(); res += t; - res += tmp.m_arr[_impl::big(idx)]; + res += tmp.items[_impl::big(i)]; } if (_impl::is_negative(*this)) @@ -1187,11 +1161,19 @@ std::common_type_t constexpr operator^(const Integral & lhs template constexpr integer operator<<(const integer & lhs, int n) noexcept { + if (static_cast(n) >= Bits) + return 0; + if (n <= 0) + return lhs; return integer::_impl::shift_left(lhs, n); } template constexpr integer operator>>(const integer & lhs, int n) noexcept { + if (static_cast(n) >= Bits) + return 0; + if (n <= 0) + return lhs; return integer::_impl::shift_right(lhs, n); } @@ -1277,7 +1259,7 @@ struct hash> { static_assert(Bits % (sizeof(size_t) * 8) == 0); - const auto * ptr = reinterpret_cast(lhs.m_arr); + const auto * ptr = reinterpret_cast(lhs.items); unsigned count = Bits / (sizeof(size_t) * 8); size_t res = 0; diff --git a/base/common/ya.make b/base/common/ya.make index 2bd08afbf3a..cbb6b5f64ac 100644 --- a/base/common/ya.make +++ b/base/common/ya.make @@ -53,6 +53,7 @@ SRCS( setTerminalEcho.cpp shift10.cpp sleep.cpp + 
StringRef.cpp terminalColors.cpp ) diff --git a/benchmark/hardware.sh b/benchmark/hardware.sh new file mode 100755 index 00000000000..0ff71df19ae --- /dev/null +++ b/benchmark/hardware.sh @@ -0,0 +1,120 @@ +#!/bin/bash -e + +if [[ -n $1 ]]; then + SCALE=$1 +else + SCALE=100 +fi + +TABLE="hits_${SCALE}m_obfuscated" +DATASET="${TABLE}_v1.tar.xz" +QUERIES_FILE="queries.sql" +TRIES=3 + +AMD64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse" +AARCH64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_special_build_check/clang-10-aarch64_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse" + +FASTER_DOWNLOAD=wget +if command -v axel >/dev/null; then + FASTER_DOWNLOAD=axel +else + echo "It's recommended to install 'axel' for faster downloads." +fi + +if command -v pixz >/dev/null; then + TAR_PARAMS='-Ipixz' +else + echo "It's recommended to install 'pixz' for faster decompression of the dataset." +fi + +mkdir -p clickhouse-benchmark-$SCALE +pushd clickhouse-benchmark-$SCALE + +if [[ ! -f clickhouse ]]; then + CPU=$(uname -m) + if [[ ($CPU == x86_64) || ($CPU == amd64) ]]; then + $FASTER_DOWNLOAD "$AMD64_BIN_URL" + elif [[ $CPU == aarch64 ]]; then + $FASTER_DOWNLOAD "$AARCH64_BIN_URL" + else + echo "Unsupported CPU type: $CPU" + exit 1 + fi +fi + +chmod a+x clickhouse + +if [[ ! -f $QUERIES_FILE ]]; then + wget "https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/$QUERIES_FILE" +fi + +if [[ ! -d data ]]; then + if [[ ! -f $DATASET ]]; then + $FASTER_DOWNLOAD "https://clickhouse-datasets.s3.yandex.net/hits/partitions/$DATASET" + fi + + tar $TAR_PARAMS --strip-components=1 --directory=. -x -v -f $DATASET +fi + +echo "Starting clickhouse-server" + +./clickhouse server > server.log 2>&1 & +PID=$! + +function finish { + kill $PID + wait +} +trap finish EXIT + +echo "Waiting for clickhouse-server to start" + +for i in {1..30}; do + sleep 1 + ./clickhouse client --query "SELECT 'The dataset size is: ', count() FROM $TABLE" 2>/dev/null && break || echo '.' + if [[ $i == 30 ]]; then exit 1; fi +done + +echo +echo "Will perform benchmark. Results:" +echo + +cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do + sync + echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null + + echo -n "[" + for i in $(seq 1 $TRIES); do + RES=$(./clickhouse client --max_memory_usage 100000000000 --time --format=Null --query="$query" 2>&1) + [[ "$?" == "0" ]] && echo -n "${RES}" || echo -n "null" + [[ "$i" != $TRIES ]] && echo -n ", " + done + echo "]," +done + + +echo +echo "Benchmark complete. System info:" +echo + +echo '----Version and build id--------' +./clickhouse local --query "SELECT version(), buildId()" +echo '----CPU-------------------------' +lscpu +echo '----Block Devices---------------' +lsblk +echo '----Disk Free and Total--------' +df -h . 
+echo '----Memory Free and Total-------' +free -h +echo '----Physical Memory Amount------' +cat /proc/meminfo | grep MemTotal +echo '----RAID Info-------------------' +cat /proc/mdstat +#echo '----PCI-------------------------' +#lspci +#echo '----All Hardware Info-----------' +#lshw +echo '--------------------------------' + +echo diff --git a/cmake/find/opencl.cmake b/cmake/find/opencl.cmake deleted file mode 100644 index 2b0cc7c5dd4..00000000000 --- a/cmake/find/opencl.cmake +++ /dev/null @@ -1,25 +0,0 @@ -# TODO: enable by default -if(0) - option(ENABLE_OPENCL "Enable OpenCL support" ${ENABLE_LIBRARIES}) -endif() - -if(NOT ENABLE_OPENCL) - return() -endif() - -# Intel OpenCl driver: sudo apt install intel-opencl-icd -# @sa https://github.com/intel/compute-runtime/releases - -# OpenCL applications should link with ICD loader -# sudo apt install opencl-headers ocl-icd-libopencl1 -# sudo ln -s /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/libOpenCL.so -# TODO: add https://github.com/OCL-dev/ocl-icd as submodule instead - -find_package(OpenCL) -if(OpenCL_FOUND) - set(USE_OPENCL 1) -else() - message (${RECONFIGURE_MESSAGE_LEVEL} "Can't enable OpenCL support") -endif() - -message(STATUS "Using opencl=${USE_OPENCL}: ${OpenCL_INCLUDE_DIRS} : ${OpenCL_LIBRARIES}") diff --git a/contrib/jemalloc b/contrib/jemalloc index ea6b3e973b4..026764f1999 160000 --- a/contrib/jemalloc +++ b/contrib/jemalloc @@ -1 +1 @@ -Subproject commit ea6b3e973b477b8061e0076bb257dbd7f3faa756 +Subproject commit 026764f19995c53583ab25a3b9c06a2fd74e4689 diff --git a/debian/control b/debian/control index 58efd711d27..1014b8b0a3c 100644 --- a/debian/control +++ b/debian/control @@ -11,7 +11,6 @@ Build-Depends: debhelper (>= 9), libicu-dev, libreadline-dev, gperf, - python, tzdata Standards-Version: 3.9.8 diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 72adba5d762..fd70b03242b 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -17,10 +17,10 @@ ccache --show-stats ||: ccache --zero-stats ||: ln -s /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/libOpenCL.so ||: rm -f CMakeCache.txt -cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSANITIZE=$SANITIZER $CMAKE_FLAGS .. +cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSANITIZE=$SANITIZER -DENABLE_CHECK_HEAVY_BUILDS=1 $CMAKE_FLAGS .. ninja $NINJA_FLAGS clickhouse-bundle mv ./programs/clickhouse* /output -mv ./src/unit_tests_dbms /output +mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds find . -name '*.so' -print -exec mv '{}' /output \; find . -name '*.so.*' -print -exec mv '{}' /output \; diff --git a/docker/packager/packager b/docker/packager/packager index 909f20acd6d..0a14102ec04 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -105,6 +105,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ # Create combined output archive for split build and for performance tests. if package_type == "performance": result.append("COMBINED_OUTPUT=performance") + cmake_flags.append("-DENABLE_TESTS=0") elif split_binary: result.append("COMBINED_OUTPUT=shared_build") diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index 8117d2907bc..aa3f1d738c2 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -1,7 +1,7 @@ # docker build -t yandex/clickhouse-test-base . 
FROM ubuntu:19.10 -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=10 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11 RUN apt-get update \ && apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \ @@ -43,7 +43,6 @@ RUN apt-get update \ llvm-${LLVM_VERSION} \ moreutils \ perl \ - perl \ pigz \ pkg-config \ tzdata \ diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile index 53627c78208..35decd907c0 100644 --- a/docker/test/integration/base/Dockerfile +++ b/docker/test/integration/base/Dockerfile @@ -1,5 +1,5 @@ # docker build -t yandex/clickhouse-integration-test . -FROM ubuntu:19.10 +FROM yandex/clickhouse-test-base RUN apt-get update \ && env DEBIAN_FRONTEND=noninteractive apt-get -y install \ @@ -8,7 +8,6 @@ RUN apt-get update \ libreadline-dev \ libicu-dev \ bsdutils \ - llvm-9 \ gdb \ unixodbc \ odbcinst \ @@ -29,9 +28,3 @@ RUN curl 'https://cdn.mysql.com//Downloads/Connector-ODBC/8.0/mysql-connector-od ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone - -# Sanitizer options -RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7'" >> /etc/environment; \ - echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \ - echo "MSAN_OPTIONS='abort_on_error=1'" >> /etc/environment; \ - ln -s /usr/lib/llvm-9/bin/llvm-symbolizer /usr/bin/llvm-symbolizer; diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index df666af8e8e..a4f8af2f388 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -29,7 +29,7 @@ RUN apt-get update \ tzdata \ vim \ wget \ - && pip3 --no-cache-dir install clickhouse_driver \ + && pip3 --no-cache-dir install clickhouse_driver scipy \ && apt-get purge --yes python3-dev g++ \ && apt-get autoremove --yes \ && apt-get clean \ diff --git a/docker/test/performance-comparison/README.md b/docker/test/performance-comparison/README.md index d877f435c24..3953e99fc0f 100644 --- a/docker/test/performance-comparison/README.md +++ b/docker/test/performance-comparison/README.md @@ -16,7 +16,7 @@ We also consider the test to be unstable, if the observed difference is less tha performance differences above 5% more often than in 5% runs, so the test is likely to have false positives. -### How to read the report +### How to Read the Report The check status summarizes the report in a short text message like `1 faster, 10 unstable`: * `1 faster` -- how many queries became faster, @@ -27,28 +27,50 @@ The check status summarizes the report in a short text message like `1 faster, 1 The report page itself constists of a several tables. Some of them always signify errors, e.g. "Run errors" -- the very presence of this table indicates that there were errors during the test, that are not normal and must be fixed. Some tables are mostly informational, e.g. "Test times" -- they reflect normal test results. But if a cell in such table is marked in red, this also means an error, e.g., a test is taking too long to run. -#### Tested commits +#### Tested Commits Informational, no action required. Log messages for the commits that are tested. Note that for the right commit, we show nominal tested commit `pull/*/head` and real tested commit `pull/*/merge`, which is generated by GitHub by merging latest master to the `pull/*/head` and which we actually build and test in CI. -#### Run errors -Action required for every item -- these are errors that must be fixed. 
The errors that ocurred when running some test queries. For more information about the error, download test output archive and see `test-name-err.log`. To reproduce, see 'How to run' below. +#### Error Summary +Action required for every item. -#### Slow on client -Action required for every item -- these are errors that must be fixed. This table shows queries that take significantly longer to process on the client than on the server. A possible reason might be sending too much data to the client, e.g., a forgotten `format Null`. +This table summarizes all errors that occurred during the test. Click the links to go to the description of a particular error. -#### Short queries not marked as short -Action required for every item -- these are errors that must be fixed. This table shows queries that are "short" but not explicitly marked as such. "Short" queries are too fast to meaningfully compare performance, because the changes are drowned by the noise. We consider all queries that run faster than 0.02 s to be "short", and only check the performance if they became slower than this threshold. Probably this mode is not what you want, so you have to increase the query run time to be between 1 and 0.1 s, so that the performance can be compared. You do want this "short" mode for queries that complete "immediately", such as some varieties of `select count(*)`. You have to mark them as "short" explicitly by writing `...`. The value of "short" attribute is evaluated as a python expression, and substitutions are performed, so you can write something like `select count(*) from table where {column1} > {column2}`, to mark only a particular combination of variables as short. +#### Run Errors +Action required for every item -- these are errors that must be fixed. -#### Partial queries -Action required for the cells marked in red. Shows the queries we are unable to run on an old server -- probably because they contain a new function. You should see this table when you add a new function and a performance test for it. Check that the run time and variance are acceptable (run time between 0.1 and 1 seconds, variance below 10%). If not, they will be highlighted in red. +The errors that occurred when running some test queries. For more information about the error, download test output archive and see `test-name-err.log`. To reproduce, see 'How to run' below. -#### Changes in performance -Action required for the cells marked in red, and some cheering is appropriate for the cells marked in green. These are the queries for which we observe a statistically significant change in performance. Note that there will always be some false positives -- we try to filter by p < 0.001, and have 2000 queries, so two false positives per run are expected. In practice we have more -- e.g. code layout changed because of some unknowable jitter in compiler internals, so the change we observe is real, but it is a 'false positive' in the sense that it is not directly caused by your changes. If, based on your knowledge of ClickHouse internals, you can decide that the observed test changes are not relevant to the changes made in the tested PR, you can ignore them. +#### Slow on Client +Action required for every item -- these are errors that must be fixed. + +This table shows queries that take significantly longer to process on the client than on the server. A possible reason might be sending too much data to the client, e.g., a forgotten `format Null`.
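+
+For example, appending `FORMAT Null` keeps the query running on the server but discards the result instead of streaming it to the client. A minimal sketch -- the table is the obfuscated hits benchmark dataset referenced elsewhere in this repository, and the column names are illustrative:
+
+```sql
+-- The server still executes the full query, but no result rows are
+-- transferred, so the client-side time stays close to the server-side time.
+SELECT URL, count() AS c
+FROM hits_100m_obfuscated
+GROUP BY URL
+ORDER BY c DESC
+LIMIT 10
+FORMAT Null
+```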
+ +#### Unexpected Query Duration +Action required for every item -- these are errors that must be fixed. + +Queries that have "short" duration (on the order of 0.1 s) can't be reliably tested in a normal way, where we perform a small number (about ten) of measurements for each server, because the signal-to-noise ratio is much smaller. There is a special mode for such queries that instead runs them for a fixed amount of time, normally with a much higher number of measurements (up to thousands). This mode must be explicitly enabled by the test author to avoid accidental errors. It must be used only for queries that are meant to complete "immediately", such as `select count(*)`. If your query is not supposed to be "immediate", try to make it run longer, by e.g. processing more data. + +This table shows queries for which the "short" marking is not consistent with the actual query run time -- i.e., a query runs for a long time but is marked as short, or it runs very fast but is not marked as short. + +If your query is really supposed to complete "immediately" and can't be made to run longer, you have to mark it as "short". To do so, write `...` in the test file. The value of "short" attribute is evaluated as a python expression, and substitutions are performed, so you can write something like `select count(*) from table where {column1} > {column2}`, to mark only a particular combination of variables as short. + + +#### Partial Queries +Action required for the cells marked in red. + +Shows the queries we are unable to run on an old server -- probably because they contain a new function. You should see this table when you add a new function and a performance test for it. Check that the run time and variance are acceptable (run time between 0.1 and 1 seconds, variance below 10%). If not, they will be highlighted in red. + +#### Changes in Performance +Action required for the cells marked in red, and some cheering is appropriate for the cells marked in green. + +These are the queries for which we observe a statistically significant change in performance. Note that there will always be some false positives -- we try to filter by p < 0.001, and have 2000 queries, so two false positives per run are expected. In practice we have more -- e.g. code layout changed because of some unknowable jitter in compiler internals, so the change we observe is real, but it is a 'false positive' in the sense that it is not directly caused by your changes. If, based on your knowledge of ClickHouse internals, you can decide that the observed test changes are not relevant to the changes made in the tested PR, you can ignore them. You can find flame graphs for queries with performance changes in the test output archive, in files named as 'my_test_0_Cpu_SELECT 1 FROM....FORMAT Null.left.svg'. First goes the test name, then the query number in the test, then the trace type (same as in `system.trace_log`), and then the server version (left is old and right is new). -#### Unstable queries -Action required for the cells marked in red. These are queries for which we did not observe a statistically significant change in performance, but for which the variance in query performance is very high. This means that we are likely to observe big changes in performance even in the absence of real changes, e.g. when comparing the server to itself.
Such queries are going to have bad sensitivity as performance tests -- if a query has, say, 50% expected variability, this means we are going to see changes in performance up to 50%, even when there were no real changes in the code. And because of this, we won't be able to detect changes less than 50% with such a query, which is pretty bad. The reasons for the high variability must be investigated and fixed; ideally, the variability should be brought under 5-10%. +#### Unstable Queries +Action required for the cells marked in red. + +These are the queries for which we did not observe a statistically significant change in performance, but for which the variance in query performance is very high. This means that we are likely to observe big changes in performance even in the absence of real changes, e.g. when comparing the server to itself. Such queries are going to have bad sensitivity as performance tests -- if a query has, say, 50% expected variability, this means we are going to see changes in performance up to 50%, even when there were no real changes in the code. And because of this, we won't be able to detect changes less than 50% with such a query, which is pretty bad. The reasons for the high variability must be investigated and fixed; ideally, the variability should be brought under 5-10%. The most frequent reason for instability is that the query is just too short -- e.g. below 0.1 seconds. Bringing query time to 0.2 seconds or above usually helps. Other reasons may include: @@ -57,24 +79,33 @@ Other reasons may include: Investigating the instability is the hardest problem in performance testing, and we still have not been able to understand the reasons behind the instability of some queries. There are some data that can help you in the performance test output archive. Look for files named 'my_unstable_test_0_SELECT 1...FORMAT Null.{left,right}.metrics.rep'. They contain metrics from `system.query_log.ProfileEvents` and functions from stack traces from `system.trace_log`, that vary significantly between query runs. The second column is array of \[min, med, max] values for the metric. Say, if you see `PerfCacheMisses` there, it may mean that the code being tested has not-so-cache-local memory access pattern that is sensitive to memory layout. -#### Skipped tests -Informational, no action required. Shows the tests that were skipped, and the reason for it. Normally it is because the data set required for the test was not loaded, or the test is marked as 'long' -- both cases mean that the test is too big to be ran per-commit. +#### Skipped Tests +Informational, no action required. -#### Test performance changes -Informational, no action required. This table summarizes the changes in performance of queries in each test -- how many queries have changed, how many are unstable, and what is the magnitude of the changes. +Shows the tests that were skipped, and the reason for it. Normally it is because the data set required for the test was not loaded, or the test is marked as 'long' -- both cases mean that the test is too big to be run per-commit. +#### Test Performance Changes +Informational, no action required. + +This table summarizes the changes in performance of queries in each test -- how many queries have changed, how many are unstable, and what is the magnitude of the changes.
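+
+As a side note to the 'Unstable Queries' investigation above: the per-query metrics in the output archive come from `system.query_log.ProfileEvents`, and the same counters can be inspected directly on a running server. A sketch, assuming the Nested `ProfileEvents.Names`/`ProfileEvents.Values` layout of the query log (adjust for your server version):
+
+```sql
+-- Show the profile counters recorded for recently finished queries.
+SELECT
+    event_time,
+    query_duration_ms,
+    ProfileEvents.Names AS metric,
+    ProfileEvents.Values AS value
+FROM system.query_log
+ARRAY JOIN ProfileEvents
+WHERE type = 'QueryFinish'
+ORDER BY event_time DESC
+LIMIT 100
+```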
+ +#### Test Times +Action required for the cells marked in red. + +This table shows the run times for all the tests. You may have to fix two kinds of errors in this table: 1) Average query run time is too long -- probalby means that the preparatory steps such as creating the table and filling them with data are taking too long. Try to make them faster. 2) Longest query run time is too long -- some particular queries are taking too long, try to make them faster. The ideal query run time is between 0.1 and 1 s. -#### Concurrent benchmarks -No action required. This table shows the results of a concurrent behcmark where queries from `website` are ran in parallel using `clickhouse-benchmark`, and requests per second values are compared for old and new servers. It shows variability up to 20% for no apparent reason, so it's probably safe to disregard it. We have it for special cases like investigating concurrency effects in memory allocators, where it may be important. +#### Metric Changes +No action required. -#### Metric changes -No action required. These are changes in median values of metrics from `system.asynchronous_metrics_log`. Again, they are prone to unexplained variation and you can safely ignore this table unless it's interesting to you for some particular reason (e.g. you want to compare memory usage). There are also graphs of these metrics in the performance test output archive, in the `metrics` folder. +These are changes in median values of metrics from `system.asynchronous_metrics_log`. These metrics are prone to unexplained variation and you can safely ignore this table unless it's interesting to you for some particular reason (e.g. you want to compare memory usage). There are also graphs of these metrics in the performance test output archive, in the `metrics` folder. -### How to run +#### Errors while Building the Report +Ask a maintainer for help. These errors normally indicate a problem with testing infrastructure. + + +### How to Run Run the entire docker container, specifying PR number (0 for master) and SHA of the commit to test. The reference revision is determined as a nearest ancestor testing release tag. It is possible to specify the reference revision and diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 32ea74193b0..9cb337c4fd6 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -63,7 +63,7 @@ function configure # Make copies of the original db for both servers. Use hardlinks instead # of copying to save space. Before that, remove preprocessed configs and # system tables, because sharing them between servers with hardlinks may - # lead to weird effects. + # lead to weird effects. rm -r left/db ||: rm -r right/db ||: rm -r db0/preprocessed_configs ||: @@ -114,14 +114,12 @@ function run_tests # Just check that the script runs at all "$script_dir/perf.py" --help > /dev/null - changed_test_files="" - # Find the directory with test files. if [ -v CHPC_TEST_PATH ] then # Use the explicitly set path to directory with test files. test_prefix="$CHPC_TEST_PATH" - elif [ "$PR_TO_TEST" = "0" ] + elif [ "$PR_TO_TEST" == "0" ] then # When testing commits from master, use the older test files. This # allows the tests to pass even when we add new functions and tests for @@ -130,14 +128,6 @@ function run_tests else # For PRs, use newer test files so we can test these changes. 
test_prefix=right/performance - - # If only the perf tests were changed in the PR, we will run only these - # tests. The list of changed tests in changed-test.txt is prepared in - # entrypoint.sh from git diffs, because it has the cloned repo. Used - # to use rsync for that but it was really ugly and not always correct - # (e.g. when the reference SHA is really old and has some other - # differences to the tested SHA, besides the one introduced by the PR). - changed_test_files=$(sed "s/tests\/performance/${test_prefix//\//\\/}/" changed-tests.txt) fi # Determine which tests to run. @@ -146,15 +136,36 @@ function run_tests # Run only explicitly specified tests, if any. # shellcheck disable=SC2010 test_files=$(ls "$test_prefix" | grep "$CHPC_TEST_GREP" | xargs -I{} -n1 readlink -f "$test_prefix/{}") - elif [ "$changed_test_files" != "" ] + elif [ "$PR_TO_TEST" -ne 0 ] \ + && [ "$(wc -l < changed-test-definitions.txt)" -gt 0 ] \ + && [ "$(wc -l < changed-test-scripts.txt)" -eq 0 ] \ + && [ "$(wc -l < other-changed-files.txt)" -eq 0 ] then - # Use test files that changed in the PR. - test_files="$changed_test_files" + # If only the perf tests were changed in the PR, we will run only these + # tests. The lists of changed files are prepared in entrypoint.sh because + # it has the repository. + test_files=$(sed "s/tests\/performance/${test_prefix//\//\\/}/" changed-test-definitions.txt) else # The default -- run all tests found in the test dir. test_files=$(ls "$test_prefix"/*.xml) fi + # For PRs w/o changes in test definitons and scripts, test only a subset of + # queries, and run them less times. If the corresponding environment variables + # are already set, keep those values. + if [ "$PR_TO_TEST" -ne 0 ] \ + && [ "$(wc -l < changed-test-definitions.txt)" -eq 0 ] \ + && [ "$(wc -l < changed-test-scripts.txt)" -eq 0 ] + then + CHPC_RUNS=${CHPC_RUNS:-7} + CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-20} + else + CHPC_RUNS=${CHPC_RUNS:-13} + CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-0} + fi + export CHPC_RUNS + export CHPC_MAX_QUERIES + # Determine which concurrent benchmarks to run. For now, the only test # we run as a concurrent benchmark is 'website'. Run it as benchmark if we # are also going to run it as a normal test. @@ -184,11 +195,13 @@ function run_tests echo test "$test_name" TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n") - # the grep is to filter out set -x output and keep only time output + # The grep is to filter out set -x output and keep only time output. + # The '2>&1 >/dev/null' redirects stderr to stdout, and discards stdout. { \ time "$script_dir/perf.py" --host localhost localhost --port 9001 9002 \ + --runs "$CHPC_RUNS" --max-queries "$CHPC_MAX_QUERIES" \ -- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; \ - } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" \ + } 2>&1 >/dev/null | tee >(grep -v ^+ >> "wall-clock-times.tsv") \ || echo "Test $test_name failed with error code $?" >> "$test_name-err.log" done @@ -197,33 +210,9 @@ function run_tests wait } -# Run some queries concurrently and report the resulting TPS. This additional -# (relatively) short test helps detect concurrency-related effects, because the -# main performance comparison testing is done query-by-query. -function run_benchmark -{ - rm -rf benchmark ||: - mkdir benchmark ||: - - # The list is built by run_tests. 
- while IFS= read -r file - do - name=$(basename "$file" ".xml") - - "$script_dir/perf.py" --print-queries "$file" > "benchmark/$name-queries.txt" - "$script_dir/perf.py" --print-settings "$file" > "benchmark/$name-settings.txt" - - readarray -t settings < "benchmark/$name-settings.txt" - command=(clickhouse-benchmark --concurrency 6 --cumulative --iterations 1000 --randomize 1 --delay 0 --continue_on_errors "${settings[@]}") - - "${command[@]}" --port 9001 --json "benchmark/$name-left.json" < "benchmark/$name-queries.txt" - "${command[@]}" --port 9002 --json "benchmark/$name-right.json" < "benchmark/$name-queries.txt" - done < benchmarks-to-run.txt -} - function get_profiles_watchdog { - sleep 6000 + sleep 600 echo "The trace collection did not finish in time." >> profile-errors.log @@ -490,8 +479,6 @@ build_log_column_definitions cat analyze/errors.log >> report/errors.log ||: cat profile-errors.log >> report/errors.log ||: -short_query_threshold="0.02" - clickhouse-local --query " create view query_display_names as select * from file('analyze/query-display-names.tsv', TSV, @@ -524,18 +511,11 @@ create view query_metric_stats as -- Main statistics for queries -- query time as reported in query log. create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv') as select - -- Comparison mode doesn't make sense for queries that complete - -- immediately (on the same order of time as noise). If query duration is - -- less that some threshold, we just skip it. If there is a significant - -- regression in such query, the time will exceed the threshold, and we - -- well process it normally and detect the regression. - right < $short_query_threshold as short, - - not short and abs(diff) > report_threshold and abs(diff) > stat_threshold as changed_fail, - not short and abs(diff) > report_threshold - 0.05 and abs(diff) > stat_threshold as changed_show, + abs(diff) > report_threshold and abs(diff) > stat_threshold as changed_fail, + abs(diff) > report_threshold - 0.05 and abs(diff) > stat_threshold as changed_show, - not short and not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail, - not short and not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show, + not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail, + not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show, left, right, diff, stat_threshold, if(report_threshold > 0, report_threshold, 0.10) as report_threshold, @@ -640,24 +620,59 @@ create table wall_clock_time_per_test engine Memory as select * create table test_time engine Memory as select test, sum(client) total_client_time, - maxIf(client, not short) query_max, - minIf(client, not short) query_min, - count(*) queries, sum(short) short_queries + max(client) query_max, + min(client) query_min, + count(*) queries from total_client_time_per_query full join queries using (test, query_index) group by test; +create view query_runs as select * from file('analyze/query-runs.tsv', TSV, + 'test text, query_index int, query_id text, version UInt8, time float'); + +-- +-- Guess the number of query runs used for this test. The number is required to +-- calculate and check the average query run time in the report. +-- We have to be careful, because we will encounter: +-- 1) partial queries which run only on one server +-- 2) short queries which run for a much higher number of times +-- 3) some errors that make query run for a different number of times on a +-- particular server. 
+-- +create view test_runs as + select test, + -- Default to 7 runs if there are only 'short' queries in the test, and + -- we can't determine the number of runs. + if((ceil(medianOrDefaultIf(t.runs, not short), 0) as r) != 0, r, 7) runs + from ( + select + -- The query id is the same for both servers, so no need to divide here. + uniqExact(query_id) runs, + (test, query_index) in + (select * from file('analyze/marked-short-queries.tsv', TSV, + 'test text, query_index int')) + as short, + test, query_index + from query_runs + group by test, query_index + ) t + group by test + ; + create table test_times_report engine File(TSV, 'report/test-times.tsv') as select wall_clock_time_per_test.test, real, toDecimal64(total_client_time, 3), queries, - short_queries, toDecimal64(query_max, 3), toDecimal64(real / queries, 3) avg_real_per_query, - toDecimal64(query_min, 3) + toDecimal64(query_min, 3), + runs from test_time - -- wall clock times are also measured for skipped tests, so don't - -- do full join - left join wall_clock_time_per_test using test + -- wall clock times are also measured for skipped tests, so don't + -- do full join + left join wall_clock_time_per_test + on wall_clock_time_per_test.test = test_time.test + full join test_runs + on test_runs.test = test_time.test order by avg_real_per_query desc; -- report for all queries page, only main metric @@ -685,32 +700,48 @@ create table queries_for_flamegraph engine File(TSVWithNamesAndTypes, select test, query_index from queries where unstable_show or changed_show ; --- List of queries that have 'short' duration, but are not marked as 'short' by --- the test author (we report them). -create table unmarked_short_queries_report - engine File(TSV, 'report/unmarked-short-queries.tsv') - as select time, test, query_index, query_display_name + +create view shortness + as select + (test, query_index) in + (select * from file('analyze/marked-short-queries.tsv', TSV, + 'test text, query_index int')) + as marked_short, + time, test, query_index, query_display_name from ( - select right time, test, query_index from queries where short + select right time, test, query_index from queries union all select time_median, test, query_index from partial_query_times - where time_median < $short_query_threshold ) times left join query_display_names on times.test = query_display_names.test and times.query_index = query_display_names.query_index - where (test, query_index) not in - (select * from file('analyze/marked-short-queries.tsv', TSV, - 'test text, query_index int')) - order by test, query_index ; +-- Report of queries that have inconsistent 'short' markings: +-- 1) have short duration, but are not marked as 'short' +-- 2) the reverse -- marked 'short' but take too long. +-- The threshold for 2) is significantly larger than the threshold for 1), to +-- avoid jitter. 
+create table inconsistent_short_marking_report + engine File(TSV, 'report/unexpected-query-duration.tsv') + as select + multiIf(marked_short and time > 0.1, '"short" queries must run faster than 0.02 s', + not marked_short and time < 0.02, '"normal" queries must run longer than 0.1 s', + '') problem, + marked_short, time, + test, query_index, query_display_name + from shortness + where problem != '' + ; + + -------------------------------------------------------------------------------- -- various compatibility data formats follow, not related to the main report -- keep the table in old format so that we can analyze new and old data together create table queries_old_format engine File(TSVWithNamesAndTypes, 'queries.rep') - as select short, changed_fail, unstable_fail, left, right, diff, + as select 0 short, changed_fail, unstable_fail, left, right, diff, stat_threshold, test, query_display_name query from queries ; @@ -1008,9 +1039,6 @@ case "$stage" in # Ignore the errors to collect the log and build at least some report, anyway time run_tests ||: ;& -"run_benchmark") - time run_benchmark 2> >(tee -a run-errors.tsv 1>&2) ||: - ;& "get_profiles") # Check for huge pages. cat /sys/kernel/mm/transparent_hugepage/enabled > thp-enabled.txt ||: diff --git a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml b/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml index c6d9f7ea582..cee7dc3ff16 100644 --- a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml +++ b/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml @@ -1,8 +1,6 @@ - 10000000 - 0 1 1 1 diff --git a/docker/test/performance-comparison/entrypoint.sh b/docker/test/performance-comparison/entrypoint.sh index 9e9a46a3ce6..ed2e542eadd 100755 --- a/docker/test/performance-comparison/entrypoint.sh +++ b/docker/test/performance-comparison/entrypoint.sh @@ -97,13 +97,10 @@ then # tests for use by compare.sh. Compare to merge base, because master might be # far in the future and have unrelated test changes. base=$(git -C right/ch merge-base pr origin/master) - git -C right/ch diff --name-only "$base" pr | tee changed-tests.txt - if grep -vq '^tests/performance' changed-tests.txt - then - # Have some other changes besides the tests, so truncate the test list, - # meaning, run all tests. - : > changed-tests.txt - fi + git -C right/ch diff --name-only "$base" pr -- . | tee all-changed-files.txt + git -C right/ch diff --name-only "$base" pr -- tests/performance | tee changed-test-definitions.txt + git -C right/ch diff --name-only "$base" pr -- docker/test/performance-comparison | tee changed-test-scripts.txt + git -C right/ch diff --name-only "$base" pr -- :!tests/performance :!docker/test/performance-comparison | tee other-changed-files.txt fi # Set python output encoding so that we can print queries with Russian letters. 
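The two hunks above (compare.sh and entrypoint.sh) together decide what gets tested for a given PR: entrypoint.sh classifies the changed files into `changed-test-definitions.txt`, `changed-test-scripts.txt` and `other-changed-files.txt`, and compare.sh then picks the test set and the per-query run budget (`CHPC_RUNS`, `CHPC_MAX_QUERIES`). Below is a minimal Python sketch of that decision, for illustration only -- the real logic is the bash above, and `choose_run_mode` is a hypothetical helper, not part of the repository.

```python
#!/usr/bin/python3
# Illustration only: a sketch of the run-mode selection that compare.sh performs
# in bash, based on the file lists produced by entrypoint.sh.
import os

def count_lines(path):
    # The list files are produced by entrypoint.sh; treat a missing one as empty.
    if not os.path.exists(path):
        return 0
    with open(path) as f:
        return sum(1 for _ in f)

def choose_run_mode(pr_number):
    test_defs    = count_lines('changed-test-definitions.txt')
    test_scripts = count_lines('changed-test-scripts.txt')
    other        = count_lines('other-changed-files.txt')

    # Run only the changed tests when the PR touches nothing but test definitions.
    if pr_number != 0 and test_defs > 0 and test_scripts == 0 and other == 0:
        tests = 'changed tests only'
    else:
        tests = 'all tests'

    # PRs that do not touch test definitions or test scripts get a cheaper pass:
    # fewer runs and at most 20 randomly chosen queries per test (0 = no limit).
    if pr_number != 0 and test_defs == 0 and test_scripts == 0:
        runs, max_queries = 7, 20
    else:
        runs, max_queries = 13, 0

    return tests, runs, max_queries

print(choose_run_mode(pr_number=12345))
```

With this scheme a PR that only edits files under `tests/performance` gets a focused but thorough run, an unrelated PR gets a lighter smoke pass, and a PR that touches the comparison scripts themselves is tested in full.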
diff --git a/docker/test/performance-comparison/perf.py b/docker/test/performance-comparison/perf.py index 05e89c9e44c..79cdc8ea8d2 100755 --- a/docker/test/performance-comparison/perf.py +++ b/docker/test/performance-comparison/perf.py @@ -1,16 +1,21 @@ #!/usr/bin/python3 -import os -import sys -import itertools -import clickhouse_driver -import xml.etree.ElementTree as et import argparse +import clickhouse_driver +import itertools +import functools +import math +import os import pprint +import random import re +import statistics import string +import sys import time import traceback +import xml.etree.ElementTree as et +from scipy import stats def tsv_escape(s): return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','') @@ -20,7 +25,8 @@ parser = argparse.ArgumentParser(description='Run performance test.') parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file') parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.") parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.") -parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 7)), help='Number of query runs per server. Defaults to CHPC_RUNS environment variable.') +parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.') +parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.') parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.') parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.') parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.') @@ -62,18 +68,13 @@ def substitute_parameters(query_templates, other_templates = []): # Build a list of test queries, substituting parameters to query templates, # and reporting the queries marked as short. test_queries = [] +is_short = [] for e in root.findall('query'): - new_queries = [] - if 'short' in e.attrib: - new_queries, [is_short] = substitute_parameters([e.text], [[e.attrib['short']]]) - for i, s in enumerate(is_short): - # Don't print this if we only need to print the queries. - if eval(s) and not args.print_queries: - print(f'short\t{i + len(test_queries)}') - else: - new_queries = substitute_parameters([e.text]) - + new_queries, [new_is_short] = substitute_parameters([e.text], [[e.attrib.get('short', '0')]]) test_queries += new_queries + is_short += [eval(s) for s in new_is_short] + +assert(len(test_queries) == len(is_short)) # If we're only asked to print the queries, do that and exit @@ -82,6 +83,11 @@ if args.print_queries: print(q) exit(0) +# Print short queries +for i, s in enumerate(is_short): + if s: + print(f'short\t{i}') + # If we're only asked to print the settings, do that and exit. These are settings # for clickhouse-benchmark, so we print them as command line arguments, e.g. # '--max_memory_usage=10000000'. @@ -98,25 +104,13 @@ if not args.long: print('skipped\tTest is tagged as long.') sys.exit(0) -# Check main metric to detect infinite tests. We shouldn't have such tests anymore, -# but we did in the past, and it is convenient to be able to process old tests. 
-main_metric_element = root.find('main_metric/*') -if main_metric_element is not None and main_metric_element.tag != 'min_time': - raise Exception('Only the min_time main metric is supported. This test uses \'{}\''.format(main_metric_element.tag)) - -# Another way to detect infinite tests. They should have an appropriate main_metric -# but sometimes they don't. -infinite_sign = root.find('.//average_speed_not_changing_for_ms') -if infinite_sign is not None: - raise Exception('Looks like the test is infinite (sign 1)') - # Print report threshold for the test if it is set. if 'max_ignored_relative_change' in root.attrib: print(f'report-threshold\t{root.attrib["max_ignored_relative_change"]}') # Open connections servers = [{'host': host, 'port': port} for (host, port) in zip(args.host, args.port)] -connections = [clickhouse_driver.Client(**server) for server in servers] +all_connections = [clickhouse_driver.Client(**server) for server in servers] for s in servers: print('server\t{}\t{}'.format(s['host'], s['port'])) @@ -126,7 +120,7 @@ for s in servers: # connection loses the changes in settings. drop_query_templates = [q.text for q in root.findall('drop_query')] drop_queries = substitute_parameters(drop_query_templates) -for conn_index, c in enumerate(connections): +for conn_index, c in enumerate(all_connections): for q in drop_queries: try: c.execute(q) @@ -142,7 +136,7 @@ for conn_index, c in enumerate(connections): # configurable). So the end result is uncertain, but hopefully we'll be able to # run at least some queries. settings = root.findall('settings/*') -for conn_index, c in enumerate(connections): +for conn_index, c in enumerate(all_connections): for s in settings: try: q = f"set {s.tag} = '{s.text}'" @@ -154,7 +148,7 @@ for conn_index, c in enumerate(connections): # Check tables that should exist. If they don't exist, just skip this test. tables = [e.text for e in root.findall('preconditions/table_exists')] for t in tables: - for c in connections: + for c in all_connections: try: res = c.execute("select 1 from {} limit 1".format(t)) except: @@ -176,7 +170,7 @@ for q in create_queries: file = sys.stderr) sys.exit(1) -for conn_index, c in enumerate(connections): +for conn_index, c in enumerate(all_connections): for q in create_queries: c.execute(q) print(f'create\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') @@ -184,13 +178,19 @@ for conn_index, c in enumerate(connections): # Run fill queries fill_query_templates = [q.text for q in root.findall('fill_query')] fill_queries = substitute_parameters(fill_query_templates) -for conn_index, c in enumerate(connections): +for conn_index, c in enumerate(all_connections): for q in fill_queries: c.execute(q) print(f'fill\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') +# Run the queries in randomized order, but preserve their indexes as specified +# in the test XML. To avoid using too much time, limit the number of queries +# we run per test. +queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries or len(test_queries))) + # Run test queries. -for query_index, q in enumerate(test_queries): +for query_index in queries_to_run: + q = test_queries[query_index] query_prefix = f'{test_name}.query{query_index}' # We have some crazy long queries (about 100kB), so trim them to a sane @@ -208,11 +208,12 @@ for query_index, q in enumerate(test_queries): # new one. We want to run them on the new server only, so that the PR author # can ensure that the test works properly. 
Remember the errors we had on # each server. - query_error_on_connection = [None] * len(connections); - for conn_index, c in enumerate(connections): + query_error_on_connection = [None] * len(all_connections); + for conn_index, c in enumerate(all_connections): try: prewarm_id = f'{query_prefix}.prewarm0' - res = c.execute(q, query_id = prewarm_id) + # Will also detect too long queries during warmup stage + res = c.execute(q, query_id = prewarm_id, settings = {'max_execution_time': 10}) print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}') except KeyboardInterrupt: raise @@ -222,7 +223,6 @@ for query_index, q in enumerate(test_queries): query_error_on_connection[conn_index] = traceback.format_exc(); continue - # Report all errors that ocurred during prewarm and decide what to do next. # If prewarm fails for the query on all servers -- skip the query and # continue testing the next query. @@ -236,21 +236,29 @@ for query_index, q in enumerate(test_queries): if len(no_errors) == 0: continue - elif len(no_errors) < len(connections): + elif len(no_errors) < len(all_connections): print(f'partial\t{query_index}\t{no_errors}') + this_query_connections = [all_connections[index] for index in no_errors] + # Now, perform measured runs. # Track the time spent by the client to process this query, so that we can # notice the queries that take long to process on the client side, e.g. by # sending excessive data. start_seconds = time.perf_counter() server_seconds = 0 - for run in range(0, args.runs): - run_id = f'{query_prefix}.run{run}' - for conn_index, c in enumerate(connections): - if query_error_on_connection[conn_index]: - continue + profile_seconds = 0 + run = 0 + # Arrays of run times for each connection. + all_server_times = [] + for conn_index, c in enumerate(this_query_connections): + all_server_times.append([]) + + while True: + run_id = f'{query_prefix}.run{run}' + + for conn_index, c in enumerate(this_query_connections): try: res = c.execute(q, query_id = run_id) except Exception as e: @@ -259,22 +267,79 @@ for query_index, q in enumerate(test_queries): e.message = run_id + ': ' + e.message raise - print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}') - server_seconds += c.last_query.elapsed + elapsed = c.last_query.elapsed + all_server_times[conn_index].append(elapsed) - if c.last_query.elapsed > 10: + server_seconds += elapsed + print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}') + + if elapsed > 10: # Stop processing pathologically slow queries, to avoid timing out # the entire test task. This shouldn't really happen, so we don't # need much handling for this case and can just exit. - print(f'The query no. {query_index} is taking too long to run ({c.last_query.elapsed} s)', file=sys.stderr) + print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr) exit(2) + # Be careful with the counter, after this line it's the next iteration + # already. + run += 1 + + # Try to run any query for at least the specified number of times, + # before considering other stop conditions. + if run < args.runs: + continue + + # For very short queries we have a special mode where we run them for at + # least some time. The recommended lower bound of run time for "normal" + # queries is about 0.1 s, and we run them about 10 times, giving the + # time per query per server of about one second. Use this value as a + # reference for "short" queries. 
+ if is_short[query_index]: + if server_seconds >= 2 * len(this_query_connections): + break + # Also limit the number of runs, so that we don't go crazy processing + # the results -- 'eqmed.sql' is really suboptimal. + if run >= 500: + break + else: + if run >= args.runs: + break + client_seconds = time.perf_counter() - start_seconds print(f'client-time\t{query_index}\t{client_seconds}\t{server_seconds}') + #print(all_server_times) + #print(stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue) + + # Run additional profiling queries to collect profile data, but only if test times appeared to be different. + # We have to do it after normal runs because otherwise it will affect test statistics too much + if len(all_server_times) == 2 and stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue < 0.1: + run = 0 + while True: + run_id = f'{query_prefix}.profile{run}' + + for conn_index, c in enumerate(this_query_connections): + try: + res = c.execute(q, query_id = run_id, settings = {'query_profiler_real_time_period_ns': 10000000}) + print(f'profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}') + except Exception as e: + # Add query id to the exception to make debugging easier. + e.args = (run_id, *e.args) + e.message = run_id + ': ' + e.message + raise + + elapsed = c.last_query.elapsed + profile_seconds += elapsed + + run += 1 + # Don't spend too much time for profile runs + if run > args.runs or profile_seconds > 10: + break + # And don't bother with short queries + # Run drop queries drop_queries = substitute_parameters(drop_query_templates) -for conn_index, c in enumerate(connections): +for conn_index, c in enumerate(all_connections): for q in drop_queries: c.execute(q) print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') diff --git a/docker/test/performance-comparison/report.py b/docker/test/performance-comparison/report.py index e9e2ac68c1e..bd5dedfceed 100755 --- a/docker/test/performance-comparison/report.py +++ b/docker/test/performance-comparison/report.py @@ -98,6 +98,9 @@ th {{ tr:nth-child(odd) td {{filter: brightness(90%);}} +.unexpected-query-duration tr :nth-child(2), +.unexpected-query-duration tr :nth-child(3), +.unexpected-query-duration tr :nth-child(5), .all-query-times tr :nth-child(1), .all-query-times tr :nth-child(2), .all-query-times tr :nth-child(3), @@ -126,7 +129,6 @@ tr:nth-child(odd) td {{filter: brightness(90%);}} .test-times tr :nth-child(5), .test-times tr :nth-child(6), .test-times tr :nth-child(7), -.test-times tr :nth-child(8), .concurrent-benchmarks tr :nth-child(2), .concurrent-benchmarks tr :nth-child(3), .concurrent-benchmarks tr :nth-child(4), @@ -205,9 +207,11 @@ def tableStart(title): global table_anchor table_anchor = cls anchor = currentTableAnchor() + help_anchor = '-'.join(title.lower().split(' ')); return f"""

{title} + ?

""" @@ -250,7 +254,7 @@ def addSimpleTable(caption, columns, rows, pos=None): def add_tested_commits(): global report_errors try: - addSimpleTable('Tested commits', ['Old', 'New'], + addSimpleTable('Tested Commits', ['Old', 'New'], [['
<pre>{}</pre>
'.format(x) for x in [open('left-commit.txt').read(), open('right-commit.txt').read()]]]) @@ -276,7 +280,7 @@ def add_report_errors(): if not report_errors: return - text = tableStart('Errors while building the report') + text = tableStart('Errors while Building the Report') text += tableHeader(['Error']) for x in report_errors: text += tableRow([x]) @@ -290,7 +294,7 @@ def add_errors_explained(): return text = '' - text += tableStart('Error summary') + text += tableStart('Error Summary') text += tableHeader(['Description']) for row in errors_explained: text += tableRow(row) @@ -308,26 +312,26 @@ if args.report == 'main': run_error_rows = tsvRows('run-errors.tsv') error_tests += len(run_error_rows) - addSimpleTable('Run errors', ['Test', 'Error'], run_error_rows) + addSimpleTable('Run Errors', ['Test', 'Error'], run_error_rows) if run_error_rows: errors_explained.append([f'There were some errors while running the tests']); slow_on_client_rows = tsvRows('report/slow-on-client.tsv') error_tests += len(slow_on_client_rows) - addSimpleTable('Slow on client', + addSimpleTable('Slow on Client', ['Client time, s', 'Server time, s', 'Ratio', 'Test', 'Query'], slow_on_client_rows) if slow_on_client_rows: errors_explained.append([f'Some queries are taking noticeable time client-side (missing `FORMAT Null`?)']); - unmarked_short_rows = tsvRows('report/unmarked-short-queries.tsv') + unmarked_short_rows = tsvRows('report/unexpected-query-duration.tsv') error_tests += len(unmarked_short_rows) - addSimpleTable('Short queries not marked as short', - ['New client time, s', 'Test', '#', 'Query'], + addSimpleTable('Unexpected Query Duration', + ['Problem', 'Marked as "short"?', 'Run time, s', 'Test', '#', 'Query'], unmarked_short_rows) if unmarked_short_rows: - errors_explained.append([f'Some queries have short duration but are not explicitly marked as "short"']); + errors_explained.append([f'Some queries have unexpected duration']); def add_partial(): rows = tsvRows('report/partial-queries-report.tsv') @@ -335,7 +339,7 @@ if args.report == 'main': return global unstable_partial_queries, slow_average_tests, tables - text = tableStart('Partial queries') + text = tableStart('Partial Queries') columns = ['Median time, s', 'Relative time variance', 'Test', '#', 'Query'] text += tableHeader(columns) attrs = ['' for c in columns] @@ -366,7 +370,7 @@ if args.report == 'main': global faster_queries, slower_queries, tables - text = tableStart('Changes in performance') + text = tableStart('Changes in Performance') columns = [ 'Old, s', # 0 'New, s', # 1 @@ -423,7 +427,7 @@ if args.report == 'main': 'Query' #7 ] - text = tableStart('Unstable queries') + text = tableStart('Unstable Queries') text += tableHeader(columns) attrs = ['' for c in columns] @@ -444,9 +448,9 @@ if args.report == 'main': add_unstable_queries() skipped_tests_rows = tsvRows('analyze/skipped-tests.tsv') - addSimpleTable('Skipped tests', ['Test', 'Reason'], skipped_tests_rows) + addSimpleTable('Skipped Tests', ['Test', 'Reason'], skipped_tests_rows) - addSimpleTable('Test performance changes', + addSimpleTable('Test Performance Changes', ['Test', 'Ratio of speedup (-) or slowdown (+)', 'Queries', 'Total not OK', 'Changed perf', 'Unstable'], tsvRows('report/test-perf-changes.tsv')) @@ -457,39 +461,38 @@ if args.report == 'main': return columns = [ - 'Test', #0 + 'Test', #0 'Wall clock time, s', #1 'Total client time, s', #2 'Total queries', #3 - 'Ignored short queries', #4 - 'Longest query
(sum for all runs), s', #5 - 'Avg wall clock time
(sum for all runs), s', #6 - 'Shortest query
(sum for all runs), s', #7 + 'Longest query
(sum for all runs), s', #4 + 'Avg wall clock time
(sum for all runs), s', #5 + 'Shortest query
(sum for all runs), s', #6 + # 'Runs' #7 ] - text = tableStart('Test times') + text = tableStart('Test Times') text += tableHeader(columns) - nominal_runs = 7 # FIXME pass this as an argument - total_runs = (nominal_runs + 1) * 2 # one prewarm run, two servers - allowed_average_run_time = allowed_single_run_time + 60 / total_runs; # some allowance for fill/create queries + allowed_average_run_time = 3.75 # 60 seconds per test at 7 runs attrs = ['' for c in columns] for r in rows: anchor = f'{currentTableAnchor()}.{r[0]}' - if float(r[6]) > allowed_average_run_time * total_runs: + total_runs = (int(r[7]) + 1) * 2 # one prewarm run, two servers + if float(r[5]) > allowed_average_run_time * total_runs: # FIXME should be 15s max -- investigate parallel_insert slow_average_tests += 1 - attrs[6] = f'style="background: {color_bad}"' + attrs[5] = f'style="background: {color_bad}"' errors_explained.append([f'The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up']) else: - attrs[6] = '' + attrs[5] = '' - if float(r[5]) > allowed_single_run_time * total_runs: + if float(r[4]) > allowed_single_run_time * total_runs: slow_average_tests += 1 - attrs[5] = f'style="background: {color_bad}"' + attrs[4] = f'style="background: {color_bad}"' errors_explained.append([f'Some query of the test \'{r[0]}\' is too slow to run. See the all queries report']) else: - attrs[5] = '' + attrs[4] = '' text += tableRow(r, attrs, anchor) @@ -498,74 +501,7 @@ if args.report == 'main': add_test_times() - def add_benchmark_results(): - if not os.path.isfile('benchmark/website-left.json'): - return - - json_reports = [json.load(open(f'benchmark/website-{x}.json')) for x in ['left', 'right']] - stats = [next(iter(x.values()))["statistics"] for x in json_reports] - qps = [x["QPS"] for x in stats] - queries = [x["num_queries"] for x in stats] - errors = [x["num_errors"] for x in stats] - relative_diff = (qps[1] - qps[0]) / max(0.01, qps[0]); - times_diff = max(qps) / max(0.01, min(qps)) - - all_rows = [] - header = ['Benchmark', 'Metric', 'Old', 'New', 'Relative difference', 'Times difference']; - - attrs = ['' for x in header] - row = ['website', 'queries', f'{queries[0]:d}', f'{queries[1]:d}', '--', '--'] - attrs[0] = 'rowspan=2' - all_rows.append([row, attrs]) - - attrs = ['' for x in header] - row = [None, 'queries/s', f'{qps[0]:.3f}', f'{qps[1]:.3f}', f'{relative_diff:.3f}', f'x{times_diff:.3f}'] - if abs(relative_diff) > 0.1: - # More queries per second is better. 
- if relative_diff > 0.: - attrs[4] = f'style="background: {color_good}"' - else: - attrs[4] = f'style="background: {color_bad}"' - else: - attrs[4] = '' - all_rows.append([row, attrs]); - - if max(errors): - all_rows[0][1][0] = "rowspan=3" - row = [''] * (len(header)) - attrs = ['' for x in header] - - attrs[0] = None - row[1] = 'errors' - row[2] = f'{errors[0]:d}' - row[3] = f'{errors[1]:d}' - row[4] = '--' - row[5] = '--' - if errors[0]: - attrs[2] += f' style="background: {color_bad}" ' - if errors[1]: - attrs[3] += f' style="background: {color_bad}" ' - - all_rows.append([row, attrs]) - - text = tableStart('Concurrent benchmarks') - text += tableHeader(header) - for row, attrs in all_rows: - text += tableRow(row, attrs) - text += tableEnd() - - global tables - tables.append(text) - - try: - add_benchmark_results() - except: - report_errors.append( - traceback.format_exception_only( - *sys.exc_info()[:2])[-1]) - pass - - addSimpleTable('Metric changes', + addSimpleTable('Metric Changes', ['Metric', 'Old median value', 'New median value', 'Relative difference', 'Times difference'], tsvRows('metrics/changes.tsv')) @@ -656,7 +592,7 @@ elif args.report == 'all-queries': 'Query', #9 ] - text = tableStart('All query times') + text = tableStart('All Query Times') text += tableHeader(columns) attrs = ['' for c in columns] diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 4a9ad891883..b6b48cd0943 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -48,13 +48,6 @@ fi ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml -echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7'" >> /etc/environment -echo "TSAN_SYMBOLIZER_PATH=/usr/lib/llvm-10/bin/llvm-symbolizer" >> /etc/environment -echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment -echo "ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-10/bin/llvm-symbolizer" >> /etc/environment -echo "UBSAN_SYMBOLIZER_PATH=/usr/lib/llvm-10/bin/llvm-symbolizer" >> /etc/environment -echo "LLVM_SYMBOLIZER_PATH=/usr/lib/llvm-10/bin/llvm-symbolizer" >> /etc/environment - service zookeeper start sleep 5 service clickhouse-server start && sleep 5 diff --git a/docker/test/stateless_unbundled/run.sh b/docker/test/stateless_unbundled/run.sh index 4a9ad891883..b6b48cd0943 100755 --- a/docker/test/stateless_unbundled/run.sh +++ b/docker/test/stateless_unbundled/run.sh @@ -48,13 +48,6 @@ fi ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml -echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7'" >> /etc/environment -echo "TSAN_SYMBOLIZER_PATH=/usr/lib/llvm-10/bin/llvm-symbolizer" >> /etc/environment -echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment -echo "ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-10/bin/llvm-symbolizer" >> /etc/environment -echo "UBSAN_SYMBOLIZER_PATH=/usr/lib/llvm-10/bin/llvm-symbolizer" >> /etc/environment -echo "LLVM_SYMBOLIZER_PATH=/usr/lib/llvm-10/bin/llvm-symbolizer" >> /etc/environment - service zookeeper start sleep 5 service clickhouse-server start && sleep 5 diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 47c8603babb..8295e90b3ef 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -43,8 +43,6 @@ ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/u ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/ ln -s /usr/share/clickhouse-test/config/text_log.xml 
/etc/clickhouse-server/config.d/ -echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_modules=1 verbosity=1'" >> /etc/environment -echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment start diff --git a/docker/test/unit/Dockerfile b/docker/test/unit/Dockerfile index ae5ea1820b0..0f65649fb76 100644 --- a/docker/test/unit/Dockerfile +++ b/docker/test/unit/Dockerfile @@ -5,12 +5,5 @@ ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone RUN apt-get install gdb -CMD ln -s /usr/lib/llvm-8/bin/llvm-symbolizer /usr/bin/llvm-symbolizer; \ - echo "TSAN_OPTIONS='halt_on_error=1 history_size=7'" >> /etc/environment; \ - echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \ - echo "ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-6.0/bin/llvm-symbolizer" >> /etc/environment; \ - echo "UBSAN_SYMBOLIZER_PATH=/usr/lib/llvm-6.0/bin/llvm-symbolizer" >> /etc/environment; \ - echo "TSAN_SYMBOLIZER_PATH=/usr/lib/llvm-8/bin/llvm-symbolizer" >> /etc/environment; \ - echo "LLVM_SYMBOLIZER_PATH=/usr/lib/llvm-6.0/bin/llvm-symbolizer" >> /etc/environment; \ - service zookeeper start && sleep 7 && /usr/share/zookeeper/bin/zkCli.sh -server localhost:2181 -create create /clickhouse_test ''; \ +CMD service zookeeper start && sleep 7 && /usr/share/zookeeper/bin/zkCli.sh -server localhost:2181 -create create /clickhouse_test ''; \ gdb -q -ex 'set print inferior-events off' -ex 'set confirm off' -ex 'set print thread-events off' -ex run -ex bt -ex quit --args ./unit_tests_dbms | tee test_output/test_result.txt diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index 596fe20be90..6d57dfde9cd 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -38,6 +38,7 @@ toc_title: Adopters | Deutsche Bank | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | | Diva-e | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | | Ecwid | E-commerce SaaS | Metrics, Logging | — | — | [Slides in Russian, April 2019](https://nastachku.ru/var/files/1/presentation/backend/2_Backend_6.pdf) | +| eBay | E-commerce | TBA | — | — | [Webinar, Sep 2020](https://altinity.com/webinarspage/2020/09/08/migrating-from-druid-to-next-gen-olap-on-clickhouse-ebays-experience) | | Exness | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | | FastNetMon | DDoS Protection | Main Product | | — | [Official website](https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-traffic-persistency/) | | Flipkart | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) | diff --git a/docs/en/operations/performance-test.md b/docs/en/operations/performance-test.md index c3ef10da774..984bbe02174 100644 --- a/docs/en/operations/performance-test.md +++ b/docs/en/operations/performance-test.md @@ -13,49 +13,41 @@ With this instruction you can run basic ClickHouse performance test on any serve 4. 
ssh to the server and download it with wget: ```bash # For amd64: -wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse +wget https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse # For aarch64: -wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse +wget https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_special_build_check/clang-10-aarch64_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse # Then do: chmod a+x clickhouse ``` -5. Download configs: -```bash -wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml -wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml -mkdir config.d -wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml -wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml -``` -6. Download benchmark files: +5. Download benchmark files: ```bash wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh chmod a+x benchmark-new.sh wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql ``` -7. Download test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (“hits” table containing 100 million rows). +6. Download test data according to the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instruction (“hits” table containing 100 million rows). ```bash wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz tar xvf hits_100m_obfuscated_v1.tar.xz -C . mv hits_100m_obfuscated_v1/* . ``` -8. Run the server: +7. Run the server: ```bash ./clickhouse server ``` -9. Check the data: ssh to the server in another terminal +8. Check the data: ssh to the server in another terminal ```bash ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated" 100000000 ``` -10. Edit the benchmark-new.sh, change `clickhouse-client` to `./clickhouse client` and add `--max_memory_usage 100000000000` parameter. +9. Edit the benchmark-new.sh, change `clickhouse-client` to `./clickhouse client` and add `--max_memory_usage 100000000000` parameter. ```bash mcedit benchmark-new.sh ``` -11. Run the benchmark: +10. Run the benchmark: ```bash ./benchmark-new.sh hits_100m_obfuscated ``` -12. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com +11. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com All the results are published here: https://clickhouse.tech/benchmark/hardware/ diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index c1ac1d0d92d..ee0373c70b4 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -521,6 +521,22 @@ For more information, see the MergeTreeSettings.h header file. 
``` +## replicated\_merge\_tree {#server_configuration_parameters-replicated_merge_tree} + +Fine tuning for tables in the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/mergetree.md). + +This setting has higher priority. + +For more information, see the MergeTreeSettings.h header file. + +**Example** + +``` xml + + 5 + +``` + ## openSSL {#server_configuration_parameters-openssl} SSL client/server configuration. diff --git a/docs/en/operations/settings/query-complexity.md b/docs/en/operations/settings/query-complexity.md index 0486392d259..f803e694eb7 100644 --- a/docs/en/operations/settings/query-complexity.md +++ b/docs/en/operations/settings/query-complexity.md @@ -60,6 +60,31 @@ A maximum number of bytes (uncompressed data) that can be read from a table when What to do when the volume of data read exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +## max\_rows\_to\_read_leaf {#max-rows-to-read-leaf} + +The following restrictions can be checked on each block (instead of on each row). That is, the restrictions can be broken a little. + +A maximum number of rows that can be read from a local table on a leaf node when running a distributed query. While +distributed queries can issue a multiple sub-queries to each shard (leaf) - this limit will be checked only on the read +stage on the leaf nodes and ignored on results merging stage on the root node. For example, cluster consists of 2 shards +and each shard contains a table with 100 rows. Then distributed query which suppose to read all the data from both +tables with setting `max_rows_to_read=150` will fail as in total it will be 200 rows. While query +with `max_rows_to_read_leaf=150` will succeed since leaf nodes will read 100 rows at max. + +## max\_bytes\_to\_read_leaf {#max-bytes-to-read-leaf} + +A maximum number of bytes (uncompressed data) that can be read from a local table on a leaf node when running +a distributed query. While distributed queries can issue a multiple sub-queries to each shard (leaf) - this limit will +be checked only on the read stage on the leaf nodes and ignored on results merging stage on the root node. +For example, cluster consists of 2 shards and each shard contains a table with 100 bytes of data. +Then distributed query which suppose to read all the data from both tables with setting `max_bytes_to_read=150` will fail +as in total it will be 200 bytes. While query with `max_bytes_to_read_leaf=150` will succeed since leaf nodes will read +100 bytes at max. + +## read\_overflow\_mode_leaf {#read-overflow-mode-leaf} + +What to do when the volume of data read exceeds one of the leaf limits: ‘throw’ or ‘break’. By default, throw. + ## max\_rows\_to\_group\_by {#settings-max-rows-to-group-by} A maximum number of unique keys received from aggregation. This setting lets you limit memory consumption when aggregating. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 76fcfa2a616..e97a418f1ed 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -940,6 +940,8 @@ This algorithm chooses the first replica in the set or a random replica if the f The `first_or_random` algorithm solves the problem of the `in_order` algorithm. With `in_order`, if one replica goes down, the next one gets a double load while the remaining replicas handle the usual amount of traffic. When using the `first_or_random` algorithm, the load is evenly distributed among replicas that are still available. 
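For the `first_or_random` policy described above, here is a hedged example of enabling it per query from Python with `clickhouse_driver` (the client library already used by `perf.py` in this diff). The host, port and query are placeholders; only the `load_balancing` setting name and the `first_or_random` value come from the documentation text.

```python
# Illustration only: choose the replica-selection policy for a single query.
# Host, port and the query itself are placeholders.
import clickhouse_driver

client = clickhouse_driver.Client(host='localhost', port=9000)

rows = client.execute(
    'SELECT count() FROM system.one',
    settings={'load_balancing': 'first_or_random'})
print(rows)  # expected: [(1,)]
```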
+It's possible to explicitly define what the first replica is by using the setting `load_balancing_first_offset`. This gives more control to rebalance query workloads among replicas. + ### Round Robin {#load_balancing-round_robin} ``` sql @@ -1815,7 +1817,7 @@ Default value: 8192. Turns on or turns off using of single dictionary for the data part. -By default, ClickHouse server monitors the size of dictionaries and if a dictionary overflows then the server starts to write the next one. To prohibit creating several dictionaries set `low_cardinality_use_single_dictionary_for_part = 1`. +By default, the ClickHouse server monitors the size of dictionaries and if a dictionary overflows then the server starts to write the next one. To prohibit creating several dictionaries set `low_cardinality_use_single_dictionary_for_part = 1`. Possible values: @@ -1974,4 +1976,54 @@ Possible values: Default value: `120` seconds. +## output_format_pretty_max_value_width {#output_format_pretty_max_value_width} + +Limits the width of value displayed in [Pretty](../../interfaces/formats.md#pretty) formats. If the value width exceeds the limit, the value is cut. + +Possible values: + +- Positive integer. +- 0 — The value is cut completely. + +Default value: `10000` symbols. + +**Examples** + +Query: +```sql +SET output_format_pretty_max_value_width = 10; +SELECT range(number) FROM system.numbers LIMIT 10 FORMAT PrettyCompactNoEscapes; +``` +Result: +```text +┌─range(number)─┐ +│ [] │ +│ [0] │ +│ [0,1] │ +│ [0,1,2] │ +│ [0,1,2,3] │ +│ [0,1,2,3,4⋯ │ +│ [0,1,2,3,4⋯ │ +│ [0,1,2,3,4⋯ │ +│ [0,1,2,3,4⋯ │ +│ [0,1,2,3,4⋯ │ +└───────────────┘ +``` + +Query with zero width: +```sql +SET output_format_pretty_max_value_width = 0; +SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes; +``` +Result: +```text +┌─range(number)─┐ +│ ⋯ │ +│ ⋯ │ +│ ⋯ │ +│ ⋯ │ +│ ⋯ │ +└───────────────┘ +``` + [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) diff --git a/docs/en/sql-reference/aggregate-functions/reference/maxmap.md b/docs/en/sql-reference/aggregate-functions/reference/maxmap.md index 4dca13ed1b4..c62502cf46e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/maxmap.md +++ b/docs/en/sql-reference/aggregate-functions/reference/maxmap.md @@ -6,10 +6,13 @@ toc_priority: 143 Syntax: `maxMap(key, value)` or `maxMap(Tuple(key, value))` -Calculates the maximum from `value` array according to the keys specified in the ‘key’ array. -Passing tuple of keys and values arrays is synonymical to passing two arrays of keys and values. -The number of elements in ‘key’ and ‘value’ must be the same for each row that is totaled. -Returns a tuple of two arrays: keys in sorted order, and values calculated for the corresponding keys. +Calculates the maximum from `value` array according to the keys specified in the `key` array. + +Passing a tuple of keys and value arrays is identical to passing two arrays of keys and values. + +The number of elements in `key` and `value` must be the same for each row that is totaled. + +Returns a tuple of two arrays: keys and values calculated for the corresponding keys. 
Example: diff --git a/docs/en/sql-reference/aggregate-functions/reference/minmap.md b/docs/en/sql-reference/aggregate-functions/reference/minmap.md index 1b946dea209..9408d0ddfff 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/minmap.md +++ b/docs/en/sql-reference/aggregate-functions/reference/minmap.md @@ -8,7 +8,7 @@ Syntax: `minMap(key, value)` or `minMap(Tuple(key, value))` Calculates the minimum from `value` array according to the keys specified in the `key` array. -Passing tuple of keys and values arrays is a synonym to passing two arrays of keys and values. +Passing a tuple of keys and value ​​arrays is identical to passing two arrays of keys and values. The number of elements in `key` and `value` must be the same for each row that is totaled. diff --git a/docs/en/sql-reference/data-types/lowcardinality.md b/docs/en/sql-reference/data-types/lowcardinality.md index 7ccac61e4d7..1a0cedb99c7 100644 --- a/docs/en/sql-reference/data-types/lowcardinality.md +++ b/docs/en/sql-reference/data-types/lowcardinality.md @@ -21,7 +21,7 @@ LowCardinality(data_type) `LowCardinality` is a superstructure that changes a data storage method and rules of data processing. ClickHouse applies [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) to `LowCardinality`-columns. Operating with dictionary encoded data significantly increases performance of [SELECT](../../sql-reference/statements/select/index.md) queries for many applications. -The efficiency of using `LowCarditality` data type depends on data diversity. If a dictionary contains less than 10,000 distinct values, then ClickHouse mostly shows higher efficiency of data reading and storing. If a dictionary contains more than 100,000 distinct values, then ClickHouse can perform worse in comparison with using ordinary data types. +The efficiency of using `LowCardinality` data type depends on data diversity. If a dictionary contains less than 10,000 distinct values, then ClickHouse mostly shows higher efficiency of data reading and storing. If a dictionary contains more than 100,000 distinct values, then ClickHouse can perform worse in comparison with using ordinary data types. Consider using `LowCardinality` instead of [Enum](../../sql-reference/data-types/enum.md) when working with strings. `LowCardinality` provides more flexibility in use and often reveals the same or higher efficiency. diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 67361c350c7..e5c321041c2 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -516,14 +516,14 @@ Result: **See Also** -- \[ISO 8601 announcement by @xkcd\](https://xkcd.com/1179/) +- [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/) - [RFC 1123](https://tools.ietf.org/html/rfc1123) - [toDate](#todate) - [toDateTime](#todatetime) ## parseDateTimeBestEffortUS {#parsedatetimebesteffortUS} -This function is similar to [‘parseDateTimeBestEffort’](#parsedatetimebesteffort), the only difference is that this function prefers US style (`MM/DD/YYYY` etc) in case of ambiguouty. +This function is similar to [‘parseDateTimeBestEffort’](#parsedatetimebesteffort), the only difference is that this function prefers US date format (`MM/DD/YYYY` etc.) in case of ambiguity. 
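A short sketch of the preference described above, using a date that is ambiguous between `DD/MM/YYYY` and `MM/DD/YYYY`. The connection details are placeholders, and the values in the comments are assumptions based on the stated difference between the two functions, not output copied from a server.

```python
# Illustration only: the US variant prefers MM/DD/YYYY for ambiguous dates.
import clickhouse_driver

client = clickhouse_driver.Client(host='localhost', port=9000)

ambiguous = '09/08/2020 10:00:00'
print(client.execute(f"SELECT parseDateTimeBestEffort('{ambiguous}')"))
# assumed: 2020-08-09 10:00:00 (day first)
print(client.execute(f"SELECT parseDateTimeBestEffortUS('{ambiguous}')"))
# assumed: 2020-09-08 10:00:00 (month first)
```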
**Syntax** @@ -541,7 +541,7 @@ parseDateTimeBestEffortUS(time_string [, time_zone]); - A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time). - A string with a date and a time component: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc. - A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY` etc. -- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` are substituted as `2000-01`. +- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case, `YYYY-MM` are substituted as `2000-01`. - A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`. **Returned value** diff --git a/docs/ru/commercial/index.md b/docs/ru/commercial/index.md index 6bc3c781161..c6c440c17e8 100644 --- a/docs/ru/commercial/index.md +++ b/docs/ru/commercial/index.md @@ -6,4 +6,14 @@ toc_title: "\u041A\u043E\u043C\u043C\u0435\u0440\u0447\u0435\u0441\u043A\u0438\u \ \u0443\u0441\u043B\u0443\u0433\u0438" --- +# Коммерческие услуги {#clickhouse-commercial-services} +Данный раздел содержит описание коммерческих услуг, предоставляемых для ClickHouse. Поставщики этих услуг — независимые компании, которые могут не быть аффилированы с Яндексом. + +Категории услуг: + +- Облачные услуги [Cloud](../commercial/cloud.md) +- Поддержка [Support](../commercial/support.md) + +!!! note "Для поставщиков услуг" + Если вы — представитель компании-поставщика услуг, вы можете отправить запрос на добавление вашей компании и ваших услуг в соответствующий раздел данной документации (или на добавление нового раздела, если ваши услуги не соответствуют ни одной из существующих категорий). Чтобы отправить запрос (pull-request) на добавление описания в документацию, нажмите на значок "карандаша" в правом верхнем углу страницы. Если ваши услуги доступны в только отдельных регионах, не забудьте указать это на соответствующих локализованных страницах (и обязательно отметьте это при отправке заявки). diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index 3c80fe663f1..2f89317a0eb 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -43,9 +43,6 @@ ORDER BY expr Описание параметров смотрите в [описании запроса CREATE](../../../engines/table-engines/mergetree-family/mergetree.md). -!!! note "Примечание" - `INDEX` — экспериментальная возможность, смотрите [Индексы пропуска данных](#table_engine-mergetree-data_skipping-indexes). - ### Секции запроса {#mergetree-query-clauses} - `ENGINE` — имя и параметры движка. `ENGINE = MergeTree()`. `MergeTree` не имеет параметров. @@ -269,7 +266,7 @@ ClickHouse не может использовать индекс, если зн ClickHouse использует эту логику не только для последовательностей дней месяца, но и для любого частично-монотонного первичного ключа. -### Индексы пропуска данных (экспериментальная функциональность) {#table_engine-mergetree-data_skipping-indexes} +### Индексы пропуска данных {#table_engine-mergetree-data_skipping-indexes} Объявление индексов при определении столбцов в запросе `CREATE`. @@ -566,7 +563,7 @@ ALTER TABLE example_table - `volume_name_N` — название тома. Названия томов должны быть уникальны. - `disk` — диск, находящийся внутри тома. 
- `max_data_part_size_bytes` — максимальный размер куска данных, который может находится на любом из дисков этого тома. -- `move_factor` — доля свободного места, при превышении которого данные начинают перемещаться на следующий том, если он есть (по умолчанию 0.1). +- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). Примеры конфигураций: diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 04bca115974..dd68f7eb646 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -1050,13 +1050,13 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_ Для обмена данными с экосистемой Hadoop можно использовать движки таблиц [HDFS](../engines/table-engines/integrations/hdfs.md). -## Arrow {data-format-arrow} +## Arrow {#data-format-arrow} [Apache Arrow](https://arrow.apache.org/) поставляется с двумя встроенными поколоночнами форматами хранения. ClickHouse поддерживает операции чтения и записи для этих форматов. `Arrow` — это Apache Arrow's "file mode" формат. Он предназначен для произвольного доступа в памяти. -## ArrowStream {data-format-arrow-stream} +## ArrowStream {#data-format-arrow-stream} `ArrowStream` — это Apache Arrow's "stream mode" формат. Он предназначен для обработки потоков в памяти. diff --git a/docs/ru/operations/settings/query-complexity.md b/docs/ru/operations/settings/query-complexity.md index 74c99968bc0..d228732acdf 100644 --- a/docs/ru/operations/settings/query-complexity.md +++ b/docs/ru/operations/settings/query-complexity.md @@ -56,6 +56,32 @@ Что делать, когда количество прочитанных данных превысило одно из ограничений: throw или break. По умолчанию: throw. +## max\_rows\_to\_read_leaf {#max-rows-to-read-leaf} + +Следующие ограничения могут проверяться на каждый блок (а не на каждую строку). То есть, ограничения могут быть немного нарушены. + +Максимальное количество строчек, которое можно прочитать из таблицы на удалённом сервере при выполнении +распределенного запроса. Распределенные запросы могут создавать несколько подзапросов к каждому из шардов в кластере и +тогда этот лимит будет применен при выполнении чтения на удаленных серверах (включая и сервер-инициатор) и проигнорирован +на сервере-инициаторе запроса во время обьединения полученных результатов. Например, кластер состоит из 2 шард и каждый +из них хранит таблицу с 100 строк. Тогда распределнный запрос для получения всех данных из этих таблиц и установленной +настройкой `max_rows_to_read=150` выбросит исключение, т.к. в общем он прочитает 200 строк. Но запрос +с настройкой `max_rows_to_read_leaf=150` завершится успешно, потому что каждый из шардов прочитает максимум 100 строк. + +## max\_bytes\_to\_read_leaf {#max-bytes-to-read-leaf} + +Максимальное количество байт (несжатых данных), которое можно прочитать из таблицы на удалённом сервере при +выполнении распределенного запроса. Распределенные запросы могут создавать несколько подзапросов к каждому из шардов в +кластере и тогда этот лимит будет применен при выполнении чтения на удаленных серверах (включая и сервер-инициатор) +и проигнорирован на сервере-инициаторе запроса во время обьединения полученных результатов. Например, кластер состоит +из 2 шард и каждый из них хранит таблицу со 100 байтами. Тогда распределнный запрос для получения всех данных из этих таблиц +и установленной настройкой `max_bytes_to_read=150` выбросит исключение, т.к. 
в общем он прочитает 200 байт. Но запрос +с настройкой `max_bytes_to_read_leaf=150` завершится успешно, потому что каждый из шардов прочитает максимум 100 байт. + +## read\_overflow\_mode_leaf {#read-overflow-mode-leaf} + +Что делать, когда количество прочитанных данных на удаленном сервере превысило одно из ограничений: throw или break. По умолчанию: throw. + ## max\_rows\_to\_group\_by {#settings-max-rows-to-group-by} Максимальное количество уникальных ключей, получаемых в процессе агрегации. Позволяет ограничить потребление оперативки при агрегации. diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 2c6e0f05fb5..21910bf7d77 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -484,7 +484,7 @@ INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), ( См. также: -- [JOIN strictness](../../sql-reference/statements/select/join.md#select-join-strictness) +- [JOIN strictness](../../sql-reference/statements/select/join.md#join-settings) ## max\_block\_size {#setting-max_block_size} @@ -1616,6 +1616,63 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1; - [Обработка значения NULL в операторе IN](../../sql-reference/operators/in.md#in-null-processing) +## low\_cardinality\_max\_dictionary\_size {#low_cardinality_max_dictionary_size} + +Задает максимальный размер общего глобального словаря (в строках) для типа данных `LowCardinality`, который может быть записан в файловую систему хранилища. Настройка предотвращает проблемы с оперативной памятью в случае неограниченного увеличения словаря. Все данные, которые не могут быть закодированы из-за ограничения максимального размера словаря, ClickHouse записывает обычным способом. + +Допустимые значения: + +- Положительное целое число. + +Значение по умолчанию: 8192. + +## low\_cardinality\_use\_single\_dictionary\_for\_part {#low_cardinality_use_single_dictionary_for_part} + +Включает или выключает использование единого словаря для куска (парта). + +По умолчанию сервер ClickHouse следит за размером словарей, и если словарь переполняется, сервер создает следующий. Чтобы запретить создание нескольких словарей, задайте настройку `low_cardinality_use_single_dictionary_for_part = 1`. + +Допустимые значения: + +- 1 — Создание нескольких словарей для частей данных запрещено. +- 0 — Создание нескольких словарей для частей данных не запрещено. + +Значение по умолчанию: 0. + +## low\_cardinality\_allow\_in\_native\_format {#low_cardinality_allow_in_native_format} + +Разрешает или запрещает использование типа данных `LowCardinality` с форматом данных [Native](../../interfaces/formats.md#native). + +Если использование типа `LowCardinality` ограничено, сервер CLickHouse преобразует столбцы `LowCardinality` в обычные столбцы для запросов `SELECT`, а обычные столбцы - в столбцы `LowCardinality` для запросов `INSERT`. + +В основном настройка используется для сторонних клиентов, не поддерживающих тип данных `LowCardinality`. + +Допустимые значения: + +- 1 — Использование `LowCardinality` не ограничено. +- 0 — Использование `LowCardinality` ограничено. + +Значение по умолчанию: 1. + +## allow\_suspicious\_low\_cardinality\_types {#allow_suspicious_low_cardinality_types} + +Разрешает или запрещает использование типа данных `LowCardinality` с типами данных с фиксированным размером 8 байт или меньше: числовые типы данных и `FixedString (8_bytes_or_less)`. 
+ +Для небольших фиксированных значений использование `LowCardinality` обычно неэффективно, поскольку ClickHouse хранит числовой индекс для каждой строки. В результате: + +- Используется больше дискового пространства. +- Потребление ОЗУ увеличивается, в зависимости от размера словаря. +- Некоторые функции работают медленнее из-за дополнительных операций кодирования. + +Время слияния в таблицах на движке [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) также может увеличиться по описанным выше причинам. + +Допустимые значения: + +- 1 — Использование `LowCardinality` не ограничено. +- 0 — Использование `LowCardinality` ограничено. + +Значение по умолчанию: 0. + ## background_buffer_flush_schedule_pool_size {#background_buffer_flush_schedule_pool_size} Задает количество потоков для выполнения фонового сброса данных в таблицах с движком [Buffer](../../engines/table-engines/special/buffer.md). Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе. @@ -1756,6 +1813,60 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1; - [Секции и настройки запроса CREATE TABLE](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-query-clauses) (настройка `merge_with_ttl_timeout`) - [Table TTL](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) +## output_format_pretty_max_value_width {#output_format_pretty_max_value_width} + +Ограничивает длину значения, выводимого в формате [Pretty](../../interfaces/formats.md#pretty). Если значение длиннее указанного количества символов, оно обрезается. + +Возможные значения: + +- Положительное целое число. +- 0 — значение обрезается полностью. + +Значение по умолчанию: `10000` символов. + +**Примеры** + +Запрос: + +```sql +SET output_format_pretty_max_value_width = 10; +SELECT range(number) FROM system.numbers LIMIT 10 FORMAT PrettyCompactNoEscapes; +``` +Результат: + +```text +┌─range(number)─┐ +│ [] │ +│ [0] │ +│ [0,1] │ +│ [0,1,2] │ +│ [0,1,2,3] │ +│ [0,1,2,3,4⋯ │ +│ [0,1,2,3,4⋯ │ +│ [0,1,2,3,4⋯ │ +│ [0,1,2,3,4⋯ │ +│ [0,1,2,3,4⋯ │ +└───────────────┘ +``` + +Запрос, где длина выводимого значения ограничена 0 символов: + +```sql +SET output_format_pretty_max_value_width = 0; +SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes; +``` +Результат: + +```text +┌─range(number)─┐ +│ ⋯ │ +│ ⋯ │ +│ ⋯ │ +│ ⋯ │ +│ ⋯ │ +└───────────────┘ +``` + ## lock_acquire_timeout {#lock_acquire_timeout} Устанавливает, сколько секунд сервер ожидает возможности выполнить блокировку таблицы. diff --git a/docs/ru/operations/system-tables/storage_policies.md b/docs/ru/operations/system-tables/storage_policies.md index f937654ab9a..df5c920b5ba 100644 --- a/docs/ru/operations/system-tables/storage_policies.md +++ b/docs/ru/operations/system-tables/storage_policies.md @@ -9,7 +9,7 @@ - `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — порядковый номер тома согласно конфигурации. - `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — имена дисков, содержащихся в политике хранения. - `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — максимальный размер куска данных, который может храниться на дисках тома (0 — без ограничений). -- `move_factor` ([Float64](../../sql-reference/data-types/float.md))\` — доля свободного места, при превышении которой данные начинают перемещаться на следующий том. 
+- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). Если политика хранения содержит несколько томов, то каждому тому соответствует отдельная запись в таблице. diff --git a/docs/ru/operations/system-tables/tables.md b/docs/ru/operations/system-tables/tables.md index 7b3ea0037b8..52de10871b2 100644 --- a/docs/ru/operations/system-tables/tables.md +++ b/docs/ru/operations/system-tables/tables.md @@ -24,13 +24,16 @@ - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) - [Distributed](../../engines/table-engines/special/distributed.md#distributed) -- `total_rows` (Nullable(UInt64)) - Общее количество строк, если есть возможность быстро определить точное количество строк в таблице, в противном случае `Null` (включая базовую таблицу `Buffer`). +- `total_rows` (Nullable(UInt64)) - общее количество строк, если есть возможность быстро определить точное количество строк в таблице, в противном случае `Null` (включая базовую таблицу `Buffer`). -- `total_bytes` (Nullable(UInt64)) - Общее количество байт, если можно быстро определить точное количество байт для таблицы на накопителе, в противном случае `Null` (**не включает** в себя никакого базового хранилища). +- `total_bytes` (Nullable(UInt64)) - общее количество байт, если можно быстро определить точное количество байт для таблицы на накопителе, в противном случае `Null` (**не включает** в себя никакого базового хранилища). - Если таблица хранит данные на диске, возвращает используемое пространство на диске (т. е. сжатое). - Если таблица хранит данные в памяти, возвращает приблизительное количество используемых байт в памяти. +- `lifetime_rows` (Nullable(UInt64)) - общее количество строк, добавленных оператором `INSERT` с момента запуска сервера (только для таблиц `Buffer`). + +- `lifetime_bytes` (Nullable(UInt64)) - общее количество байт, добавленных оператором `INSERT` с момента запуска сервера (только для таблиц `Buffer`). Таблица `system.tables` используется при выполнении запроса `SHOW TABLES`. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md index c01636e155d..a4be18b75ec 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md @@ -4,7 +4,7 @@ toc_priority: 128 # groupBitmap {#groupbitmap} -Bitmap или агрегатные вычисления для столбца с типом данных `UInt*`, возвращают кардинальность в виде значения типа UInt64, если добавить суффикс -State, то возвращают [объект bitmap](../../../sql-reference/functions/bitmap-functions.md). +Bitmap или агрегатные вычисления для столбца с типом данных `UInt*`, возвращают кардинальность в виде значения типа UInt64, если добавить суффикс `-State`, то возвращают [объект bitmap](../../../sql-reference/functions/bitmap-functions.md#bitmap-functions). 
``` sql groupBitmap(expr) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/maxmap.md b/docs/ru/sql-reference/aggregate-functions/reference/maxmap.md new file mode 100644 index 00000000000..af817ee1d04 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/maxmap.md @@ -0,0 +1,28 @@ +--- +toc_priority: 143 +--- + +# maxMap {#agg_functions-maxmap} + +Синтаксис: `maxMap(key, value)` or `maxMap(Tuple(key, value))` + +Вычисляет максимальные значения массива `value`, соответствующие ключам, указанным в массиве `key`. + +Передача кортежа ключей и массивов значений идентична передаче двух массивов ключей и значений. + +Количество элементов в параметрах `key` и `value` должно быть одинаковым для каждой суммируемой строки. + +Возвращает кортеж из двух массивов: ключи и значения, рассчитанные для соответствующих ключей. + +Пример: + +``` sql +SELECT maxMap(a, b) +FROM values('a Array(Int32), b Array(Int64)', ([1, 2], [2, 2]), ([2, 3], [1, 1])) +``` + +``` text +┌─maxMap(a, b)──────┐ +│ ([1,2,3],[2,2,1]) │ +└───────────────────┘ +``` diff --git a/docs/ru/sql-reference/aggregate-functions/reference/minmap.md b/docs/ru/sql-reference/aggregate-functions/reference/minmap.md new file mode 100644 index 00000000000..e6def16e583 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/minmap.md @@ -0,0 +1,28 @@ +--- +toc_priority: 142 +--- + +# minMap {#agg_functions-minmap} + +Синтаксис: `minMap(key, value)` or `minMap(Tuple(key, value))` + +Вычисляет минимальное значение массива `value` в соответствии с ключами, указанными в массиве `key`. + +Передача кортежа ключей и массивов значений идентична передаче двух массивов ключей и значений. + +Количество элементов в параметрах `key` и `value` должно быть одинаковым для каждой суммируемой строки. + +Возвращает кортеж из двух массивов: ключи в отсортированном порядке и значения, рассчитанные для соответствующих ключей. + +Пример: + +``` sql +SELECT minMap(a, b) +FROM values('a Array(Int32), b Array(Int64)', ([1, 2], [2, 2]), ([2, 3], [1, 1])) +``` + +``` text +┌─minMap(a, b)──────┐ +│ ([1,2,3],[2,1,1]) │ +└───────────────────┘ +``` diff --git a/docs/ru/sql-reference/data-types/aggregatefunction.md b/docs/ru/sql-reference/data-types/aggregatefunction.md index 07983885bde..22825deb3eb 100644 --- a/docs/ru/sql-reference/data-types/aggregatefunction.md +++ b/docs/ru/sql-reference/data-types/aggregatefunction.md @@ -1,3 +1,8 @@ +--- +toc_priority: 53 +toc_title: AggregateFunction +--- + # AggregateFunction {#data-type-aggregatefunction} Агрегатные функции могут обладать определяемым реализацией промежуточным состоянием, которое может быть сериализовано в тип данных, соответствующий AggregateFunction(…), и быть записано в таблицу обычно посредством [материализованного представления] (../../sql-reference/statements/create.md#create-view). Чтобы получить промежуточное состояние, обычно используются агрегатные функции с суффиксом `-State`. Чтобы в дальнейшем получить агрегированные данные необходимо использовать те же агрегатные функции с суффиксом `-Merge`. diff --git a/docs/ru/sql-reference/data-types/array.md b/docs/ru/sql-reference/data-types/array.md index 09973d8162c..906246b66ee 100644 --- a/docs/ru/sql-reference/data-types/array.md +++ b/docs/ru/sql-reference/data-types/array.md @@ -1,3 +1,8 @@ +--- +toc_priority: 52 +toc_title: Array(T) +--- + # Array(T) {#data-type-array} Массив из элементов типа `T`. 
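As a rough illustration of the `-State`/`-Merge` workflow described in the `AggregateFunction` section above; the table name `agg_visits` and the choice of the `uniq` function are assumptions used only for the sketch:

``` sql
-- Store intermediate aggregation states instead of raw values.
CREATE TABLE agg_visits
(
    hour DateTime,
    users AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree()
ORDER BY hour;

-- The -State suffix produces the serialized intermediate state.
INSERT INTO agg_visits
SELECT toStartOfHour(now()) AS hour, uniqState(number)
FROM numbers(1000)
GROUP BY hour;

-- The -Merge suffix combines the stored states back into a final value.
SELECT hour, uniqMerge(users) AS unique_users
FROM agg_visits
GROUP BY hour;
```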
diff --git a/docs/ru/sql-reference/data-types/lowcardinality.md b/docs/ru/sql-reference/data-types/lowcardinality.md new file mode 100644 index 00000000000..ec9e4e7588e --- /dev/null +++ b/docs/ru/sql-reference/data-types/lowcardinality.md @@ -0,0 +1,59 @@ +--- +toc_priority: 51 +toc_title: LowCardinality +--- + +# LowCardinality {#lowcardinality-data-type} + +Изменяет внутреннее представление других типов данных, превращая их в тип со словарным кодированием. + +## Синтаксис {#lowcardinality-syntax} + +```sql +LowCardinality(data_type) +``` + +**Параметры** + +- `data_type` — [String](string.md), [FixedString](fixedstring.md), [Date](date.md), [DateTime](datetime.md) и числа за исключением типа [Decimal](decimal.md). `LowCardinality` неэффективен для некоторых типов данных, см. описание настройки [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types). + +## Описание {#lowcardinality-dscr} + +`LowCardinality` — это надстройка, изменяющая способ хранения и правила обработки данных. ClickHouse применяет [словарное кодирование](https://en.wikipedia.org/wiki/Dictionary_coder) к столбцам типа `LowCardinality`. Работа с данными, представленными в словарном виде, может значительно увеличивать производительность запросов [SELECT](../statements/select/index.md) для многих приложений. + +Эффективность использования типа данных `LowCardinality` зависит от разнообразия данных. Если словарь содержит менее 10 000 различных значений, ClickHouse в основном показывает более высокую эффективность чтения и хранения данных. Если же словарь содержит более 100 000 различных значений, ClickHouse может работать хуже, чем при использовании обычных типов данных. + +При работе со строками рекомендуется использовать `LowCardinality` вместо [Enum](enum.md). `LowCardinality` обеспечивает большую гибкость в использовании и часто показывает такую же или более высокую эффективность. + +## Пример + +Создать таблицу со столбцами типа `LowCardinality`: + +```sql +CREATE TABLE lc_t +( + `id` UInt16, + `strings` LowCardinality(String) +) +ENGINE = MergeTree() +ORDER BY id +``` + +## Связанные настройки и функции + +Настройки: + +- [low_cardinality_max_dictionary_size](../../operations/settings/settings.md#low_cardinality_max_dictionary_size) +- [low_cardinality_use_single_dictionary_for_part](../../operations/settings/settings.md#low_cardinality_use_single_dictionary_for_part) +- [low_cardinality_allow_in_native_format](../../operations/settings/settings.md#low_cardinality_allow_in_native_format) +- [allow_suspicious_low_cardinality_types](../../operations/settings/settings.md#allow_suspicious_low_cardinality_types) + +Функции: + +- [toLowCardinality](../functions/type-conversion-functions.md#tolowcardinality) + +## Смотрите также + +- [A Magical Mystery Tour of the LowCardinality Data Type](https://www.altinity.com/blog/2019/3/27/low-cardinality). + +- [Reducing Clickhouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/). + +- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf). 
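Continuing the `lc_t` example from the page above, a minimal sketch of inserting and querying typical low-cardinality data; the generated values are illustrative only:

``` sql
-- 10 000 rows but only 100 distinct strings: a good fit for dictionary encoding.
INSERT INTO lc_t
SELECT toUInt16(number), toString(number % 100)
FROM numbers(10000);

SELECT strings, count() AS c
FROM lc_t
GROUP BY strings
ORDER BY c DESC
LIMIT 5;
```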
diff --git a/docs/ru/sql-reference/data-types/nullable.md b/docs/ru/sql-reference/data-types/nullable.md index 5ed99469750..71e1f7a37a0 100644 --- a/docs/ru/sql-reference/data-types/nullable.md +++ b/docs/ru/sql-reference/data-types/nullable.md @@ -1,3 +1,8 @@ +--- +toc_priority: 55 +toc_title: Nullable +--- + # Nullable(TypeName) {#data_type-nullable} Позволяет работать как со значением типа `TypeName` так и с отсутствием этого значения ([NULL](../../sql-reference/data-types/nullable.md)) в одной и той же переменной, в том числе хранить `NULL` в таблицах вместе со значения типа `TypeName`. Например, в столбце типа `Nullable(Int8)` можно хранить значения типа `Int8`, а в тех строках, где значения нет, будет храниться `NULL`. diff --git a/docs/ru/sql-reference/data-types/tuple.md b/docs/ru/sql-reference/data-types/tuple.md index 0a1089d1aef..e2a1450b47f 100644 --- a/docs/ru/sql-reference/data-types/tuple.md +++ b/docs/ru/sql-reference/data-types/tuple.md @@ -1,3 +1,8 @@ +--- +toc_priority: 54 +toc_title: Tuple(T1, T2, ...) +--- + # Tuple(T1, T2, …) {#tuplet1-t2} Кортеж из элементов любого [типа](index.md#data_types). Элементы кортежа могут быть одного или разных типов. diff --git a/docs/ru/sql-reference/functions/bitmap-functions.md b/docs/ru/sql-reference/functions/bitmap-functions.md index c91725c7a39..c5b0646aa79 100644 --- a/docs/ru/sql-reference/functions/bitmap-functions.md +++ b/docs/ru/sql-reference/functions/bitmap-functions.md @@ -1,4 +1,4 @@ -# Функции для битмапов {#funktsii-dlia-bitmapov} +# Функции для битмапов {#bitmap-functions} ## bitmapBuild {#bitmap_functions-bitmapbuild} @@ -61,8 +61,8 @@ bitmapSubsetLimit(bitmap, range_start, cardinality_limit) **Параметры** - `bitmap` – Битмап. [Bitmap object](#bitmap_functions-bitmapbuild). -- `range_start` – Начальная точка подмножества. [UInt32](../../sql-reference/functions/bitmap-functions.md). -- `cardinality_limit` – Верхний предел подмножества. [UInt32](../../sql-reference/functions/bitmap-functions.md). +- `range_start` – Начальная точка подмножества. [UInt32](../../sql-reference/functions/bitmap-functions.md#bitmap-functions). +- `cardinality_limit` – Верхний предел подмножества. [UInt32](../../sql-reference/functions/bitmap-functions.md#bitmap-functions). **Возвращаемое значение** @@ -97,7 +97,7 @@ bitmapContains(haystack, needle) **Параметры** - `haystack` – [объект Bitmap](#bitmap_functions-bitmapbuild), в котором функция ищет значение. -- `needle` – значение, которое функция ищет. Тип — [UInt32](../../sql-reference/functions/bitmap-functions.md). +- `needle` – значение, которое функция ищет. Тип — [UInt32](../../sql-reference/functions/bitmap-functions.md#bitmap-functions). 
**Возвращаемые значения** diff --git a/docs/ru/sql-reference/functions/random-functions.md b/docs/ru/sql-reference/functions/random-functions.md index 4aaaef5cb5d..21dcfeeb3c0 100644 --- a/docs/ru/sql-reference/functions/random-functions.md +++ b/docs/ru/sql-reference/functions/random-functions.md @@ -100,5 +100,6 @@ FROM numbers(3) │ a*cjab+ │ │ aeca2A │ └───────────────────────────────────────┘ +``` [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/random_functions/) diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index c7d74a9d881..9ceb105095d 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -508,11 +508,85 @@ SELECT parseDateTimeBestEffort('10 20:19') **См. также** -- \[Информация о формате ISO 8601 от @xkcd\](https://xkcd.com/1179/) +- [Информация о формате ISO 8601 от @xkcd](https://xkcd.com/1179/) - [RFC 1123](https://tools.ietf.org/html/rfc1123) - [toDate](#todate) - [toDateTime](#todatetime) +## parseDateTimeBestEffortUS {#parsedatetimebesteffortUS} + +Эта функция похожа на [parseDateTimeBestEffort](#parsedatetimebesteffort), но разница состоит в том, что она предполагает американский формат даты (`MM/DD/YYYY` etc.) в случае неоднозначности. + +**Синтаксис** + +``` sql +parseDateTimeBestEffortUS(time_string [, time_zone]); +``` + +**Параметры** + +- `time_string` — строка, содержащая дату и время для преобразования. [String](../../sql-reference/data-types/string.md). +- `time_zone` — часовой пояс. Функция анализирует `time_string` в соответствии с часовым поясом. [String](../../sql-reference/data-types/string.md). + +**Поддерживаемые нестандартные форматы** + +- Строка, содержащая 9-10 цифр [unix timestamp](https://en.wikipedia.org/wiki/Unix_time). +- Строка, содержащая дату и время: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc. +- Строка с датой, но без времени: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY` etc. +- Строка, содержащая день и время: `DD`, `DD hh`, `DD hh:mm`. В этом случае `YYYY-MM` заменяется на `2000-01`. +- Строка, содержащая дату и время, а также информацию о часовом поясе: `YYYY-MM-DD hh:mm:ss ±h:mm` и т.д. Например, `2020-12-12 17:36:00 -5:00`. + +**Возвращаемое значение** + +- `time_string` преобразован в тип данных `DateTime`. + +**Примеры** + +Запрос: + +``` sql +SELECT parseDateTimeBestEffortUS('09/12/2020 12:12:57') +AS parseDateTimeBestEffortUS; +``` + +Ответ: + +``` text +┌─parseDateTimeBestEffortUS─┐ +│ 2020-09-12 12:12:57 │ +└───────────────────────────┘ +``` + +Запрос: + +``` sql +SELECT parseDateTimeBestEffortUS('09-12-2020 12:12:57') +AS parseDateTimeBestEffortUS; +``` + +Ответ: + +``` text +┌─parseDateTimeBestEffortUS─┐ +│ 2020-09-12 12:12:57 │ +└───────────────────────────┘ +``` + +Запрос: + +``` sql +SELECT parseDateTimeBestEffortUS('09.12.2020 12:12:57') +AS parseDateTimeBestEffortUS; +``` + +Ответ: + +``` text +┌─parseDateTimeBestEffortUS─┐ +│ 2020-09-12 12:12:57 │ +└───────────────────────────┘ +``` + ## toUnixTimestamp64Milli ## toUnixTimestamp64Micro ## toUnixTimestamp64Nano @@ -604,4 +678,43 @@ SELECT fromUnixTimestamp64Milli(i64, 'UTC') └──────────────────────────────────────┘ ``` +## toLowCardinality {#tolowcardinality} + +Преобразует входные данные в версию [LowCardinality](../data-types/lowcardinality.md) того же типа данных. 
+ +Чтобы преобразовать данные из типа `LowCardinality`, используйте функцию [CAST](#type_conversion_function-cast). Например, `CAST(x as String)`. + +**Синтаксис** + +```sql +toLowCardinality(expr) +``` + +**Параметры** + +- `expr` — [Выражение](../syntax.md#syntax-expressions), которое в результате преобразуется в один из [поддерживаемых типов данных](../data-types/index.md#data_types). + + +**Возвращаемое значение** + +- Результат преобразования `expr`. + +Тип: `LowCardinality(expr_result_type)` + +**Example** + +Запрос: + +```sql +SELECT toLowCardinality('1') +``` + +Результат: + +```text +┌─toLowCardinality('1')─┐ +│ 1 │ +└───────────────────────┘ +``` + [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/type_conversion_functions/) diff --git a/docs/ru/sql-reference/statements/index.md b/docs/ru/sql-reference/statements/index.md index 4dad718d721..c7862015e64 100644 --- a/docs/ru/sql-reference/statements/index.md +++ b/docs/ru/sql-reference/statements/index.md @@ -3,4 +3,28 @@ toc_folder_title: "\u0412\u044B\u0440\u0430\u0436\u0435\u043D\u0438\u044F" toc_priority: 31 --- +# SQL выражения в ClickHouse {#clickhouse-sql-statements} +Выражения описывают различные действия, которые можно выполнить с помощью SQL запросов. Каждый вид выражения имеет свой синтаксис и особенности использования, которые описаны в соответствующих разделах документации: + +- [SELECT](../../sql-reference/statements/select/index.md) +- [INSERT INTO](../../sql-reference/statements/insert-into.md) +- [CREATE](../../sql-reference/statements/create/index.md) +- [ALTER](../../sql-reference/statements/alter/index.md) +- [SYSTEM](../../sql-reference/statements/system.md) +- [SHOW](../../sql-reference/statements/show.md) +- [GRANT](../../sql-reference/statements/grant.md) +- [REVOKE](../../sql-reference/statements/revoke.md) +- [ATTACH](../../sql-reference/statements/attach.md) +- [CHECK TABLE](../../sql-reference/statements/check-table.md) +- [DESCRIBE TABLE](../../sql-reference/statements/describe-table.md) +- [DETACH](../../sql-reference/statements/detach.md) +- [DROP](../../sql-reference/statements/drop.md) +- [EXISTS](../../sql-reference/statements/exists.md) +- [KILL](../../sql-reference/statements/kill.md) +- [OPTIMIZE](../../sql-reference/statements/optimize.md) +- [RENAME](../../sql-reference/statements/rename.md) +- [SET](../../sql-reference/statements/set.md) +- [SET ROLE](../../sql-reference/statements/set-role.md) +- [TRUNCATE](../../sql-reference/statements/truncate.md) +- [USE](../../sql-reference/statements/use.md) diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index 9e916489ea4..e31e43b99cd 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -18,11 +18,11 @@ Markdown==3.2.1 MarkupSafe==1.1.1 mkdocs==1.1.2 mkdocs-htmlproofer-plugin==0.0.3 -mkdocs-macros-plugin==0.4.9 +mkdocs-macros-plugin==0.4.13 nltk==3.5 nose==1.3.7 protobuf==3.13.0 -numpy==1.19.1 +numpy==1.19.2 Pygments==2.5.2 pymdown-extensions==8.0 python-slugify==4.0.1 diff --git a/docs/tools/test.py b/docs/tools/test.py index 5c0cf4b799d..d963d34df08 100755 --- a/docs/tools/test.py +++ b/docs/tools/test.py @@ -92,7 +92,7 @@ def test_single_page(input_path, lang): logging.warning('Found %d duplicate anchor points' % duplicate_anchor_points) if links_to_nowhere: - if lang == 'en': # TODO: check all languages again + if lang == 'en' or lang == 'ru': # TODO: check all languages again logging.error(f'Found {links_to_nowhere} links to nowhere in {lang}') sys.exit(1) else: diff 
--git a/docs/zh/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/zh/engines/table-engines/mergetree-family/aggregatingmergetree.md index e931b6f6710..03825a41f95 100644 --- a/docs/zh/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/aggregatingmergetree.md @@ -1,12 +1,15 @@ # AggregatingMergeTree {#aggregatingmergetree} -该引擎继承自 [MergeTree](mergetree.md),并改变了数据片段的合并逻辑。 ClickHouse 会将相同主键的所有行(在一个数据片段内)替换为单个存储一系列聚合函数状态的行。 +该引擎继承自 [MergeTree](mergetree.md),并改变了数据片段的合并逻辑。 ClickHouse 会将一个数据片段内所有具有相同主键(准确的说是 [排序键](../../../engines/table-engines/mergetree-family/mergetree.md))的行替换成一行,这一行会存储一系列聚合函数的状态。 -可以使用 `AggregatingMergeTree` 表来做增量数据统计聚合,包括物化视图的数据聚合。 +可以使用 `AggregatingMergeTree` 表来做增量数据的聚合统计,包括物化视图的数据聚合。 -引擎需使用 [AggregateFunction](../../../engines/table-engines/mergetree-family/aggregatingmergetree.md) 类型来处理所有列。 +引擎使用以下类型来处理所有列: -如果要按一组规则来合并减少行数,则使用 `AggregatingMergeTree` 是合适的。 +- [AggregateFunction](../../../sql-reference/data-types/aggregatefunction.md) +- [SimpleAggregateFunction](../../../sql-reference/data-types/simpleaggregatefunction.md) + +`AggregatingMergeTree` 适用于能够按照一定的规则缩减行数的情况。 ## 建表 {#jian-biao} @@ -20,10 +23,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [PARTITION BY expr] [ORDER BY expr] [SAMPLE BY expr] +[TTL expr] [SETTINGS name=value, ...] ``` -语句参数的说明,请参阅 [语句描述](../../../engines/table-engines/mergetree-family/aggregatingmergetree.md)。 +语句参数的说明,请参阅 [建表语句描述](../../../sql-reference/statements/create.md#create-table-query)。 **子句** @@ -33,7 +37,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] 已弃用的建表方法 -!!! 注意 "注意" +!!! attention "注意" 不要在新项目中使用该方法,可能的话,请将旧项目切换到上述方法。 ``` sql @@ -45,15 +49,15 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE [=] AggregatingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity) ``` -上面的所有参数跟 `MergeTree` 中的一样。 +上面的所有参数的含义跟 `MergeTree` 中的一样。 ## SELECT 和 INSERT {#select-he-insert} -插入数据,需使用带有聚合 -State- 函数的 [INSERT SELECT](../../../engines/table-engines/mergetree-family/aggregatingmergetree.md) 语句。 +要插入数据,需使用带有 -State- 聚合函数的 [INSERT SELECT](../../../sql-reference/statements/insert-into.md) 语句。 从 `AggregatingMergeTree` 表中查询数据时,需使用 `GROUP BY` 子句并且要使用与插入时相同的聚合函数,但后缀要改为 `-Merge` 。 -在 `SELECT` 查询的结果中,对于 ClickHouse 的所有输出格式 `AggregateFunction` 类型的值都实现了特定的二进制表示法。如果直接用 `SELECT` 导出这些数据,例如如用 `TabSeparated` 格式,那么这些导出数据也能直接用 `INSERT` 语句加载导入。 +对于 `SELECT` 查询的结果, `AggregateFunction` 类型的值对 ClickHouse 的所有输出格式都实现了特定的二进制表示法。在进行数据转储时,例如使用 `TabSeparated` 格式进行 `SELECT` 查询,那么这些转储数据也能直接用 `INSERT` 语句导回。 ## 聚合物化视图的示例 {#ju-he-wu-hua-shi-tu-de-shi-li} diff --git a/docs/zh/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/zh/engines/table-engines/mergetree-family/custom-partitioning-key.md index d7653ca05d6..cf3ac76c8ce 100644 --- a/docs/zh/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/zh/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -2,9 +2,9 @@ [MergeTree](mergetree.md) 系列的表(包括 [可复制表](replication.md) )可以使用分区。基于 MergeTree 表的 [物化视图](../special/materializedview.md#materializedview) 也支持分区。 -一个分区是指按指定规则逻辑组合一起的表的记录集。可以按任意标准进行分区,如按月,按日或按事件类型。为了减少需要操作的数据,每个分区都是分开存储的。访问数据时,ClickHouse 尽量使用这些分区的最小子集。 +分区是在一个表中通过指定的规则划分而成的逻辑数据集。可以按任意标准进行分区,如按月,按日或按事件类型。为了减少需要操作的数据,每个分区都是分开存储的。访问数据时,ClickHouse 尽量使用这些分区的最小子集。 -分区是在 [建表](mergetree.md#table_engine-mergetree-creating-a-table) 的 `PARTITION BY expr` 子句中指定。分区键可以是关于列的任何表达式。例如,指定按月分区,表达式为 
`toYYYYMM(date_column)`: +分区是在 [建表](mergetree.md#table_engine-mergetree-creating-a-table) 时通过 `PARTITION BY expr` 子句指定的。分区键可以是表中列的任意表达式。例如,指定按月分区,表达式为 `toYYYYMM(date_column)`: ``` sql CREATE TABLE visits @@ -30,10 +30,10 @@ ORDER BY (CounterID, StartDate, intHash32(UserID)); 新数据插入到表中时,这些数据会存储为按主键排序的新片段(块)。插入后 10-15 分钟,同一分区的各个片段会合并为一整个片段。 -!!! attention "注意" - 那些有相同分区表达式值的数据片段才会合并。这意味着 **你不应该用太精细的分区方案**(超过一千个分区)。否则,会因为文件系统中的文件数量和需要找开的文件描述符过多,导致 `SELECT` 查询效率不佳。 +!!! info "注意" + 那些有相同分区表达式值的数据片段才会合并。这意味着 **你不应该用太精细的分区方案**(超过一千个分区)。否则,会因为文件系统中的文件数量过多和需要打开的文件描述符过多,导致 `SELECT` 查询效率不佳。 -可以通过 [系统。零件](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#system_tables-parts) 表查看表片段和分区信息。例如,假设我们有一个 `visits` 表,按月分区。对 `system.parts` 表执行 `SELECT`: +可以通过 [system.parts](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#system_tables-parts) 表查看表片段和分区信息。例如,假设我们有一个 `visits` 表,按月分区。对 `system.parts` 表执行 `SELECT`: ``` sql SELECT @@ -44,55 +44,59 @@ FROM system.parts WHERE table = 'visits' ``` - ┌─partition─┬─name───────────┬─active─┐ - │ 201901 │ 201901_1_3_1 │ 0 │ - │ 201901 │ 201901_1_9_2 │ 1 │ - │ 201901 │ 201901_8_8_0 │ 0 │ - │ 201901 │ 201901_9_9_0 │ 0 │ - │ 201902 │ 201902_4_6_1 │ 1 │ - │ 201902 │ 201902_10_10_0 │ 1 │ - │ 201902 │ 201902_11_11_0 │ 1 │ - └───────────┴────────────────┴────────┘ +``` text +┌─partition─┬─name───────────┬─active─┐ +│ 201901 │ 201901_1_3_1 │ 0 │ +│ 201901 │ 201901_1_9_2 │ 1 │ +│ 201901 │ 201901_8_8_0 │ 0 │ +│ 201901 │ 201901_9_9_0 │ 0 │ +│ 201902 │ 201902_4_6_1 │ 1 │ +│ 201902 │ 201902_10_10_0 │ 1 │ +│ 201902 │ 201902_11_11_0 │ 1 │ +└───────────┴────────────────┴────────┘ +``` `partition` 列存储分区的名称。此示例中有两个分区:`201901` 和 `201902`。在 [ALTER … PARTITION](#alter_manipulations-with-partitions) 语句中你可以使用该列值来指定分区名称。 `name` 列为分区中数据片段的名称。在 [ALTER ATTACH PART](#alter_attach-partition) 语句中你可以使用此列值中来指定片段名称。 -这里我们拆解下第一部分的名称:`201901_1_3_1`: +这里我们拆解下第一个数据片段的名称:`201901_1_3_1`: - `201901` 是分区名称。 - `1` 是数据块的最小编号。 - `3` 是数据块的最大编号。 - `1` 是块级别(即在由块组成的合并树中,该块在树中的深度)。 -!!! attention "注意" +!!! 
info "注意" 旧类型表的片段名称为:`20190117_20190123_2_2_0`(最小日期 - 最大日期 - 最小块编号 - 最大块编号 - 块级别)。 -`active` 列为片段状态。`1` 激活状态;`0` 非激活状态。非激活片段是那些在合并到较大片段之后剩余的源数据片段。损坏的数据片段也表示为非活动状态。 +`active` 列为片段状态。`1` 代表激活状态;`0` 代表非激活状态。非激活片段是那些在合并到较大片段之后剩余的源数据片段。损坏的数据片段也表示为非活动状态。 -正如在示例中所看到的,同一分区中有几个独立的片段(例如,`201901_1_3_1`和`201901_1_9_2`)。这意味着这些片段尚未合并。ClickHouse 大约在插入后15分钟定期报告合并操作,合并插入的数据片段。此外,你也可以使用 [OPTIMIZE](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#misc_operations-optimize) 语句直接执行合并。例: +正如在示例中所看到的,同一分区中有几个独立的片段(例如,`201901_1_3_1`和`201901_1_9_2`)。这意味着这些片段尚未合并。ClickHouse 会定期的对插入的数据片段进行合并,大约是在插入后15分钟左右。此外,你也可以使用 [OPTIMIZE](../../../sql-reference/statements/misc.md#misc_operations-optimize) 语句发起一个计划外的合并。例如: ``` sql OPTIMIZE TABLE visits PARTITION 201902; ``` - ┌─partition─┬─name───────────┬─active─┐ - │ 201901 │ 201901_1_3_1 │ 0 │ - │ 201901 │ 201901_1_9_2 │ 1 │ - │ 201901 │ 201901_8_8_0 │ 0 │ - │ 201901 │ 201901_9_9_0 │ 0 │ - │ 201902 │ 201902_4_6_1 │ 0 │ - │ 201902 │ 201902_4_11_2 │ 1 │ - │ 201902 │ 201902_10_10_0 │ 0 │ - │ 201902 │ 201902_11_11_0 │ 0 │ - └───────────┴────────────────┴────────┘ +``` +┌─partition─┬─name───────────┬─active─┐ +│ 201901 │ 201901_1_3_1 │ 0 │ +│ 201901 │ 201901_1_9_2 │ 1 │ +│ 201901 │ 201901_8_8_0 │ 0 │ +│ 201901 │ 201901_9_9_0 │ 0 │ +│ 201902 │ 201902_4_6_1 │ 0 │ +│ 201902 │ 201902_4_11_2 │ 1 │ +│ 201902 │ 201902_10_10_0 │ 0 │ +│ 201902 │ 201902_11_11_0 │ 0 │ +└───────────┴────────────────┴────────┘ +``` -非激活片段会在合并后的10分钟左右删除。 +非激活片段会在合并后的10分钟左右被删除。 查看片段和分区信息的另一种方法是进入表的目录:`/var/lib/clickhouse/data//
/`。例如: ``` bash -dev:/var/lib/clickhouse/data/default/visits$ ls -l +/var/lib/clickhouse/data/default/visits$ ls -l total 40 drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 201901_1_3_1 drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201901_1_9_2 @@ -105,12 +109,12 @@ drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 12:09 201902_4_6_1 drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached ``` -文件夹 ‘201901\_1\_1\_0’,‘201901\_1\_7\_1’ 等是片段的目录。每个片段都与一个对应的分区相关,并且只包含这个月的数据(本例中的表按月分区)。 +‘201901\_1\_1\_0’,‘201901\_1\_7\_1’ 等文件夹是数据片段的目录。每个片段都与一个对应的分区相关,并且只包含这个月的数据(本例中的表按月分区)。 -`detached` 目录存放着使用 [DETACH](../../../sql-reference/statements/alter.md#alter_detach-partition) 语句从表中分离的片段。损坏的片段也会移到该目录,而不是删除。服务器不使用`detached`目录中的片段。可以随时添加,删除或修改此目录中的数据 – 在运行 [ATTACH](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#alter_attach-partition) 语句前,服务器不会感知到。 +`detached` 目录存放着使用 [DETACH](../../../sql-reference/statements/alter.md#alter_detach-partition) 语句从表中卸载的片段。损坏的片段不会被删除而是也会移到该目录下。服务器不会去使用`detached`目录中的数据片段。因此你可以随时添加,删除或修改此目录中的数据 – 在运行 [ATTACH](../../../sql-reference/statements/alter.md#alter_attach-partition) 语句前,服务器不会感知到。 注意,在操作服务器时,你不能手动更改文件系统上的片段集或其数据,因为服务器不会感知到这些修改。对于非复制表,可以在服务器停止时执行这些操作,但不建议这样做。对于复制表,在任何情况下都不要更改片段文件。 -ClickHouse 支持对分区执行这些操作:删除分区,从一个表复制到另一个表,或创建备份。了解分区的所有操作,请参阅 [分区和片段的操作](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#alter_manipulations-with-partitions) 一节。 +ClickHouse 支持对分区执行这些操作:删除分区,将分区从一个表复制到另一个表,或创建备份。了解分区的所有操作,请参阅 [分区和片段的操作](../../../sql-reference/statements/alter.md#alter_manipulations-with-partitions) 一节。 [来源文章](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/) diff --git a/docs/zh/engines/table-engines/mergetree-family/replacingmergetree.md b/docs/zh/engines/table-engines/mergetree-family/replacingmergetree.md index 626597eeaf0..73328015ea9 100644 --- a/docs/zh/engines/table-engines/mergetree-family/replacingmergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/replacingmergetree.md @@ -1,8 +1,8 @@ -# 替换合并树 {#replacingmergetree} +# ReplacingMergeTree {#replacingmergetree} -该引擎和[MergeTree](mergetree.md)的不同之处在于它会删除具有相同主键的重复项。 +该引擎和 [MergeTree](mergetree.md) 的不同之处在于它会删除排序键值相同的重复项。 -数据的去重只会在合并的过程中出现。合并会在未知的时间在后台进行,因此你无法预先作出计划。有一些数据可能仍未被处理。尽管你可以调用 `OPTIMIZE` 语句发起计划外的合并,但请不要指望使用它,因为 `OPTIMIZE` 语句会引发对大量数据的读和写。 +数据的去重只会在数据合并期间进行。合并会在后台一个不确定的时间进行,因此你无法预先作出计划。有一些数据可能仍未被处理。尽管你可以调用 `OPTIMIZE` 语句发起计划外的合并,但请不要依靠它,因为 `OPTIMIZE` 语句会引发对数据的大量读写。 因此,`ReplacingMergeTree` 适用于在后台清除重复的数据以节省空间,但是它不保证没有重复的数据出现。 @@ -21,19 +21,20 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -请求参数的描述,参考[请求参数](../../../engines/table-engines/mergetree-family/replacingmergetree.md)。 +有关建表参数的描述,可参考 [创建表](../../../sql-reference/statements/create.md#create-table-query)。 -**参数** +**ReplacingMergeTree 的参数** - `ver` — 版本列。类型为 `UInt*`, `Date` 或 `DateTime`。可选参数。 - 合并的时候,`ReplacingMergeTree` 从所有具有相同主键的行中选择一行留下: - - 如果 `ver` 列未指定,选择最后一条。 - - 如果 `ver` 列已指定,选择 `ver` 值最大的版本。 + 在数据合并的时候,`ReplacingMergeTree` 从所有具有相同排序键的行中选择一行留下: + + - 如果 `ver` 列未指定,保留最后一条。 + - 如果 `ver` 列已指定,保留 `ver` 值最大的版本。 **子句** -创建 `ReplacingMergeTree` 表时,需要与创建 `MergeTree` 表时相同的[子句](mergetree.md)。 +创建 `ReplacingMergeTree` 表时,需要使用与创建 `MergeTree` 表时相同的 [子句](mergetree.md)。
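A hedged sketch of how the `ver` column described above affects deduplication in `ReplacingMergeTree`; the table `user_state` and its data are placeholders:

``` sql
CREATE TABLE user_state
(
    uid UInt64,
    name String,
    version UInt32
)
ENGINE = ReplacingMergeTree(version)
ORDER BY uid;

INSERT INTO user_state VALUES (1, 'old', 1);
INSERT INTO user_state VALUES (1, 'new', 2);

-- Merges run in the background at an unpredictable time; OPTIMIZE ... FINAL
-- forces one here so that only the row with the largest `version` survives.
OPTIMIZE TABLE user_state FINAL;

SELECT * FROM user_state;  -- expected: a single row (1, 'new', 2)
```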
diff --git a/docs/zh/getting-started/tutorial.md b/docs/zh/getting-started/tutorial.md index 43c7ed0ec59..2a82911cce4 100644 --- a/docs/zh/getting-started/tutorial.md +++ b/docs/zh/getting-started/tutorial.md @@ -80,7 +80,7 @@ clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv ## 导入示例数据集 {#import-sample-dataset} -现在是时候用一些示例数据填充我们的ClickHouse服务器。 在本教程中,我们将使用Yandex的匿名数据。Metrica,在成为开源之前以生产方式运行ClickHouse的第一个服务(更多关于这一点 [历史科](../introduction/history.md)). 有 [多种导入Yandex的方式。梅里卡数据集](example-datasets/metrica.md),为了本教程,我们将使用最现实的一个。 +现在是时候用一些示例数据填充我们的ClickHouse服务端。 在本教程中,我们将使用Yandex.Metrica的匿名数据,它是在ClickHouse成为开源之前作为生产环境运行的第一个服务(关于这一点的更多内容请参阅[ClickHouse历史](../introduction/history.md))。有 [多种导入Yandex.Metrica数据集的的方法](example-datasets/metrica.md),为了本教程,我们将使用最现实的一个。 ### 下载并提取表数据 {#download-and-extract-table-data} @@ -93,22 +93,22 @@ curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unx ### 创建表 {#create-tables} -与大多数数据库管理系统一样,ClickHouse在逻辑上将表分组为 “databases”. 有一个 `default` 数据库,但我们将创建一个名为新的 `tutorial`: +与大多数数据库管理系统一样,ClickHouse在逻辑上将表分组为数据库。包含一个 `default` 数据库,但我们将创建一个新的数据库 `tutorial`: ``` bash clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial" ``` -与数据库相比,创建表的语法要复杂得多(请参阅 [参考资料](../sql-reference/statements/create.md). 一般 `CREATE TABLE` 声明必须指定三个关键的事情: +与创建数据库相比,创建表的语法要复杂得多(请参阅 [参考资料](../sql-reference/statements/create.md). 一般 `CREATE TABLE` 声明必须指定三个关键的事情: 1. 要创建的表的名称。 -2. Table schema, i.e. list of columns and their [数据类型](../sql-reference/data-types/index.md). -3. [表引擎](../engines/table-engines/index.md) 及其设置,这决定了如何物理执行对此表的查询的所有细节。 +2. 表结构,例如:列名和对应的[数据类型](../sql-reference/data-types/index.md)。 +3. [表引擎](../engines/table-engines/index.md) 及其设置,这决定了对此表的查询操作是如何在物理层面执行的所有细节。 -YandexMetrica是一个网络分析服务,样本数据集不包括其全部功能,因此只有两个表可以创建: +Yandex.Metrica是一个网络分析服务,样本数据集不包括其全部功能,因此只有两个表可以创建: -- `hits` 是一个表格,其中包含所有用户在服务所涵盖的所有网站上完成的每个操作。 -- `visits` 是一个包含预先构建的会话而不是单个操作的表。 +- `hits` 表包含所有用户在服务所涵盖的所有网站上完成的每个操作。 +- `visits` 表包含预先构建的会话,而不是单个操作。 让我们看看并执行这些表的实际创建表查询: @@ -453,9 +453,9 @@ SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192 ``` -您可以使用以下交互模式执行这些查询 `clickhouse-client` (只需在终端中启动它,而不需要提前指定查询)或尝试一些 [替代接口](../interfaces/index.md) 如果你愿意的话 +您可以使用`clickhouse-client`的交互模式执行这些查询(只需在终端中启动它,而不需要提前指定查询)。或者如果你愿意,可以尝试一些[替代接口](../interfaces/index.md)。 -正如我们所看到的, `hits_v1` 使用 [基本MergeTree引擎](../engines/table-engines/mergetree-family/mergetree.md),而 `visits_v1` 使用 [崩溃](../engines/table-engines/mergetree-family/collapsingmergetree.md) 变体。 +正如我们所看到的, `hits_v1` 使用 [基本的MergeTree引擎](../engines/table-engines/mergetree-family/mergetree.md),而 `visits_v1` 使用 [折叠树](../engines/table-engines/mergetree-family/collapsingmergetree.md) 变体。 ### 导入数据 {#import-data} diff --git a/docs/zh/introduction/history.md b/docs/zh/introduction/history.md index 1871bd75588..29c8c263f9f 100644 --- a/docs/zh/introduction/history.md +++ b/docs/zh/introduction/history.md @@ -13,7 +13,7 @@ Yandex.Metrica基于用户定义的字段,对实时访问、连接会话,生 ClickHouse还被使用在: -- 存储来自Yandex.Metrica回话重放数据。 +- 存储来自Yandex.Metrica的会话重放数据。 - 处理中间数据 - 与Analytics一起构建全球报表。 - 为调试Yandex.Metrica引擎运行查询 diff --git a/docs/zh/sql-reference/aggregate-functions/index.md b/docs/zh/sql-reference/aggregate-functions/index.md index 57d8e362d99..436a8f433ea 100644 --- a/docs/zh/sql-reference/aggregate-functions/index.md +++ b/docs/zh/sql-reference/aggregate-functions/index.md @@ -1,6 +1,6 @@ --- toc_priority: 33 -toc_title: 简介 +toc_title: 聚合函数 --- # 聚合函数 {#aggregate-functions} diff --git a/docs/zh/sql-reference/functions/conditional-functions.md 
b/docs/zh/sql-reference/functions/conditional-functions.md index eabe253ab1c..265c4387cb1 100644 --- a/docs/zh/sql-reference/functions/conditional-functions.md +++ b/docs/zh/sql-reference/functions/conditional-functions.md @@ -34,7 +34,7 @@ │ 2 │ 3 │ └───┴──────┘ -执行查询 `SELECT multiIf(isNull(y) x, y < 3, y, NULL) FROM t_null`。结果: +执行查询 `SELECT multiIf(isNull(y), x, y < 3, y, NULL) FROM t_null`。结果: ┌─multiIf(isNull(y), x, less(y, 3), y, NULL)─┐ │ 1 │ diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 996f094e13e..842f8ebaaa1 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1167,6 +1167,9 @@ private: dump_of_cloned_ast.str().c_str()); fprintf(stderr, "dump after fuzz:\n"); fuzz_base->dumpTree(std::cerr); + + fmt::print(stderr, "IAST::clone() is broken for some AST node. This is a bug. The original AST ('dump before fuzz') and its cloned copy ('dump of cloned AST') refer to the same nodes, which must never happen. This means that their parent node doesn't implement clone() correctly."); + assert(false); } @@ -1504,7 +1507,18 @@ private: { /// Send data contained in the query. ReadBufferFromMemory data_in(parsed_insert_query->data, parsed_insert_query->end - parsed_insert_query->data); - sendDataFrom(data_in, sample, columns_description); + try + { + sendDataFrom(data_in, sample, columns_description); + } + catch (Exception & e) + { + /// The following query will use data from input + // "INSERT INTO data FORMAT TSV\n " < data.csv + // And may be pretty hard to debug, so add information about data source to make it easier. + e.addMessage("data for INSERT was parsed from query"); + throw; + } // Remember where the data ended. We use this info later to determine // where the next query begins. parsed_insert_query->end = data_in.buffer().begin() + data_in.count(); @@ -1512,7 +1526,15 @@ private: else if (!is_interactive) { /// Send data read from stdin. - sendDataFrom(std_in, sample, columns_description); + try + { + sendDataFrom(std_in, sample, columns_description); + } + catch (Exception & e) + { + e.addMessage("data for INSERT was parsed from stdin"); + throw; + } } else throw Exception("No data to insert", ErrorCodes::NO_DATA_TO_INSERT); diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index da782302eb9..6d47e7e0364 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -43,7 +44,6 @@ #include #include #include -#include #include #include #include @@ -90,6 +90,23 @@ namespace CurrentMetrics extern const Metric MemoryTracking; } + +int mainEntryClickHouseServer(int argc, char ** argv) +{ + DB::Server app; + try + { + return app.run(argc, argv); + } + catch (...) + { + std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; + auto code = DB::getCurrentExceptionCode(); + return code ? code : 1; + } +} + + namespace { @@ -280,6 +297,11 @@ int Server::main(const std::vector & /*args*/) global_context->makeGlobalContext(); global_context->setApplicationType(Context::ApplicationType::SERVER); + // Initialize global thread pool. Do it before we fetch configs from zookeeper + // nodes (`from_zk`), because ZooKeeper interface uses the pool. We will + // ignore `max_thread_pool_size` in configs we fetch from ZK, but oh well. 
+ GlobalThreadPool::initialize(config().getUInt("max_thread_pool_size", 10000)); + bool has_zookeeper = config().has("zookeeper"); zkutil::ZooKeeperNodeCache main_config_zk_node_cache([&] { return global_context->getZooKeeper(); }); @@ -317,11 +339,16 @@ int Server::main(const std::vector & /*args*/) { if (hasLinuxCapability(CAP_IPC_LOCK)) { - LOG_TRACE(log, "Will mlockall to prevent executable memory from being paged out. It may take a few seconds."); - if (0 != mlockall(MCL_CURRENT)) - LOG_WARNING(log, "Failed mlockall: {}", errnoToString(ErrorCodes::SYSTEM_ERROR)); + /// Get the memory area with (current) code segment. + /// It's better to lock only the code segment instead of calling "mlockall", + /// because otherwise debug info will be also locked in memory, and it can be huge. + auto [addr, len] = getMappedArea(reinterpret_cast(mainEntryClickHouseServer)); + + LOG_TRACE(log, "Will do mlock to prevent executable memory from being paged out. It may take a few seconds."); + if (0 != mlock(addr, len)) + LOG_WARNING(log, "Failed mlock: {}", errnoToString(ErrorCodes::SYSTEM_ERROR)); else - LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed"); + LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed, total {}", ReadableSize(len)); } else { @@ -414,9 +441,6 @@ int Server::main(const std::vector & /*args*/) DateLUT::instance(); LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::instance().getTimeZone()); - /// Initialize global thread pool - GlobalThreadPool::initialize(config().getUInt("max_thread_pool_size", 10000)); - /// Storage with temporary data for processing of heavy queries. { std::string tmp_path = config().getString("tmp_path", path + "tmp/"); @@ -607,6 +631,7 @@ int Server::main(const std::vector & /*args*/) /// Check sanity of MergeTreeSettings on server startup global_context->getMergeTreeSettings().sanityCheck(settings); + global_context->getReplicatedMergeTreeSettings().sanityCheck(settings); /// Limit on total memory usage size_t max_server_memory_usage = config().getUInt64("max_server_memory_usage", 0); @@ -719,7 +744,10 @@ int Server::main(const std::vector & /*args*/) { /// DDL worker should be started after all tables were loaded String ddl_zookeeper_path = config().getString("distributed_ddl.path", "/clickhouse/task_queue/ddl/"); - global_context->setDDLWorker(std::make_unique(ddl_zookeeper_path, *global_context, &config(), "distributed_ddl")); + int pool_size = config().getInt("distributed_ddl.pool_size", 1); + if (pool_size < 1) + throw Exception("distributed_ddl.pool_size should be greater then 0", ErrorCodes::ARGUMENT_OUT_OF_BOUND); + global_context->setDDLWorker(std::make_unique(pool_size, ddl_zookeeper_path, *global_context, &config(), "distributed_ddl")); } std::unique_ptr dns_cache_updater; @@ -1135,21 +1163,3 @@ int Server::main(const std::vector & /*args*/) return Application::EXIT_OK; } } - -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" - -int mainEntryClickHouseServer(int argc, char ** argv) -{ - DB::Server app; - try - { - return app.run(argc, argv); - } - catch (...) - { - std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; - auto code = DB::getCurrentExceptionCode(); - return code ? 
code : 1; - } -} diff --git a/programs/server/config.xml b/programs/server/config.xml index dfc0d106154..b3269f2e842 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -671,6 +671,9 @@ + + + diff --git a/release b/release index ec762b234fb..b20683a9caa 100755 --- a/release +++ b/release @@ -106,7 +106,7 @@ elif [[ $BUILD_TYPE == 'debug' ]]; then VERSION_POSTFIX+="+debug" fi -CMAKE_FLAGS=" $MALLOC_OPTS -DSANITIZE=$SANITIZER $CMAKE_FLAGS" +CMAKE_FLAGS=" $MALLOC_OPTS -DSANITIZE=$SANITIZER -DENABLE_CHECK_HEAVY_BUILDS=1 $CMAKE_FLAGS" [[ -n "$CMAKE_BUILD_TYPE" ]] && CMAKE_FLAGS=" -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE $CMAKE_FLAGS" export CMAKE_FLAGS diff --git a/src/Access/AccessControlManager.cpp b/src/Access/AccessControlManager.cpp index 93a6d2dd255..ecbb02966f0 100644 --- a/src/Access/AccessControlManager.cpp +++ b/src/Access/AccessControlManager.cpp @@ -339,6 +339,11 @@ void AccessControlManager::addStoragesFromMainConfig( } +UUID AccessControlManager::login(const String & user_name, const String & password, const Poco::Net::IPAddress & address) const +{ + return MultipleAccessStorage::login(user_name, password, address, *external_authenticators); +} + void AccessControlManager::setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config) { external_authenticators->setConfig(config, getLogger()); diff --git a/src/Access/AccessControlManager.h b/src/Access/AccessControlManager.h index d7cf59cfb28..81a66ce8f1d 100644 --- a/src/Access/AccessControlManager.h +++ b/src/Access/AccessControlManager.h @@ -106,6 +106,7 @@ public: bool isSettingNameAllowed(const std::string_view & name) const; void checkSettingNameIsAllowed(const std::string_view & name) const; + UUID login(const String & user_name, const String & password, const Poco::Net::IPAddress & address) const; void setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config); std::shared_ptr getContextAccess( diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index d5e48baf110..fbc69e94a43 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -288,23 +288,6 @@ void ContextAccess::calculateAccessRights() const } -bool ContextAccess::isCorrectPassword(const String & password) const -{ - std::lock_guard lock{mutex}; - if (!user) - return false; - return user->authentication.isCorrectPassword(password, user_name, manager->getExternalAuthenticators()); -} - -bool ContextAccess::isClientHostAllowed() const -{ - std::lock_guard lock{mutex}; - if (!user) - return false; - return user->allowed_client_hosts.contains(params.address); -} - - UserPtr ContextAccess::getUser() const { std::lock_guard lock{mutex}; diff --git a/src/Access/ContextAccess.h b/src/Access/ContextAccess.h index 9a5758b79a6..b42d50c3739 100644 --- a/src/Access/ContextAccess.h +++ b/src/Access/ContextAccess.h @@ -63,9 +63,6 @@ public: UserPtr getUser() const; String getUserName() const; - bool isCorrectPassword(const String & password) const; - bool isClientHostAllowed() const; - /// Returns information about current and enabled roles. /// The function can return nullptr. 
std::shared_ptr getRolesInfo() const; diff --git a/src/Access/IAccessStorage.cpp b/src/Access/IAccessStorage.cpp index 874ae612034..e5170221e18 100644 --- a/src/Access/IAccessStorage.cpp +++ b/src/Access/IAccessStorage.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -13,6 +14,7 @@ namespace ErrorCodes extern const int ACCESS_ENTITY_ALREADY_EXISTS; extern const int ACCESS_ENTITY_NOT_FOUND; extern const int ACCESS_STORAGE_READONLY; + extern const int AUTHENTICATION_FAILED; extern const int LOGICAL_ERROR; } @@ -412,6 +414,57 @@ void IAccessStorage::notify(const Notifications & notifications) } +UUID IAccessStorage::login( + const String & user_name, + const String & password, + const Poco::Net::IPAddress & address, + const ExternalAuthenticators & external_authenticators) const +{ + return loginImpl(user_name, password, address, external_authenticators); +} + + +UUID IAccessStorage::loginImpl( + const String & user_name, + const String & password, + const Poco::Net::IPAddress & address, + const ExternalAuthenticators & external_authenticators) const +{ + if (auto id = find(user_name)) + { + if (auto user = tryRead(*id)) + { + if (isPasswordCorrectImpl(*user, password, external_authenticators) && isAddressAllowedImpl(*user, address)) + return *id; + } + } + throwCannotAuthenticate(user_name); +} + + +bool IAccessStorage::isPasswordCorrectImpl(const User & user, const String & password, const ExternalAuthenticators & external_authenticators) const +{ + return user.authentication.isCorrectPassword(password, user.getName(), external_authenticators); +} + + +bool IAccessStorage::isAddressAllowedImpl(const User & user, const Poco::Net::IPAddress & address) const +{ + return user.allowed_client_hosts.contains(address); +} + +UUID IAccessStorage::getIDOfLoggedUser(const String & user_name) const +{ + return getIDOfLoggedUserImpl(user_name); +} + + +UUID IAccessStorage::getIDOfLoggedUserImpl(const String & user_name) const +{ + return getID(user_name); +} + + UUID IAccessStorage::generateRandomID() { static Poco::UUIDGenerator generator; @@ -500,4 +553,13 @@ void IAccessStorage::throwReadonlyCannotRemove(EntityType type, const String & n "Cannot remove " + outputEntityTypeAndName(type, name) + " from " + getStorageName() + " because this storage is readonly", ErrorCodes::ACCESS_STORAGE_READONLY); } + + +void IAccessStorage::throwCannotAuthenticate(const String & user_name) +{ + /// We use the same message for all authentification failures because we don't want to give away any unnecessary information for security reasons, + /// only the log will show the exact reason. + throw Exception(user_name + ": Authentication failed: password is incorrect or there is no user with such name", ErrorCodes::AUTHENTICATION_FAILED); +} + } diff --git a/src/Access/IAccessStorage.h b/src/Access/IAccessStorage.h index 7dc2fd69335..5a86e817fb2 100644 --- a/src/Access/IAccessStorage.h +++ b/src/Access/IAccessStorage.h @@ -11,9 +11,13 @@ namespace Poco { class Logger; } +namespace Poco::Net { class IPAddress; } namespace DB { +struct User; +class ExternalAuthenticators; + /// Contains entities, i.e. instances of classes derived from IAccessEntity. /// The implementations of this class MUST be thread-safe. class IAccessStorage @@ -138,6 +142,14 @@ public: bool hasSubscription(EntityType type) const; bool hasSubscription(const UUID & id) const; + /// Finds an user, check its password and returns the ID of the user. + /// Throws an exception if no such user or password is incorrect. 
+ UUID login(const String & user_name, const String & password, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const; + + /// Returns the ID of an user who has logged in (maybe on another node). + /// The function assumes that the password has been already checked somehow, so we can skip checking it now. + UUID getIDOfLoggedUser(const String & user_name) const; + protected: virtual std::optional findImpl(EntityType type, const String & name) const = 0; virtual std::vector findAllImpl(EntityType type) const = 0; @@ -152,6 +164,10 @@ protected: virtual ext::scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const = 0; virtual bool hasSubscriptionImpl(const UUID & id) const = 0; virtual bool hasSubscriptionImpl(EntityType type) const = 0; + virtual UUID loginImpl(const String & user_name, const String & password, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const; + virtual bool isPasswordCorrectImpl(const User & user, const String & password, const ExternalAuthenticators & external_authenticators) const; + virtual bool isAddressAllowedImpl(const User & user, const Poco::Net::IPAddress & address) const; + virtual UUID getIDOfLoggedUserImpl(const String & user_name) const; static UUID generateRandomID(); Poco::Logger * getLogger() const; @@ -166,6 +182,7 @@ protected: [[noreturn]] void throwReadonlyCannotInsert(EntityType type, const String & name) const; [[noreturn]] void throwReadonlyCannotUpdate(EntityType type, const String & name) const; [[noreturn]] void throwReadonlyCannotRemove(EntityType type, const String & name) const; + [[noreturn]] static void throwCannotAuthenticate(const String & user_name); using Notification = std::tuple; using Notifications = std::vector; diff --git a/src/Access/MultipleAccessStorage.cpp b/src/Access/MultipleAccessStorage.cpp index bf711b54d54..8ddc7410d8d 100644 --- a/src/Access/MultipleAccessStorage.cpp +++ b/src/Access/MultipleAccessStorage.cpp @@ -392,4 +392,58 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock added_subscriptions->clear(); } + +UUID MultipleAccessStorage::loginImpl(const String & user_name, const String & password, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const +{ + auto storages = getStoragesInternal(); + for (const auto & storage : *storages) + { + try + { + auto id = storage->login(user_name, password, address, external_authenticators); + std::lock_guard lock{mutex}; + ids_cache.set(id, storage); + return id; + } + catch (...) + { + if (!storage->find(EntityType::USER, user_name)) + { + /// The authentication failed because there no users with such name in the `storage` + /// thus we can try to search in other nested storages. + continue; + } + throw; + } + } + throwCannotAuthenticate(user_name); +} + + +UUID MultipleAccessStorage::getIDOfLoggedUserImpl(const String & user_name) const +{ + auto storages = getStoragesInternal(); + for (const auto & storage : *storages) + { + try + { + auto id = storage->getIDOfLoggedUser(user_name); + std::lock_guard lock{mutex}; + ids_cache.set(id, storage); + return id; + } + catch (...) + { + if (!storage->find(EntityType::USER, user_name)) + { + /// The authentication failed because there no users with such name in the `storage` + /// thus we can try to search in other nested storages. 
+ continue; + } + throw; + } + } + throwNotFound(EntityType::USER, user_name); +} + } diff --git a/src/Access/MultipleAccessStorage.h b/src/Access/MultipleAccessStorage.h index 5d01894621f..36551f1cbc8 100644 --- a/src/Access/MultipleAccessStorage.h +++ b/src/Access/MultipleAccessStorage.h @@ -47,6 +47,8 @@ protected: ext::scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override; bool hasSubscriptionImpl(const UUID & id) const override; bool hasSubscriptionImpl(EntityType type) const override; + UUID loginImpl(const String & user_name, const String & password, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const override; + UUID getIDOfLoggedUserImpl(const String & user_name) const override; private: using Storages = std::vector; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b6e8c395b26..0016c51b7f8 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -67,6 +67,7 @@ set(dbms_sources) add_headers_and_sources(clickhouse_common_io Common) add_headers_and_sources(clickhouse_common_io Common/HashTable) add_headers_and_sources(clickhouse_common_io IO) +add_headers_and_sources(clickhouse_common_io IO/S3) list (REMOVE_ITEM clickhouse_common_io_sources Common/malloc.cpp Common/new_delete.cpp) if(USE_RDKAFKA) @@ -378,11 +379,6 @@ if (USE_BROTLI) target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BROTLI_INCLUDE_DIR}) endif() -if (USE_OPENCL) - target_link_libraries (clickhouse_common_io PRIVATE ${OpenCL_LIBRARIES}) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${OpenCL_INCLUDE_DIRS}) -endif () - if (USE_CASSANDRA) dbms_target_link_libraries(PUBLIC ${CASSANDRA_LIBRARY}) dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR}) diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp index 6d6af794a07..68f4bcd1b76 100644 --- a/src/Client/ConnectionPoolWithFailover.cpp +++ b/src/Client/ConnectionPoolWithFailover.cpp @@ -56,6 +56,9 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts return tryGetEntry(pool, timeouts, fail_message, settings); }; + size_t offset = 0; + if (settings) + offset = settings->load_balancing_first_offset % nested_pools.size(); GetPriorityFunc get_priority; switch (settings ? LoadBalancing(settings->load_balancing) : default_load_balancing) { @@ -68,7 +71,7 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts case LoadBalancing::RANDOM: break; case LoadBalancing::FIRST_OR_RANDOM: - get_priority = [](size_t i) -> size_t { return i >= 1; }; + get_priority = [offset](size_t i) -> size_t { return i != offset; }; break; case LoadBalancing::ROUND_ROBIN: if (last_used >= nested_pools.size()) @@ -190,6 +193,9 @@ std::vector ConnectionPoolWithFailover::g else throw DB::Exception("Unknown pool allocation mode", DB::ErrorCodes::LOGICAL_ERROR); + size_t offset = 0; + if (settings) + offset = settings->load_balancing_first_offset % nested_pools.size(); GetPriorityFunc get_priority; switch (settings ? 
LoadBalancing(settings->load_balancing) : default_load_balancing) { @@ -202,7 +208,7 @@ std::vector ConnectionPoolWithFailover::g case LoadBalancing::RANDOM: break; case LoadBalancing::FIRST_OR_RANDOM: - get_priority = [](size_t i) -> size_t { return i >= 1; }; + get_priority = [offset](size_t i) -> size_t { return i != offset; }; break; case LoadBalancing::ROUND_ROBIN: if (last_used >= nested_pools.size()) diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index 58543d6a4dd..e4d17c586ac 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -781,18 +781,21 @@ void ColumnArray::getPermutation(bool reverse, size_t limit, int nan_direction_h void ColumnArray::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_range) const { + if (equal_range.empty()) + return; + if (limit >= size() || limit >= equal_range.back().second) limit = 0; - size_t n = equal_range.size(); + size_t number_of_ranges = equal_range.size(); if (limit) - --n; + --number_of_ranges; EqualRanges new_ranges; - for (size_t i = 0; i < n; ++i) + for (size_t i = 0; i < number_of_ranges; ++i) { - const auto& [first, last] = equal_range[i]; + const auto & [first, last] = equal_range[i]; if (reverse) std::sort(res.begin() + first, res.begin() + last, Less(*this, nan_direction_hint)); @@ -817,7 +820,13 @@ void ColumnArray::updatePermutation(bool reverse, size_t limit, int nan_directio if (limit) { - const auto& [first, last] = equal_range.back(); + const auto & [first, last] = equal_range.back(); + + if (limit < first || limit > last) + return; + + /// Since then we are working inside the interval. + if (reverse) std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, Less(*this, nan_direction_hint)); else diff --git a/src/Columns/ColumnDecimal.cpp b/src/Columns/ColumnDecimal.cpp index 34a01febc73..b9549175f6c 100644 --- a/src/Columns/ColumnDecimal.cpp +++ b/src/Columns/ColumnDecimal.cpp @@ -7,6 +7,7 @@ #include #include +#include #include @@ -142,25 +143,31 @@ void ColumnDecimal::getPermutation(bool reverse, size_t limit, int , IColumn: } template -void ColumnDecimal::updatePermutation(bool reverse, size_t limit, int, IColumn::Permutation & res, EqualRanges & equal_range) const +void ColumnDecimal::updatePermutation(bool reverse, size_t limit, int, IColumn::Permutation & res, EqualRanges & equal_ranges) const { - if (limit >= data.size() || limit >= equal_range.back().second) + if (equal_ranges.empty()) + return; + + if (limit >= data.size() || limit >= equal_ranges.back().second) limit = 0; - size_t n = equal_range.size(); + size_t number_of_ranges = equal_ranges.size(); if (limit) - --n; + --number_of_ranges; EqualRanges new_ranges; - for (size_t i = 0; i < n; ++i) + SCOPE_EXIT({equal_ranges = std::move(new_ranges);}); + + for (size_t i = 0; i < number_of_ranges; ++i) { - const auto& [first, last] = equal_range[i]; + const auto& [first, last] = equal_ranges[i]; if (reverse) std::partial_sort(res.begin() + first, res.begin() + last, res.begin() + last, [this](size_t a, size_t b) { return data[a] > data[b]; }); else std::partial_sort(res.begin() + first, res.begin() + last, res.begin() + last, [this](size_t a, size_t b) { return data[a] < data[b]; }); + auto new_first = first; for (auto j = first + 1; j < last; ++j) { @@ -178,13 +185,20 @@ void ColumnDecimal::updatePermutation(bool reverse, size_t limit, int, IColum if (limit) { - const auto& [first, last] = equal_range.back(); + const auto & [first, last] = 
equal_ranges.back(); + + if (limit < first || limit > last) + return; + + /// Since then we are working inside the interval. + if (reverse) std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, [this](size_t a, size_t b) { return data[a] > data[b]; }); else std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, [this](size_t a, size_t b) { return data[a] < data[b]; }); + auto new_first = first; for (auto j = first + 1; j < limit; ++j) { @@ -208,7 +222,6 @@ void ColumnDecimal::updatePermutation(bool reverse, size_t limit, int, IColum if (new_last - new_first > 1) new_ranges.emplace_back(new_first, new_last); } - equal_range = std::move(new_ranges); } template diff --git a/src/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp index 95a477e54cf..0e44b83791c 100644 --- a/src/Columns/ColumnFixedString.cpp +++ b/src/Columns/ColumnFixedString.cpp @@ -9,6 +9,8 @@ #include #include +#include + #include #include @@ -168,24 +170,29 @@ void ColumnFixedString::getPermutation(bool reverse, size_t limit, int /*nan_dir } } -void ColumnFixedString::updatePermutation(bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_range) const +void ColumnFixedString::updatePermutation(bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_ranges) const { - if (limit >= size() || limit >= equal_range.back().second) + if (equal_ranges.empty()) + return; + + if (limit >= size() || limit >= equal_ranges.back().second) limit = 0; - size_t k = equal_range.size(); + size_t number_of_ranges = equal_ranges.size(); if (limit) - --k; + --number_of_ranges; EqualRanges new_ranges; + SCOPE_EXIT({equal_ranges = std::move(new_ranges);}); - for (size_t i = 0; i < k; ++i) + for (size_t i = 0; i < number_of_ranges; ++i) { - const auto& [first, last] = equal_range[i]; + const auto& [first, last] = equal_ranges[i]; if (reverse) std::sort(res.begin() + first, res.begin() + last, less(*this)); else std::sort(res.begin() + first, res.begin() + last, less(*this)); + auto new_first = first; for (auto j = first + 1; j < last; ++j) { @@ -202,11 +209,18 @@ void ColumnFixedString::updatePermutation(bool reverse, size_t limit, int, Permu } if (limit) { - const auto& [first, last] = equal_range.back(); + const auto & [first, last] = equal_ranges.back(); + + if (limit < first || limit > last) + return; + + /// Since then we are working inside the interval. 
+ if (reverse) std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less(*this)); else std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less(*this)); + auto new_first = first; for (auto j = first + 1; j < limit; ++j) { @@ -230,7 +244,6 @@ void ColumnFixedString::updatePermutation(bool reverse, size_t limit, int, Permu if (new_last - new_first > 1) new_ranges.emplace_back(new_first, new_last); } - equal_range = std::move(new_ranges); } void ColumnFixedString::insertRangeFrom(const IColumn & src, size_t start, size_t length) diff --git a/src/Columns/ColumnLowCardinality.cpp b/src/Columns/ColumnLowCardinality.cpp index df714763225..64b503ed325 100644 --- a/src/Columns/ColumnLowCardinality.cpp +++ b/src/Columns/ColumnLowCardinality.cpp @@ -6,6 +6,7 @@ #include #include +#include namespace DB { @@ -329,19 +330,24 @@ void ColumnLowCardinality::getPermutation(bool reverse, size_t limit, int nan_di } } -void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const +void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const { - if (limit >= size() || limit >= equal_range.back().second) + if (equal_ranges.empty()) + return; + + if (limit >= size() || limit >= equal_ranges.back().second) limit = 0; - size_t n = equal_range.size(); + size_t number_of_ranges = equal_ranges.size(); if (limit) - --n; + --number_of_ranges; EqualRanges new_ranges; - for (size_t i = 0; i < n; ++i) + SCOPE_EXIT({equal_ranges = std::move(new_ranges);}); + + for (size_t i = 0; i < number_of_ranges; ++i) { - const auto& [first, last] = equal_range[i]; + const auto& [first, last] = equal_ranges[i]; if (reverse) std::sort(res.begin() + first, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b) {return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) > 0; }); @@ -366,7 +372,13 @@ void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan if (limit) { - const auto& [first, last] = equal_range.back(); + const auto & [first, last] = equal_ranges.back(); + + if (limit < first || limit > last) + return; + + /// Since then we are working inside the interval. 
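// A self-contained sketch, in standard C++ only, of the "limit falls inside the last equal
// range" rule that each of these updatePermutation rewrites applies: the tail range
// [first, last) is refined with a partial sort up to `limit`, and is skipped entirely when
// `limit` lies outside of it. `data` and `perm` are hypothetical stand-ins for a column's
// values and the permutation being updated.
#include <algorithm>
#include <cstddef>
#include <vector>

inline void partialSortLastEqualRange(
    const std::vector<int> & data,
    std::vector<std::size_t> & perm,
    std::size_t first,
    std::size_t last,
    std::size_t limit)
{
    /// The limit does not fall into this range, so there is nothing left to order here.
    if (limit < first || limit > last)
        return;

    /// Only the first `limit - first` positions of the range have to end up fully ordered.
    std::partial_sort(
        perm.begin() + first, perm.begin() + limit, perm.begin() + last,
        [&](std::size_t a, std::size_t b) { return data[a] < data[b]; });
}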
+ if (reverse) std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b) {return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) > 0; }); @@ -374,6 +386,7 @@ void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, [this, nan_direction_hint](size_t a, size_t b) {return getDictionary().compareAt(getIndexes().getUInt(a), getIndexes().getUInt(b), getDictionary(), nan_direction_hint) < 0; }); auto new_first = first; + for (auto j = first + 1; j < limit; ++j) { if (getDictionary().compareAt(getIndexes().getUInt(res[new_first]), getIndexes().getUInt(res[j]), getDictionary(), nan_direction_hint) != 0) @@ -384,6 +397,7 @@ void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan new_first = j; } } + auto new_last = limit; for (auto j = limit; j < last; ++j) { @@ -396,7 +410,6 @@ void ColumnLowCardinality::updatePermutation(bool reverse, size_t limit, int nan if (new_last - new_first > 1) new_ranges.emplace_back(new_first, new_last); } - equal_range = std::move(new_ranges); } std::vector ColumnLowCardinality::scatter(ColumnIndex num_columns, const Selector & selector) const diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index 888410202f0..bdbc941c1e7 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -329,73 +329,113 @@ void ColumnNullable::getPermutation(bool reverse, size_t limit, int null_directi } } -void ColumnNullable::updatePermutation(bool reverse, size_t limit, int null_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const +void ColumnNullable::updatePermutation(bool reverse, size_t limit, int null_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const { - if (limit >= equal_range.back().second || limit >= size()) - limit = 0; + if (equal_ranges.empty()) + return; - EqualRanges new_ranges, temp_ranges; + /// We will sort nested columns into `new_ranges` and call updatePermutation in next columns with `null_ranges`. + EqualRanges new_ranges, null_ranges; - for (const auto &[first, last] : equal_range) + const auto is_nulls_last = ((null_direction_hint > 0) != reverse); + + if (is_nulls_last) { - bool direction = ((null_direction_hint > 0) != reverse); /// Shift all NULL values to the end. - - size_t read_idx = first; - size_t write_idx = first; - while (read_idx < last && (isNullAt(res[read_idx])^direction)) + for (const auto & [first, last] : equal_ranges) { - ++read_idx; - ++write_idx; - } + /// Current interval is righter than limit. + if (limit && first > limit) + break; - ++read_idx; + /// Consider a half interval [first, last) + size_t read_idx = first; + size_t write_idx = first; + size_t end_idx = last; - /// Invariants: - /// write_idx < read_idx - /// write_idx points to NULL - /// read_idx will be incremented to position of next not-NULL - /// there are range of NULLs between write_idx and read_idx - 1, - /// We are moving elements from end to begin of this range, - /// so range will "bubble" towards the end. - /// Relative order of NULL elements could be changed, - /// but relative order of non-NULLs is preserved. - - while (read_idx < last && write_idx < last) - { - if (isNullAt(res[read_idx])^direction) + /// We can't check the limit here because the interval is not sorted by nested column. 
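// A hedged, standalone illustration (standard C++ only) of what the hand-written two-index
// loop below achieves for one equal range when NULLs are ordered last: every non-NULL row of
// the permutation is moved to the front of the range, so the nested column only has to sort
// [first, write_idx). std::stable_partition keeps the relative order of both groups, while
// the loop below only guarantees the order of non-NULL rows, which is all that matters here.
// `perm` and `is_null` are stand-ins for `res` and `isNullAt`.
#include <algorithm>
#include <cstddef>
#include <vector>

inline std::size_t moveNullsToEndOfRange(
    std::vector<std::size_t> & perm,
    const std::vector<bool> & is_null,
    std::size_t first,
    std::size_t last)
{
    auto middle = std::stable_partition(
        perm.begin() + first, perm.begin() + last,
        [&](std::size_t row) { return !is_null[row]; });

    /// The returned position plays the role of `write_idx`: [first, result) holds non-NULL rows,
    /// [result, last) holds NULL rows.
    return static_cast<std::size_t>(middle - perm.begin());
}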
+ while (read_idx < end_idx && !isNullAt(res[read_idx])) { - std::swap(res[read_idx], res[write_idx]); + ++read_idx; ++write_idx; } - ++read_idx; - } - if (write_idx - first > 1) - { - if (direction) - temp_ranges.emplace_back(first, write_idx); - else + ++read_idx; + + /// Invariants: + /// write_idx < read_idx + /// write_idx points to NULL + /// read_idx will be incremented to position of next not-NULL + /// there are range of NULLs between write_idx and read_idx - 1, + /// We are moving elements from end to begin of this range, + /// so range will "bubble" towards the end. + /// Relative order of NULL elements could be changed, + /// but relative order of non-NULLs is preserved. + + while (read_idx < end_idx && write_idx < end_idx) + { + if (!isNullAt(res[read_idx])) + { + std::swap(res[read_idx], res[write_idx]); + ++write_idx; + } + ++read_idx; + } + + /// We have a range [first, write_idx) of non-NULL values + if (first != write_idx) new_ranges.emplace_back(first, write_idx); - } - - if (last - write_idx > 1) - { - if (direction) - new_ranges.emplace_back(write_idx, last); - else - temp_ranges.emplace_back(write_idx, last); + /// We have a range [write_idx, list) of NULL values + if (write_idx != last) + null_ranges.emplace_back(write_idx, last); } } - while (!new_ranges.empty() && limit && limit <= new_ranges.back().first) - new_ranges.pop_back(); + else + { + /// Shift all NULL values to the beginning. + for (const auto & [first, last] : equal_ranges) + { + /// Current interval is righter than limit. + if (limit && first > limit) + break; - if (!temp_ranges.empty()) - getNestedColumn().updatePermutation(reverse, limit, null_direction_hint, res, temp_ranges); + ssize_t read_idx = last - 1; + ssize_t write_idx = last - 1; + ssize_t begin_idx = first; - equal_range.resize(temp_ranges.size() + new_ranges.size()); - std::merge(temp_ranges.begin(), temp_ranges.end(), new_ranges.begin(), new_ranges.end(), equal_range.begin()); + while (read_idx >= begin_idx && !isNullAt(res[read_idx])) + { + --read_idx; + --write_idx; + } + + --read_idx; + + while (read_idx >= begin_idx && write_idx >= begin_idx) + { + if (!isNullAt(res[read_idx])) + { + std::swap(res[read_idx], res[write_idx]); + --write_idx; + } + --read_idx; + } + + /// We have a range [write_idx+1, last) of non-NULL values + if (write_idx != static_cast(last)) + new_ranges.emplace_back(write_idx + 1, last); + + /// We have a range [first, write_idx+1) of NULL values + if (static_cast(first) != write_idx) + null_ranges.emplace_back(first, write_idx + 1); + } + } + + getNestedColumn().updatePermutation(reverse, limit, null_direction_hint, res, new_ranges); + + equal_ranges = std::move(new_ranges); + std::move(null_ranges.begin(), null_ranges.end(), std::back_inserter(equal_ranges)); } void ColumnNullable::gather(ColumnGathererStream & gatherer) diff --git a/src/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp index 6c84107caae..23414626a59 100644 --- a/src/Columns/ColumnString.cpp +++ b/src/Columns/ColumnString.cpp @@ -9,7 +9,7 @@ #include #include - +#include namespace DB { @@ -325,25 +325,30 @@ void ColumnString::getPermutation(bool reverse, size_t limit, int /*nan_directio } } -void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res, EqualRanges & equal_range) const +void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res, EqualRanges & equal_ranges) const { - if (limit >= size() || limit > equal_range.back().second) 
+ if (equal_ranges.empty()) + return; + + if (limit >= size() || limit > equal_ranges.back().second) limit = 0; EqualRanges new_ranges; - auto less_true = less(*this); - auto less_false = less(*this); - size_t n = equal_range.size(); - if (limit) - --n; + SCOPE_EXIT({equal_ranges = std::move(new_ranges);}); - for (size_t i = 0; i < n; ++i) + size_t number_of_ranges = equal_ranges.size(); + if (limit) + --number_of_ranges; + + for (size_t i = 0; i < number_of_ranges; ++i) { - const auto &[first, last] = equal_range[i]; + const auto & [first, last] = equal_ranges[i]; + if (reverse) - std::sort(res.begin() + first, res.begin() + last, less_false); + std::sort(res.begin() + first, res.begin() + last, less(*this)); else - std::sort(res.begin() + first, res.begin() + last, less_true); + std::sort(res.begin() + first, res.begin() + last, less(*this)); + size_t new_first = first; for (size_t j = first + 1; j < last; ++j) { @@ -363,11 +368,18 @@ void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direc if (limit) { - const auto &[first, last] = equal_range.back(); + const auto & [first, last] = equal_ranges.back(); + + if (limit < first || limit > last) + return; + + /// Since then we are working inside the interval. + if (reverse) - std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less_false); + std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less(*this)); else - std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less_true); + std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less(*this)); + size_t new_first = first; for (size_t j = first + 1; j < limit; ++j) { @@ -394,7 +406,6 @@ void ColumnString::updatePermutation(bool reverse, size_t limit, int /*nan_direc if (new_last - new_first > 1) new_ranges.emplace_back(new_first, new_last); } - equal_range = std::move(new_ranges); } ColumnPtr ColumnString::replicate(const Offsets & replicate_offsets) const @@ -534,19 +545,25 @@ void ColumnString::getPermutationWithCollation(const Collator & collator, bool r } } -void ColumnString::updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int, Permutation &res, EqualRanges &equal_range) const +void ColumnString::updatePermutationWithCollation(const Collator & collator, bool reverse, size_t limit, int, Permutation & res, EqualRanges & equal_ranges) const { - if (limit >= size() || limit >= equal_range.back().second) + if (equal_ranges.empty()) + return; + + if (limit >= size() || limit >= equal_ranges.back().second) limit = 0; - size_t n = equal_range.size(); + size_t number_of_ranges = equal_ranges.size(); if (limit) - --n; + --number_of_ranges; EqualRanges new_ranges; - for (size_t i = 0; i < n; ++i) + SCOPE_EXIT({equal_ranges = std::move(new_ranges);}); + + for (size_t i = 0; i < number_of_ranges; ++i) { - const auto& [first, last] = equal_range[i]; + const auto& [first, last] = equal_ranges[i]; + if (reverse) std::sort(res.begin() + first, res.begin() + last, lessWithCollation(*this, collator)); else @@ -566,16 +583,22 @@ void ColumnString::updatePermutationWithCollation(const Collator & collator, boo } if (last - new_first > 1) new_ranges.emplace_back(new_first, last); - } if (limit) { - const auto& [first, last] = equal_range.back(); + const auto & [first, last] = equal_ranges.back(); + + if (limit < first || limit > last) + return; + + /// Since then we are working inside the interval. 
+ if (reverse) std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, lessWithCollation(*this, collator)); else std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, lessWithCollation(*this, collator)); + auto new_first = first; for (auto j = first + 1; j < limit; ++j) { @@ -603,7 +626,6 @@ void ColumnString::updatePermutationWithCollation(const Collator & collator, boo if (new_last - new_first > 1) new_ranges.emplace_back(new_first, new_last); } - equal_range = std::move(new_ranges); } void ColumnString::protect() diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index 87e5e37db51..98a6611edb7 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -344,15 +344,19 @@ void ColumnTuple::getPermutation(bool reverse, size_t limit, int nan_direction_h } } -void ColumnTuple::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const +void ColumnTuple::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const { - for (const auto& column : columns) - { - column->updatePermutation(reverse, limit, nan_direction_hint, res, equal_range); - while (limit && !equal_range.empty() && limit <= equal_range.back().first) - equal_range.pop_back(); + if (equal_ranges.empty()) + return; - if (equal_range.empty()) + for (const auto & column : columns) + { + column->updatePermutation(reverse, limit, nan_direction_hint, res, equal_ranges); + + while (limit && !equal_ranges.empty() && limit <= equal_ranges.back().first) + equal_ranges.pop_back(); + + if (equal_ranges.empty()) break; } } diff --git a/src/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h index d0edf65edd8..d87fdd65d15 100644 --- a/src/Columns/ColumnUnique.h +++ b/src/Columns/ColumnUnique.h @@ -382,17 +382,20 @@ int ColumnUnique::compareAt(size_t n, size_t m, const IColumn & rhs, } } - auto & column_unique = static_cast(rhs); + const auto & column_unique = static_cast(rhs); return getNestedColumn()->compareAt(n, m, *column_unique.getNestedColumn(), nan_direction_hint); } template -void ColumnUnique::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const +void ColumnUnique::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const { + if (equal_ranges.empty()) + return; + bool found_null_value_index = false; - for (size_t i = 0; i < equal_range.size() && !found_null_value_index; ++i) + for (size_t i = 0; i < equal_ranges.size() && !found_null_value_index; ++i) { - auto& [first, last] = equal_range[i]; + auto & [first, last] = equal_ranges[i]; for (auto j = first; j < last; ++j) { if (res[j] == getNullValueIndex()) @@ -409,14 +412,14 @@ void ColumnUnique::updatePermutation(bool reverse, size_t limit, int } if (last - first <= 1) { - equal_range.erase(equal_range.begin() + i); + equal_ranges.erase(equal_ranges.begin() + i); } found_null_value_index = true; break; } } } - getNestedColumn()->updatePermutation(reverse, limit, nan_direction_hint, res, equal_range); + getNestedColumn()->updatePermutation(reverse, limit, nan_direction_hint, res, equal_ranges); } template diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp index 852f876ddde..733a1510f93 100644 --- a/src/Columns/ColumnVector.cpp +++ b/src/Columns/ColumnVector.cpp @@ -15,17 +15,9 @@ #include 
#include #include +#include #include -#include -#if !defined(ARCADIA_BUILD) -# include -# if USE_OPENCL -# include "Common/BitonicSort.h" // Y_IGNORE -# endif -#else -#undef USE_OPENCL -#endif #ifdef __SSE2__ #include @@ -38,7 +30,6 @@ namespace ErrorCodes { extern const int PARAMETER_OUT_OF_BOUND; extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; - extern const int OPENCL_ERROR; extern const int LOGICAL_ERROR; } @@ -146,29 +137,6 @@ namespace }; } -template -void ColumnVector::getSpecialPermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, - IColumn::SpecialSort special_sort) const -{ - if (special_sort == IColumn::SpecialSort::OPENCL_BITONIC) - { -#if !defined(ARCADIA_BUILD) -#if USE_OPENCL - if (!limit || limit >= data.size()) - { - res.resize(data.size()); - - if (data.empty() || BitonicSort::getInstance().sort(data, res, !reverse)) - return; - } -#else - throw DB::Exception("'special_sort = bitonic' specified but OpenCL not available", DB::ErrorCodes::OPENCL_ERROR); -#endif -#endif - } - - getPermutation(reverse, limit, nan_direction_hint, res); -} template void ColumnVector::getPermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res) const @@ -243,10 +211,14 @@ void ColumnVector::getPermutation(bool reverse, size_t limit, int nan_directi template void ColumnVector::updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_range) const { + if (equal_range.empty()) + return; + if (limit >= data.size() || limit >= equal_range.back().second) limit = 0; EqualRanges new_ranges; + SCOPE_EXIT({equal_range = std::move(new_ranges);}); for (size_t i = 0; i < equal_range.size() - bool(limit); ++i) { @@ -275,6 +247,12 @@ void ColumnVector::updatePermutation(bool reverse, size_t limit, int nan_dire if (limit) { const auto & [first, last] = equal_range.back(); + + if (limit < first || limit > last) + return; + + /// Since then, we are working inside the interval. 
+ if (reverse) std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, greater(*this, nan_direction_hint)); else @@ -307,7 +285,6 @@ void ColumnVector::updatePermutation(bool reverse, size_t limit, int nan_dire new_ranges.emplace_back(new_first, new_last); } } - equal_range = std::move(new_ranges); } template diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index 55ab67d6214..c6600ca7e31 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -218,8 +218,6 @@ public: } void getPermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override; - void getSpecialPermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, - IColumn::SpecialSort) const override; void updatePermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges& equal_range) const override; diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index 40ff0649f4f..14e6a9d7eed 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -267,17 +267,6 @@ public: */ virtual void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const = 0; - enum class SpecialSort - { - NONE = 0, - OPENCL_BITONIC, - }; - - virtual void getSpecialPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res, SpecialSort) const - { - getPermutation(reverse, limit, nan_direction_hint, res); - } - /*in updatePermutation we pass the current permutation and the intervals at which it should be sorted * Then for each interval separately (except for the last one, if there is a limit) * We sort it based on data about the current column, and find all the intervals within this diff --git a/src/Columns/ya.make b/src/Columns/ya.make index 910c479c2a9..78c0e1b992d 100644 --- a/src/Columns/ya.make +++ b/src/Columns/ya.make @@ -2,6 +2,8 @@ LIBRARY() ADDINCL( + contrib/libs/icu/common + contrib/libs/icu/i18n contrib/libs/pdqsort ) diff --git a/src/Common/BitonicSort.h b/src/Common/BitonicSort.h deleted file mode 100644 index 8140687c040..00000000000 --- a/src/Common/BitonicSort.h +++ /dev/null @@ -1,221 +0,0 @@ -#pragma once - -#include -#if !defined(__APPLE__) && !defined(__FreeBSD__) -#include -#endif - -#ifdef __APPLE__ -#include -#else -#include -#endif - -#include -#include -#include -#include -#include - -#include "oclBasics.h" -#include "bitonicSortKernels.cl" - -class BitonicSort -{ -public: - using KernelType = OCL::KernelType; - - enum Types - { - KernelInt8 = 0, - KernelUInt8, - KernelInt16, - KernelUInt16, - KernelInt32, - KernelUInt32, - KernelInt64, - KernelUInt64, - KernelMax - }; - - static BitonicSort & getInstance() - { - static BitonicSort instance = BitonicSort(); - return instance; - } - - /// Sorts given array in specified order. Returns `true` if given sequence was sorted, `false` otherwise. - template - bool sort(const DB::PaddedPODArray & data, DB::IColumn::Permutation & res, cl_uint sort_ascending [[maybe_unused]]) const - { - if constexpr ( - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v) - { - size_t data_size = data.size(); - - /// Getting the nearest power of 2. - size_t power = 8; - while (power < data_size) - power <<= 1; - - /// Allocates more space for additional stubs to be added if needed. 
- std::vector pairs_content(power); - std::vector pairs_indices(power); - - memcpy(&pairs_content[0], &data[0], sizeof(T) * data_size); - for (UInt32 i = 0; i < data_size; ++i) - pairs_indices[i] = i; - - fillWithStubs(pairs_content.data(), pairs_indices.data(), data_size, power - data_size, sort_ascending); - sort(pairs_content.data(), pairs_indices.data(), power, sort_ascending); - - for (size_t i = 0, shift = 0; i < power; ++i) - { - if (pairs_indices[i] >= data_size) - { - ++shift; - continue; - } - res[i - shift] = pairs_indices[i]; - } - - return true; - } - - return false; - } - - /// Creating a configuration instance with making all OpenCl required variables - /// such as device, platform, context, queue, program and kernel. - void configure() - { - OCL::Settings settings = OCL::Settings(1, nullptr, 1, nullptr, 1, 0); - - cl_platform_id platform = OCL::getPlatformID(settings); - cl_device_id device = OCL::getDeviceID(platform, settings); - cl_context gpu_context = OCL::makeContext(device, settings); - cl_command_queue command_queue = OCL::makeCommandQueue<2>(device, gpu_context, settings); - - cl_program program = OCL::makeProgram(bitonic_sort_kernels, gpu_context, device, settings); - - /// Creating kernels for each specified data type. - cl_int error = 0; - kernels.resize(KernelMax); - - kernels[KernelInt8] = std::shared_ptr(clCreateKernel(program, "bitonicSort_char", &error), clReleaseKernel); - OCL::checkError(error); - - kernels[KernelUInt8] = std::shared_ptr(clCreateKernel(program, "bitonicSort_uchar", &error), clReleaseKernel); - OCL::checkError(error); - - kernels[KernelInt16] = std::shared_ptr(clCreateKernel(program, "bitonicSort_short", &error), clReleaseKernel); - OCL::checkError(error); - - kernels[KernelUInt16] = std::shared_ptr(clCreateKernel(program, "bitonicSort_ushort", &error), clReleaseKernel); - OCL::checkError(error); - - kernels[KernelInt32] = std::shared_ptr(clCreateKernel(program, "bitonicSort_int", &error), clReleaseKernel); - OCL::checkError(error); - - kernels[KernelUInt32] = std::shared_ptr(clCreateKernel(program, "bitonicSort_uint", &error), clReleaseKernel); - OCL::checkError(error); - - kernels[KernelInt64] = std::shared_ptr(clCreateKernel(program, "bitonicSort_long", &error), clReleaseKernel); - OCL::checkError(error); - - kernels[KernelUInt64] = std::shared_ptr(clCreateKernel(program, "bitonicSort_ulong", &error), clReleaseKernel); - OCL::checkError(error); - - configuration = std::shared_ptr(new OCL::Configuration(device, gpu_context, command_queue, program)); - } - -private: - /// Dictionary with kernels for each type from list: uchar, char, ushort, short, uint, int, ulong and long. - std::vector> kernels; - /// Current configuration with core OpenCL instances. - std::shared_ptr configuration = nullptr; - - cl_kernel getKernel(Int8) const { return kernels[KernelInt8].get(); } - cl_kernel getKernel(UInt8) const { return kernels[KernelUInt8].get(); } - cl_kernel getKernel(Int16) const { return kernels[KernelInt16].get(); } - cl_kernel getKernel(UInt16) const { return kernels[KernelUInt16].get(); } - cl_kernel getKernel(Int32) const { return kernels[KernelInt32].get(); } - cl_kernel getKernel(UInt32) const { return kernels[KernelUInt32].get(); } - cl_kernel getKernel(Int64) const { return kernels[KernelInt64].get(); } - cl_kernel getKernel(UInt64) const { return kernels[KernelUInt64].get(); } - - /// Sorts p_input inplace with indices. Works only with arrays which size equals to power of two. 
- template - void sort(T * p_input, cl_uint * indices, cl_int array_size, cl_uint sort_ascending) const - { - cl_kernel kernel = getKernel(T(0)); - cl_int error = CL_SUCCESS; - cl_int num_stages = 0; - - for (cl_int temp = array_size; temp > 2; temp >>= 1) - num_stages++; - - /// Creating OpenCL buffers using input arrays memory. - cl_mem cl_input_buffer = OCL::createBuffer(p_input, array_size, configuration.get()->context()); - cl_mem cl_indices_buffer = OCL::createBuffer(indices, array_size, configuration.get()->context()); - - configureKernel(kernel, 0, static_cast(&cl_input_buffer)); - configureKernel(kernel, 1, static_cast(&cl_indices_buffer)); - configureKernel(kernel, 4, static_cast(&sort_ascending)); - - for (cl_int stage = 0; stage < num_stages; stage++) - { - configureKernel(kernel, 2, static_cast(&stage)); - - for (cl_int pass_of_stage = stage; pass_of_stage >= 0; pass_of_stage--) - { - configureKernel(kernel, 3, static_cast(&pass_of_stage)); - - /// Setting work-item dimensions. - size_t gsize = array_size / (2 * 4); - size_t global_work_size[1] = {pass_of_stage ? gsize : gsize << 1 }; // number of quad items in input array - - /// Executing kernel. - error = clEnqueueNDRangeKernel(configuration.get()->commandQueue(), kernel, 1, nullptr, - global_work_size, nullptr, 0, nullptr, nullptr); - OCL::checkError(error); - } - } - - /// Syncs all threads. - OCL::finishCommandQueue(configuration.get()->commandQueue()); - - OCL::releaseData(p_input, array_size, cl_input_buffer, configuration.get()->commandQueue()); - OCL::releaseData(indices, array_size, cl_indices_buffer, configuration.get()->commandQueue()); - } - - template - void configureKernel(cl_kernel kernel, int number_of_argument, void * source) const - { - cl_int error = clSetKernelArg(kernel, number_of_argument, sizeof(T), source); - OCL::checkError(error); - } - - /// Fills given sequences from `arraySize` index with `numberOfStubs` values. - template - void fillWithStubs(T * p_input, cl_uint * indices, cl_int array_size, cl_int number_of_stubs, cl_uint sort_ascending) const - { - T value = sort_ascending ? std::numeric_limits::max() : std::numeric_limits::min(); - for (cl_int index = 0; index < number_of_stubs; ++index) - { - p_input[array_size + index] = value; - indices[array_size + index] = array_size + index; - } - } - - BitonicSort() = default; - BitonicSort(BitonicSort const &) = delete; - void operator = (BitonicSort const &) = delete; -}; diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 85da23fb303..bf475bc9b21 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -486,7 +486,6 @@ namespace ErrorCodes extern const int NO_REMOTE_SHARD_AVAILABLE = 519; extern const int CANNOT_DETACH_DICTIONARY_AS_TABLE = 520; extern const int ATOMIC_RENAME_FAIL = 521; - extern const int OPENCL_ERROR = 522; extern const int UNKNOWN_ROW_POLICY = 523; extern const int ALTER_OF_COLUMN_IS_FORBIDDEN = 524; extern const int INCORRECT_DISK_INDEX = 525; diff --git a/src/Common/PODArray.h b/src/Common/PODArray.h index 1084f0800cc..7bd9550500e 100644 --- a/src/Common/PODArray.h +++ b/src/Common/PODArray.h @@ -214,6 +214,9 @@ public: void clear() { c_end = c_start; } template +#if defined(__clang__) + ALWAYS_INLINE /// Better performance in clang build, worse performance in gcc build. +#endif void reserve(size_t n, TAllocatorParams &&... 
allocator_params) { if (n > capacity()) diff --git a/src/Common/SymbolIndex.cpp b/src/Common/SymbolIndex.cpp index ffa7f0462c9..a738512bb30 100644 --- a/src/Common/SymbolIndex.cpp +++ b/src/Common/SymbolIndex.cpp @@ -59,7 +59,7 @@ Otherwise you will get only exported symbols from program headers. # pragma clang diagnostic ignored "-Wunused-macros" #endif -#define __msan_unpoison_string(X) +#define __msan_unpoison_string(X) // NOLINT #if defined(__has_feature) # if __has_feature(memory_sanitizer) # undef __msan_unpoison_string diff --git a/src/Common/ThreadFuzzer.h b/src/Common/ThreadFuzzer.h index d0693945cb0..dabf6209f67 100644 --- a/src/Common/ThreadFuzzer.h +++ b/src/Common/ThreadFuzzer.h @@ -31,10 +31,8 @@ namespace DB * * Notes: * - it can be also implemented with instrumentation (example: LLVM Xray) instead of signals. - * - it's also reasonable to insert glitches around interesting functions (example: mutex lock/unlock, starting of threads, etc.), - * it is doable with wrapping these functions (todo). * - we should also make the sleep time random. - * - sleep obviously helps, but the effect of yield and migration is unclear. + * - sleep and migration obviously helps, but the effect of yield is unclear. * * In addition, we allow to inject glitches around thread synchronization functions. * Example: diff --git a/src/Common/ThreadPool.cpp b/src/Common/ThreadPool.cpp index 49516d777fb..dda16c7725d 100644 --- a/src/Common/ThreadPool.cpp +++ b/src/Common/ThreadPool.cpp @@ -13,6 +13,7 @@ namespace DB namespace ErrorCodes { extern const int CANNOT_SCHEDULE_TASK; + extern const int LOGICAL_ERROR; } } @@ -233,6 +234,7 @@ void ThreadPoolImpl::worker(typename std::list::iterator thread_ std::is_same_v ? CurrentMetrics::GlobalThreadActive : CurrentMetrics::LocalThreadActive); job(); + job = {}; } catch (...) 
{ @@ -276,7 +278,11 @@ std::unique_ptr GlobalThreadPool::the_instance; void GlobalThreadPool::initialize(size_t max_threads) { - assert(!the_instance); + if (the_instance) + { + throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, + "The global thread pool is initialized twice"); + } the_instance.reset(new GlobalThreadPool(max_threads, 1000 /*max_free_threads*/, 10000 /*max_queue_size*/, diff --git a/src/Common/UnicodeBar.cpp b/src/Common/UnicodeBar.cpp new file mode 100644 index 00000000000..8ff5e2052c1 --- /dev/null +++ b/src/Common/UnicodeBar.cpp @@ -0,0 +1,70 @@ +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + namespace ErrorCodes + { + extern const int PARAMETER_OUT_OF_BOUND; + } +} + + +namespace UnicodeBar +{ + double getWidth(Int64 x, Int64 min, Int64 max, double max_width) + { + if (x <= min) + return 0; + + if (x >= max) + return max_width; + + /// The case when max - min overflows + Int64 max_difference; + if (common::subOverflow(max, min, max_difference)) + throw DB::Exception(DB::ErrorCodes::PARAMETER_OUT_OF_BOUND, "The arguments to render unicode bar will lead to arithmetic overflow"); + + return (x - min) * max_width / max_difference; + } + + size_t getWidthInBytes(double width) + { + return ceil(width - 1.0 / 8) * UNICODE_BAR_CHAR_SIZE; + } + + void render(double width, char * dst) + { + size_t floor_width = floor(width); + + for (size_t i = 0; i < floor_width; ++i) + { + memcpy(dst, "█", UNICODE_BAR_CHAR_SIZE); + dst += UNICODE_BAR_CHAR_SIZE; + } + + size_t remainder = floor((width - floor_width) * 8); + + if (remainder) + { + memcpy(dst, &"▏▎▍▌▋▋▊▉"[(remainder - 1) * UNICODE_BAR_CHAR_SIZE], UNICODE_BAR_CHAR_SIZE); + dst += UNICODE_BAR_CHAR_SIZE; + } + + *dst = 0; + } + + std::string render(double width) + { + std::string res(getWidthInBytes(width), '\0'); + render(width, res.data()); + return res; + } +} + diff --git a/src/Common/UnicodeBar.h b/src/Common/UnicodeBar.h index 9a5bcecbd62..0c62bd7e8f7 100644 --- a/src/Common/UnicodeBar.h +++ b/src/Common/UnicodeBar.h @@ -1,7 +1,5 @@ #pragma once -#include -#include #include #include @@ -10,54 +8,12 @@ /** Allows you to draw a unicode-art bar whose width is displayed with a resolution of 1/8 character. */ - - namespace UnicodeBar { - using DB::Int64; - - inline double getWidth(Int64 x, Int64 min, Int64 max, double max_width) - { - if (x <= min) - return 0; - - if (x >= max) - return max_width; - - return (x - min) * max_width / (max - min); - } - - inline size_t getWidthInBytes(double width) - { - return ceil(width - 1.0 / 8) * UNICODE_BAR_CHAR_SIZE; - } + double getWidth(Int64 x, Int64 min, Int64 max, double max_width); + size_t getWidthInBytes(double width); /// In `dst` there must be a space for barWidthInBytes(width) characters and a trailing zero. 
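// A small standalone sketch (standard C++ with UTF-8 string literals) of the idea behind
// UnicodeBar::render above: a fractional width is drawn as floor(width) full blocks plus one
// of seven partial-block glyphs (1/8 … 7/8) picked from the remaining fraction. The real
// function writes into a caller-provided buffer with a fixed per-glyph byte size; this sketch
// simply returns a std::string.
#include <cmath>
#include <cstddef>
#include <string>

inline std::string renderBarSketch(double width)
{
    static const char * partial_blocks[7] = {"▏", "▎", "▍", "▌", "▋", "▊", "▉"};

    std::string res;
    std::size_t full = static_cast<std::size_t>(std::floor(width));
    for (std::size_t i = 0; i < full; ++i)
        res += "█";

    /// The fractional part selects one of eight levels; level 0 draws nothing extra.
    std::size_t eighths = static_cast<std::size_t>(std::floor((width - full) * 8));
    if (eighths)
        res += partial_blocks[eighths - 1];

    return res;
}

// For example, renderBarSketch(3.5) produces three full blocks followed by a half block: "███▌".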
- inline void render(double width, char * dst) - { - size_t floor_width = floor(width); - - for (size_t i = 0; i < floor_width; ++i) - { - memcpy(dst, "█", UNICODE_BAR_CHAR_SIZE); - dst += UNICODE_BAR_CHAR_SIZE; - } - - size_t remainder = floor((width - floor_width) * 8); - - if (remainder) - { - memcpy(dst, &"▏▎▍▌▋▋▊▉"[(remainder - 1) * UNICODE_BAR_CHAR_SIZE], UNICODE_BAR_CHAR_SIZE); - dst += UNICODE_BAR_CHAR_SIZE; - } - - *dst = 0; - } - - inline std::string render(double width) - { - std::string res(getWidthInBytes(width), '\0'); - render(width, res.data()); - return res; - } + void render(double width, char * dst); + std::string render(double width); } diff --git a/src/Common/getMappedArea.cpp b/src/Common/getMappedArea.cpp new file mode 100644 index 00000000000..6e452f32b96 --- /dev/null +++ b/src/Common/getMappedArea.cpp @@ -0,0 +1,84 @@ +#include "getMappedArea.h" +#include + +#if defined(__linux__) + +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + + +namespace +{ + +uintptr_t readAddressHex(DB::ReadBuffer & in) +{ + uintptr_t res = 0; + while (!in.eof()) + { + if (isHexDigit(*in.position())) + { + res *= 16; + res += unhex(*in.position()); + ++in.position(); + } + else + break; + } + return res; +} + +} + +std::pair getMappedArea(void * ptr) +{ + using namespace DB; + + uintptr_t uintptr = reinterpret_cast(ptr); + ReadBufferFromFile in("/proc/self/maps"); + + while (!in.eof()) + { + uintptr_t begin = readAddressHex(in); + assertChar('-', in); + uintptr_t end = readAddressHex(in); + skipToNextLineOrEOF(in); + + if (begin <= uintptr && uintptr < end) + return {reinterpret_cast(begin), end - begin}; + } + + throw Exception("Cannot find mapped area for pointer", ErrorCodes::LOGICAL_ERROR); +} + +} + +#else + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +std::pair getMappedArea(void *) +{ + throw Exception("The function getMappedArea is implemented only for Linux", ErrorCodes::NOT_IMPLEMENTED); +} + +} + +#endif + diff --git a/src/Common/getMappedArea.h b/src/Common/getMappedArea.h new file mode 100644 index 00000000000..3317f72bdc9 --- /dev/null +++ b/src/Common/getMappedArea.h @@ -0,0 +1,12 @@ +#include +#include + + +namespace DB +{ + +/// Find the address and size of the mapped memory region pointed by ptr. +/// Throw exception if not found. +std::pair getMappedArea(void * ptr); + +} diff --git a/src/Common/oclBasics.h b/src/Common/oclBasics.h deleted file mode 100644 index a3e7636af1b..00000000000 --- a/src/Common/oclBasics.h +++ /dev/null @@ -1,354 +0,0 @@ -#pragma once - -#include -#if USE_OPENCL - -#if !defined(__APPLE__) && !defined(__FreeBSD__) -#include -#endif - -#ifdef __APPLE__ -#include -#else -#include -#endif - -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int OPENCL_ERROR; -} -} - -struct OCL -{ - using KernelType = std::remove_reference::type; - - /** - * Structure which represents the most essential settings of common OpenCl entities. 
- */ - struct Settings - { - // Platform info - cl_uint number_of_platform_entries; - cl_uint * number_of_available_platforms; - - // Devices info - cl_uint number_of_devices_entries; - cl_uint * number_of_available_devices; - - // Context settings - cl_context_properties * context_properties; - - void (* context_callback)(const char *, const void *, size_t, void *); - - void * context_callback_data; - - // Command queue settings - cl_command_queue_properties command_queue_properties; - - // Build settings - cl_uint number_of_program_source_pointers; - - void (* build_notification_routine)(cl_program, void *user_data); - - void * build_callback_data; - char * build_options; - - Settings(cl_uint number_of_platform_entries_, - cl_uint * number_of_available_platforms_, - cl_uint number_of_devices_entries_, - cl_uint * number_of_available_devices_, - cl_uint number_of_program_source_pointers_, - cl_command_queue_properties command_queue_properties_, - cl_context_properties * context_properties_ = nullptr, - void * context_data_callback_ = nullptr, - void (* context_callback_)(const char *, const void *, size_t, void *) = nullptr, - void (* build_notification_routine_)(cl_program, void * user_data) = nullptr, - void * build_callback_data_ = nullptr, - char * build_options_ = nullptr) - { - this->number_of_platform_entries = number_of_platform_entries_; - this->number_of_available_platforms = number_of_available_platforms_; - this->number_of_devices_entries = number_of_devices_entries_; - this->number_of_available_devices = number_of_available_devices_; - this->number_of_program_source_pointers = number_of_program_source_pointers_; - this->command_queue_properties = command_queue_properties_; - this->context_properties = context_properties_; - this->context_callback = context_callback_; - this->context_callback_data = context_data_callback_; - this->build_notification_routine = build_notification_routine_; - this->build_callback_data = build_callback_data_; - this->build_options = build_options_; - } - }; - - - /** - * Configuration with already created OpenCl common entities. - */ - class Configuration - { - public: - - Configuration(cl_device_id device, cl_context gpu_context, - cl_command_queue command_queue, cl_program program) - { - this->device_ = device; - this->gpu_context_ = std::shared_ptr(gpu_context, clReleaseContext); - this->command_queue_ = std::shared_ptr(command_queue, clReleaseCommandQueue); - this->program_ = std::shared_ptr(program, clReleaseProgram); - } - - cl_device_id device() { return device_; } - - cl_context context() { return gpu_context_.get(); } - - cl_command_queue commandQueue() { return command_queue_.get(); } - - cl_program program() { return program_.get(); } - - private: - - using ProgramType = std::remove_reference::type; - using CommandQueueType = std::remove_reference::type; - using ContextType = std::remove_reference::type; - - cl_device_id device_; - - std::shared_ptr gpu_context_; - std::shared_ptr command_queue_; - std::shared_ptr program_; - }; - - - static String opencl_error_to_str(cl_int error) - { -#define CASE_CL_CONSTANT(NAME) case NAME: return #NAME; - - // Suppose that no combinations are possible. 
- switch (error) - { - CASE_CL_CONSTANT(CL_SUCCESS) - CASE_CL_CONSTANT(CL_DEVICE_NOT_FOUND) - CASE_CL_CONSTANT(CL_DEVICE_NOT_AVAILABLE) - CASE_CL_CONSTANT(CL_COMPILER_NOT_AVAILABLE) - CASE_CL_CONSTANT(CL_MEM_OBJECT_ALLOCATION_FAILURE) - CASE_CL_CONSTANT(CL_OUT_OF_RESOURCES) - CASE_CL_CONSTANT(CL_OUT_OF_HOST_MEMORY) - CASE_CL_CONSTANT(CL_PROFILING_INFO_NOT_AVAILABLE) - CASE_CL_CONSTANT(CL_MEM_COPY_OVERLAP) - CASE_CL_CONSTANT(CL_IMAGE_FORMAT_MISMATCH) - CASE_CL_CONSTANT(CL_IMAGE_FORMAT_NOT_SUPPORTED) - CASE_CL_CONSTANT(CL_BUILD_PROGRAM_FAILURE) - CASE_CL_CONSTANT(CL_MAP_FAILURE) - CASE_CL_CONSTANT(CL_MISALIGNED_SUB_BUFFER_OFFSET) - CASE_CL_CONSTANT(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST) - CASE_CL_CONSTANT(CL_COMPILE_PROGRAM_FAILURE) - CASE_CL_CONSTANT(CL_LINKER_NOT_AVAILABLE) - CASE_CL_CONSTANT(CL_LINK_PROGRAM_FAILURE) - CASE_CL_CONSTANT(CL_DEVICE_PARTITION_FAILED) - CASE_CL_CONSTANT(CL_KERNEL_ARG_INFO_NOT_AVAILABLE) - CASE_CL_CONSTANT(CL_INVALID_VALUE) - CASE_CL_CONSTANT(CL_INVALID_DEVICE_TYPE) - CASE_CL_CONSTANT(CL_INVALID_PLATFORM) - CASE_CL_CONSTANT(CL_INVALID_DEVICE) - CASE_CL_CONSTANT(CL_INVALID_CONTEXT) - CASE_CL_CONSTANT(CL_INVALID_QUEUE_PROPERTIES) - CASE_CL_CONSTANT(CL_INVALID_COMMAND_QUEUE) - CASE_CL_CONSTANT(CL_INVALID_HOST_PTR) - CASE_CL_CONSTANT(CL_INVALID_MEM_OBJECT) - CASE_CL_CONSTANT(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR) - CASE_CL_CONSTANT(CL_INVALID_IMAGE_SIZE) - CASE_CL_CONSTANT(CL_INVALID_SAMPLER) - CASE_CL_CONSTANT(CL_INVALID_BINARY) - CASE_CL_CONSTANT(CL_INVALID_BUILD_OPTIONS) - CASE_CL_CONSTANT(CL_INVALID_PROGRAM) - CASE_CL_CONSTANT(CL_INVALID_PROGRAM_EXECUTABLE) - CASE_CL_CONSTANT(CL_INVALID_KERNEL_NAME) - CASE_CL_CONSTANT(CL_INVALID_KERNEL_DEFINITION) - CASE_CL_CONSTANT(CL_INVALID_KERNEL) - CASE_CL_CONSTANT(CL_INVALID_ARG_INDEX) - CASE_CL_CONSTANT(CL_INVALID_ARG_VALUE) - CASE_CL_CONSTANT(CL_INVALID_ARG_SIZE) - CASE_CL_CONSTANT(CL_INVALID_KERNEL_ARGS) - CASE_CL_CONSTANT(CL_INVALID_WORK_DIMENSION) - CASE_CL_CONSTANT(CL_INVALID_WORK_GROUP_SIZE) - CASE_CL_CONSTANT(CL_INVALID_WORK_ITEM_SIZE) - CASE_CL_CONSTANT(CL_INVALID_GLOBAL_OFFSET) - CASE_CL_CONSTANT(CL_INVALID_EVENT_WAIT_LIST) - CASE_CL_CONSTANT(CL_INVALID_EVENT) - CASE_CL_CONSTANT(CL_INVALID_OPERATION) - CASE_CL_CONSTANT(CL_INVALID_GL_OBJECT) - CASE_CL_CONSTANT(CL_INVALID_BUFFER_SIZE) - CASE_CL_CONSTANT(CL_INVALID_MIP_LEVEL) - CASE_CL_CONSTANT(CL_INVALID_GLOBAL_WORK_SIZE) - CASE_CL_CONSTANT(CL_INVALID_PROPERTY) - CASE_CL_CONSTANT(CL_INVALID_IMAGE_DESCRIPTOR) - CASE_CL_CONSTANT(CL_INVALID_COMPILER_OPTIONS) - CASE_CL_CONSTANT(CL_INVALID_LINKER_OPTIONS) - CASE_CL_CONSTANT(CL_INVALID_DEVICE_PARTITION_COUNT) - default: - return "UNKNOWN ERROR CODE "; - } - -#undef CASE_CL_CONSTANT - } - - - static void checkError(cl_int error) - { - if (error != CL_SUCCESS) - throw DB::Exception("OpenCL error: " + opencl_error_to_str(error), DB::ErrorCodes::OPENCL_ERROR); - } - - - /// Getting OpenCl main entities. 
- - static cl_platform_id getPlatformID(const Settings & settings) - { - cl_platform_id platform; - cl_int error = clGetPlatformIDs(settings.number_of_platform_entries, &platform, - settings.number_of_available_platforms); - checkError(error); - return platform; - } - - static cl_device_id getDeviceID(cl_platform_id & platform, const Settings & settings) - { - cl_device_id device; - cl_int error = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, settings.number_of_devices_entries, - &device, settings.number_of_available_devices); - OCL::checkError(error); - return device; - } - - static cl_context makeContext(cl_device_id & device, const Settings & settings) - { - cl_int error; - cl_context gpu_context = clCreateContext(settings.context_properties, settings.number_of_devices_entries, - &device, settings.context_callback, settings.context_callback_data, - &error); - OCL::checkError(error); - return gpu_context; - } - - template - static cl_command_queue makeCommandQueue(cl_device_id & device, cl_context & context, const Settings & settings [[maybe_unused]]) - { - cl_int error; - cl_command_queue command_queue; - - if constexpr (version == 1) - { -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - command_queue = clCreateCommandQueue(context, device, settings.command_queue_properties, &error); -#pragma GCC diagnostic pop - } - else - { -#ifdef CL_VERSION_2_0 - command_queue = clCreateCommandQueueWithProperties(context, device, nullptr, &error); -#else - throw DB::Exception("Binary is built with OpenCL version < 2.0", DB::ErrorCodes::OPENCL_ERROR); -#endif - } - - OCL::checkError(error); - return command_queue; - } - - static cl_program makeProgram(const char * source_code, cl_context context, - cl_device_id device_id, const Settings & settings) - { - cl_int error = 0; - size_t source_size = strlen(source_code); - - cl_program program = clCreateProgramWithSource(context, settings.number_of_program_source_pointers, - &source_code, &source_size, &error); - checkError(error); - - error = clBuildProgram(program, settings.number_of_devices_entries, &device_id, settings.build_options, - settings.build_notification_routine, settings.build_callback_data); - - /// Combining additional logs output when program build failed. - if (error == CL_BUILD_PROGRAM_FAILURE) - { - size_t log_size; - error = clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, 0, nullptr, &log_size); - - checkError(error); - - std::vector log(log_size); - clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, log_size, log.data(), nullptr); - - checkError(error); - throw DB::Exception(log.data(), DB::ErrorCodes::OPENCL_ERROR); - } - - checkError(error); - return program; - } - - /// Configuring buffer for given input data - - template - static cl_mem createBuffer(K * p_input, cl_int array_size, cl_context context, cl_int elements_size = sizeof(K)) - { - cl_int error = CL_SUCCESS; - cl_mem cl_input_buffer = clCreateBuffer( - context, - CL_MEM_USE_HOST_PTR, - zeroCopySizeAlignment(elements_size * array_size), - p_input, - &error); - checkError(error); - return cl_input_buffer; - } - - static size_t zeroCopySizeAlignment(size_t required_size) - { - return required_size + (~required_size + 1) % 64; - } - - /// Manipulating with common OpenCL variables. - - static void finishCommandQueue(cl_command_queue command_queue) - { - // Blocks until all previously queued OpenCL commands in a queue are issued to the associated device. 
- cl_int error = clFinish(command_queue); - OCL::checkError(error); - } - - template - static void releaseData(T * origin, cl_int array_size, cl_mem cl_buffer, cl_command_queue command_queue, size_t offset = 0) - { - cl_int error = CL_SUCCESS; - - void * tmp_ptr = nullptr; - - // No events specified to be completed before enqueueing buffers, - // so `num_events_in_wait_list` passed with `0` value. - - tmp_ptr = clEnqueueMapBuffer(command_queue, cl_buffer, true, CL_MAP_READ, - offset, sizeof(cl_int) * array_size, 0, nullptr, nullptr, &error); - OCL::checkError(error); - if (tmp_ptr != origin) - throw DB::Exception("clEnqueueMapBuffer failed to return original pointer", DB::ErrorCodes::OPENCL_ERROR); - - error = clEnqueueUnmapMemObject(command_queue, cl_buffer, tmp_ptr, 0, nullptr, nullptr); - checkError(error); - - error = clReleaseMemObject(cl_buffer); - checkError(error); - } -}; - -#endif diff --git a/src/Common/remapExecutable.cpp b/src/Common/remapExecutable.cpp index 13bce459022..5418290b24f 100644 --- a/src/Common/remapExecutable.cpp +++ b/src/Common/remapExecutable.cpp @@ -2,17 +2,14 @@ #include #include +#include #include #include -#include - -#include -#include +#include #include -#include -#include +#include #include "remapExecutable.h" @@ -22,7 +19,6 @@ namespace DB namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int CANNOT_ALLOCATE_MEMORY; } @@ -30,48 +26,6 @@ namespace ErrorCodes namespace { -uintptr_t readAddressHex(DB::ReadBuffer & in) -{ - uintptr_t res = 0; - while (!in.eof()) - { - if (isHexDigit(*in.position())) - { - res *= 16; - res += unhex(*in.position()); - ++in.position(); - } - else - break; - } - return res; -} - - -/** Find the address and size of the mapped memory region pointed by ptr. - */ -std::pair getMappedArea(void * ptr) -{ - using namespace DB; - - uintptr_t uintptr = reinterpret_cast(ptr); - ReadBufferFromFile in("/proc/self/maps"); - - while (!in.eof()) - { - uintptr_t begin = readAddressHex(in); - assertChar('-', in); - uintptr_t end = readAddressHex(in); - skipToNextLineOrEOF(in); - - if (begin <= uintptr && uintptr < end) - return {reinterpret_cast(begin), end - begin}; - } - - throw Exception("Cannot find mapped area for pointer", ErrorCodes::LOGICAL_ERROR); -} - - __attribute__((__noinline__)) int64_t our_syscall(...) { __asm__ __volatile__ (R"( diff --git a/src/Common/renameat2.cpp b/src/Common/renameat2.cpp index 323b72267a6..24e414122dc 100644 --- a/src/Common/renameat2.cpp +++ b/src/Common/renameat2.cpp @@ -48,8 +48,10 @@ static bool supportsRenameat2Impl() #if defined(__NR_renameat2) -static void renameat2(const std::string & old_path, const std::string & new_path, int flags) +static bool renameat2(const std::string & old_path, const std::string & new_path, int flags) { + if (!supportsRenameat2()) + return false; if (old_path.empty() || new_path.empty()) throw Exception("Cannot rename " + old_path + " to " + new_path + ": path is empty", ErrorCodes::LOGICAL_ERROR); @@ -57,7 +59,14 @@ static void renameat2(const std::string & old_path, const std::string & new_path /// int newdirfd (ignored for absolute newpath), const char *newpath, /// unsigned int flags if (0 == syscall(__NR_renameat2, AT_FDCWD, old_path.c_str(), AT_FDCWD, new_path.c_str(), flags)) - return; + return true; + + /// EINVAL means that filesystem does not support one of the flags. + /// It also may happen when running clickhouse in docker with Mac OS as a host OS. 
+ /// supportsRenameat2() with uname is not enough in this case, because virtualized Linux kernel is used. + /// Other cases when EINVAL can be returned should never happen. + if (errno == EINVAL) + return false; if (errno == EEXIST) throwFromErrno("Cannot rename " + old_path + " to " + new_path + " because the second path already exists", ErrorCodes::ATOMIC_RENAME_FAIL); @@ -70,10 +79,9 @@ static void renameat2(const std::string & old_path, const std::string & new_path #define RENAME_NOREPLACE -1 #define RENAME_EXCHANGE -1 -[[noreturn]] -static void renameat2(const std::string &, const std::string &, int) +static bool renameat2(const std::string &, const std::string &, int) { - throw Exception("Compiled without renameat2() support", ErrorCodes::UNSUPPORTED_METHOD); + return false; } #endif @@ -104,18 +112,19 @@ bool supportsRenameat2() void renameNoReplace(const std::string & old_path, const std::string & new_path) { - if (supportsRenameat2()) - renameat2(old_path, new_path, RENAME_NOREPLACE); - else + if (!renameat2(old_path, new_path, RENAME_NOREPLACE)) renameNoReplaceFallback(old_path, new_path); } void renameExchange(const std::string & old_path, const std::string & new_path) { - if (supportsRenameat2()) - renameat2(old_path, new_path, RENAME_EXCHANGE); - else + if (!renameat2(old_path, new_path, RENAME_EXCHANGE)) renameExchangeFallback(old_path, new_path); } +bool renameExchangeIfSupported(const std::string & old_path, const std::string & new_path) +{ + return renameat2(old_path, new_path, RENAME_EXCHANGE); +} + } diff --git a/src/Common/renameat2.h b/src/Common/renameat2.h index 333f85541f1..141c5d385c5 100644 --- a/src/Common/renameat2.h +++ b/src/Common/renameat2.h @@ -14,4 +14,7 @@ void renameNoReplace(const std::string & old_path, const std::string & new_path) /// Atomically exchange oldpath and newpath. Throw exception if some of them does not exist void renameExchange(const std::string & old_path, const std::string & new_path); +/// Returns false instead of throwing exception if renameat2 is not supported +bool renameExchangeIfSupported(const std::string & old_path, const std::string & new_path); + } diff --git a/src/Common/tests/CMakeLists.txt b/src/Common/tests/CMakeLists.txt index 8de9424e044..6a39c2f8553 100644 --- a/src/Common/tests/CMakeLists.txt +++ b/src/Common/tests/CMakeLists.txt @@ -35,11 +35,6 @@ add_executable (radix_sort radix_sort.cpp) target_link_libraries (radix_sort PRIVATE clickhouse_common_io) target_include_directories(radix_sort SYSTEM PRIVATE ${PDQSORT_INCLUDE_DIR}) -if (USE_OPENCL) - add_executable (bitonic_sort bitonic_sort.cpp) - target_link_libraries (bitonic_sort PRIVATE clickhouse_common_io ${OPENCL_LINKER_FLAGS} ${OpenCL_LIBRARIES}) -endif () - add_executable (arena_with_free_lists arena_with_free_lists.cpp) target_link_libraries (arena_with_free_lists PRIVATE dbms) diff --git a/src/Common/tests/bitonic_sort.cpp b/src/Common/tests/bitonic_sort.cpp deleted file mode 100644 index 2545662c8cb..00000000000 --- a/src/Common/tests/bitonic_sort.cpp +++ /dev/null @@ -1,174 +0,0 @@ -#include -#include - -#if !defined(__APPLE__) && !defined(__FreeBSD__) -#include -#endif -#include -#include -#include -#include -#include -#include - -#include "Common/BitonicSort.h" - - -/// Generates vector of size 8 for testing. -/// Vector contains max possible value, min possible value and duplicate values. 
-template -static void generateTest(std::vector & data, Type min_value, Type max_value) -{ - int size = 10; - - data.resize(size); - data[0] = 10; - data[1] = max_value; - data[2] = 10; - data[3] = 20; - data[4] = min_value; - data[5] = min_value + 1; - data[6] = max_value - 5; - data[7] = 1; - data[8] = 42; - data[9] = max_value - 1; -} - - -static void check(const std::vector & indices, bool reverse = true) -{ - std::vector reference_indices{4, 5, 7, 0, 2, 3, 8, 6, 9, 1}; - if (reverse) std::reverse(reference_indices.begin(), reference_indices.end()); - - bool success = true; - for (size_t index = 0; index < reference_indices.size(); ++index) - { - if (indices[index] != reference_indices[index]) - { - success = false; - std::cerr << "Test failed. Reason: indices[" << index << "] = " - << indices[index] << ", it must be equal to " << reference_indices[index] << "\n"; - } - } - - std::string order_description = reverse ? "descending" : "ascending"; - std::cerr << "Sorted " << order_description << " sequence. Result: " << (success ? "Ok." : "Fail!") << "\n"; -} - - -template -static void sortBitonicSortWithPodArrays(const std::vector & data, std::vector & indices, bool ascending = true) -{ - DB::PaddedPODArray pod_array_data = DB::PaddedPODArray(data.size()); - DB::IColumn::Permutation pod_array_indices = DB::IColumn::Permutation(data.size()); - - for (size_t index = 0; index < data.size(); ++index) - { - *(pod_array_data.data() + index) = data[index]; - *(pod_array_indices.data() + index) = index; - } - - BitonicSort::getInstance().sort(pod_array_data, pod_array_indices, ascending); - - for (size_t index = 0; index < data.size(); ++index) - indices[index] = pod_array_indices[index]; -} - - -template -static void testBitonicSort(const std::string & test_name, Type min_value, Type max_value) -{ - std::cerr << test_name << std::endl; - - std::vector data; - generateTest(data, min_value, max_value); - - std::vector indices(data.size()); - - sortBitonicSortWithPodArrays(data, indices, true); - check(indices, false); - - sortBitonicSortWithPodArrays(data, indices, false); - check(indices, true); -} - - -static void straightforwardTests() -{ - testBitonicSort("Test 01: Int8.", CHAR_MIN, CHAR_MAX); - testBitonicSort("Test 02: UInt8.", 0, UCHAR_MAX); - testBitonicSort("Test 03: Int16.", SHRT_MIN, SHRT_MAX); - testBitonicSort("Test 04: UInt16.", 0, USHRT_MAX); - testBitonicSort("Test 05: Int32.", INT_MIN, INT_MAX); - testBitonicSort("Test 06: UInt32.", 0, UINT_MAX); - testBitonicSort("Test 07: Int64.", LONG_MIN, LONG_MAX); - testBitonicSort("Test 08: UInt64.", 0, ULONG_MAX); -} - - -template -static void bitonicSort(std::vector & data) -{ - size_t size = data.size(); - std::vector indices(size); - for (size_t i = 0; i < size; ++i) - indices[i] = i; - - sortBitonicSortWithPodArrays(data, indices); - - std::vector result(size); - for (size_t i = 0; i < size; ++i) - result[i] = data[indices[i]]; - - data = std::move(result); -} - - -template -static bool checkSort(const std::vector & data, size_t size) -{ - std::vector copy1(data.begin(), data.begin() + size); - std::vector copy2(data.begin(), data.begin() + size); - - std::sort(copy1.data(), copy1.data() + size); - bitonicSort(copy2); - - for (size_t i = 0; i < size; ++i) - if (copy1[i] != copy2[i]) - return false; - - return true; -} - - -int main() -{ - BitonicSort::getInstance().configure(); - - straightforwardTests(); - - size_t size = 1100; - std::vector data(size); - for (size_t i = 0; i < size; ++i) - data[i] = rand(); - - for (size_t i = 0; i 
< 128; ++i) - { - if (!checkSort(data, i)) - { - std::cerr << "fail at length " << i << std::endl; - return 1; - } - } - - for (size_t i = 128; i < size; i += 7) - { - if (!checkSort(data, i)) - { - std::cerr << "fail at length " << i << std::endl; - return 1; - } - } - - return 0; -} diff --git a/src/Common/tests/int_hashes_perf.cpp b/src/Common/tests/int_hashes_perf.cpp index 7e8495cef27..569e9273bc4 100644 --- a/src/Common/tests/int_hashes_perf.cpp +++ b/src/Common/tests/int_hashes_perf.cpp @@ -187,11 +187,6 @@ static inline size_t tabulation(UInt64 x) return res; } -static inline size_t _intHash64(UInt64 x) -{ - return static_cast(intHash64(x)); -} - const size_t BUF_SIZE = 1024; @@ -284,7 +279,7 @@ int main(int argc, char ** argv) if (!method || method == 1) test (n, data.data(), "0: identity"); if (!method || method == 2) test (n, data.data(), "1: intHash32"); - if (!method || method == 3) test<_intHash64>(n, data.data(), "2: intHash64"); + if (!method || method == 3) test (n, data.data(), "2: intHash64"); if (!method || method == 4) test (n, data.data(), "3: two rounds"); if (!method || method == 5) test (n, data.data(), "4: two rounds and two variables"); if (!method || method == 6) test (n, data.data(), "5: two rounds with less ops"); diff --git a/src/Common/ya.make b/src/Common/ya.make index 72f1fa42756..fb04ecaa141 100644 --- a/src/Common/ya.make +++ b/src/Common/ya.make @@ -50,6 +50,7 @@ SRCS( formatIPv6.cpp formatReadable.cpp getExecutablePath.cpp + getMappedArea.cpp getMultipleKeysFromConfig.cpp getNumberOfPhysicalCPUCores.cpp hasLinuxCapability.cpp @@ -98,6 +99,7 @@ SRCS( ThreadProfileEvents.cpp ThreadStatus.cpp TraceCollector.cpp + UnicodeBar.cpp UTF8Helpers.cpp WeakHash.cpp ZooKeeper/IKeeper.cpp diff --git a/src/Core/ExternalResultDescription.cpp b/src/Core/ExternalResultDescription.cpp index 941ee003c94..7165d73b7d0 100644 --- a/src/Core/ExternalResultDescription.cpp +++ b/src/Core/ExternalResultDescription.cpp @@ -74,6 +74,8 @@ void ExternalResultDescription::init(const Block & sample_block_) types.emplace_back(ValueType::vtDecimal64, is_nullable); else if (typeid_cast *>(type)) types.emplace_back(ValueType::vtDecimal128, is_nullable); + else if (typeid_cast *>(type)) + types.emplace_back(ValueType::vtDecimal256, is_nullable); else throw Exception{"Unsupported type " + type->getName(), ErrorCodes::UNKNOWN_TYPE}; } diff --git a/src/Core/ExternalResultDescription.h b/src/Core/ExternalResultDescription.h index 29294fcf2c8..f8ba2a6bba2 100644 --- a/src/Core/ExternalResultDescription.h +++ b/src/Core/ExternalResultDescription.h @@ -29,7 +29,8 @@ struct ExternalResultDescription vtDateTime64, vtDecimal32, vtDecimal64, - vtDecimal128 + vtDecimal128, + vtDecimal256 }; Block sample_block; diff --git a/src/Core/MySQL/MySQLReplication.cpp b/src/Core/MySQL/MySQLReplication.cpp index e7f113ba7af..1179c0eb46b 100644 --- a/src/Core/MySQL/MySQLReplication.cpp +++ b/src/Core/MySQL/MySQLReplication.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -14,6 +15,7 @@ namespace ErrorCodes { extern const int UNKNOWN_EXCEPTION; extern const int LOGICAL_ERROR; + extern const int ATTEMPT_TO_READ_AFTER_EOF; } namespace MySQLReplication @@ -49,14 +51,13 @@ namespace MySQLReplication { payload.readStrict(reinterpret_cast(&binlog_version), 2); assert(binlog_version == EVENT_VERSION_V4); + server_version.resize(50); payload.readStrict(reinterpret_cast(server_version.data()), 50); payload.readStrict(reinterpret_cast(&create_timestamp), 4); 
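A note on the event parsing changes in this file: the manual length arithmetic based on header.event_size is replaced with readStringUntilEOF(), which is safe because each event is now parsed from a buffer bounded to exactly one event (the MySQLBinlogEventReadBuffer introduced further down in this patch, which also hides the trailing CRC32 checksum). A minimal sketch of the helper as used above, assuming the declaration from <IO/ReadHelpers.h>:

#include <string>
#include <IO/ReadHelpers.h>   // readStringUntilEOF()

/// Reads everything remaining in the already-bounded event payload into `out`,
/// instead of computing the remaining size from header.event_size by hand.
static void readTail(DB::ReadBuffer & event_payload, std::string & out)
{
    DB::readStringUntilEOF(out, event_payload);
}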
payload.readStrict(reinterpret_cast(&event_header_length), 1); assert(event_header_length == EVENT_HEADER_LENGTH); - size_t len = header.event_size - (2 + 50 + 4 + 1 + EVENT_HEADER_LENGTH) - 1; - event_type_header_length.resize(len); - payload.readStrict(reinterpret_cast(event_type_header_length.data()), len); + readStringUntilEOF(event_type_header_length, payload); } void FormatDescriptionEvent::dump(std::ostream & out) const @@ -72,9 +73,7 @@ namespace MySQLReplication void RotateEvent::parseImpl(ReadBuffer & payload) { payload.readStrict(reinterpret_cast(&position), 8); - size_t len = header.event_size - EVENT_HEADER_LENGTH - 8 - CHECKSUM_CRC32_SIGNATURE_LENGTH; - next_binlog.resize(len); - payload.readStrict(reinterpret_cast(next_binlog.data()), len); + readStringUntilEOF(next_binlog, payload); } void RotateEvent::dump(std::ostream & out) const @@ -100,9 +99,7 @@ namespace MySQLReplication payload.readStrict(reinterpret_cast(schema.data()), schema_len); payload.ignore(1); - size_t len = payload.available() - CHECKSUM_CRC32_SIGNATURE_LENGTH; - query.resize(len); - payload.readStrict(reinterpret_cast(query.data()), len); + readStringUntilEOF(query, payload); if (query.starts_with("BEGIN") || query.starts_with("COMMIT")) { typ = QUERY_EVENT_MULTI_TXN_FLAG; @@ -198,10 +195,9 @@ namespace MySQLReplication case MYSQL_TYPE_LONGLONG: case MYSQL_TYPE_INT24: case MYSQL_TYPE_DATE: - case MYSQL_TYPE_TIME: case MYSQL_TYPE_DATETIME: - case MYSQL_TYPE_YEAR: - case MYSQL_TYPE_NEWDATE: { + case MYSQL_TYPE_NEWDATE: + { /// No data here. column_meta.emplace_back(0); break; @@ -211,16 +207,15 @@ namespace MySQLReplication case MYSQL_TYPE_DOUBLE: case MYSQL_TYPE_TIMESTAMP2: case MYSQL_TYPE_DATETIME2: - case MYSQL_TYPE_TIME2: - case MYSQL_TYPE_JSON: case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_GEOMETRY: { + { column_meta.emplace_back(UInt16(meta[pos])); pos += 1; break; } case MYSQL_TYPE_NEWDECIMAL: - case MYSQL_TYPE_STRING: { + case MYSQL_TYPE_STRING: + { /// Big-Endian auto b0 = UInt16(meta[pos] << 8); auto b1 = UInt8(meta[pos + 1]); @@ -228,8 +223,6 @@ namespace MySQLReplication pos += 2; break; } - - case MYSQL_TYPE_BIT: case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_VAR_STRING: { /// Little-Endian @@ -285,7 +278,7 @@ namespace MySQLReplication break; } - while (payload.available() > CHECKSUM_CRC32_SIGNATURE_LENGTH) + while (!payload.eof()) { parseRow(payload, columns_present_bitmap1); if (header.type == UPDATE_ROWS_EVENT_V1 || header.type == UPDATE_ROWS_EVENT_V2) @@ -358,71 +351,65 @@ namespace MySQLReplication switch (field_type) { - case MYSQL_TYPE_TINY: { + case MYSQL_TYPE_TINY: + { UInt8 val = 0; payload.readStrict(reinterpret_cast(&val), 1); row.push_back(Field{UInt8{val}}); break; } - case MYSQL_TYPE_SHORT: { + case MYSQL_TYPE_SHORT: + { UInt16 val = 0; payload.readStrict(reinterpret_cast(&val), 2); row.push_back(Field{UInt16{val}}); break; } - case MYSQL_TYPE_INT24: { + case MYSQL_TYPE_INT24: + { Int32 val = 0; payload.readStrict(reinterpret_cast(&val), 3); row.push_back(Field{Int32{val}}); break; } - case MYSQL_TYPE_LONG: { + case MYSQL_TYPE_LONG: + { UInt32 val = 0; payload.readStrict(reinterpret_cast(&val), 4); row.push_back(Field{UInt32{val}}); break; } - case MYSQL_TYPE_LONGLONG: { + case MYSQL_TYPE_LONGLONG: + { UInt64 val = 0; payload.readStrict(reinterpret_cast(&val), 8); row.push_back(Field{UInt64{val}}); break; } - case MYSQL_TYPE_FLOAT: { + case MYSQL_TYPE_FLOAT: + { Float32 val = 0; payload.readStrict(reinterpret_cast(&val), 4); row.push_back(Field{Float32{val}}); break; } - case 
MYSQL_TYPE_DOUBLE: { + case MYSQL_TYPE_DOUBLE: + { Float64 val = 0; payload.readStrict(reinterpret_cast(&val), 8); row.push_back(Field{Float64{val}}); break; } - case MYSQL_TYPE_TIMESTAMP: { + case MYSQL_TYPE_TIMESTAMP: + { UInt32 val = 0; payload.readStrict(reinterpret_cast(&val), 4); row.push_back(Field{val}); break; } - case MYSQL_TYPE_TIME: { - UInt32 i24 = 0; - payload.readStrict(reinterpret_cast(&i24), 3); - - String time_buff; - time_buff.resize(8); - sprintf( - time_buff.data(), - "%02d:%02d:%02d", - static_cast(i24 / 10000), - static_cast(i24 % 10000) / 100, - static_cast(i24 % 100)); - row.push_back(Field{String{time_buff}}); - break; - } - case MYSQL_TYPE_DATE: { + case MYSQL_TYPE_DATE: + { UInt32 i24 = 0; payload.readStrict(reinterpret_cast(&i24), 3); @@ -432,60 +419,12 @@ namespace MySQLReplication row.push_back(Field(date_day_number.toUnderType())); break; } - case MYSQL_TYPE_YEAR: { - Int32 val = 0; - payload.readStrict(reinterpret_cast(&val), 1); - - String time_buff; - time_buff.resize(4); - sprintf(time_buff.data(), "%04d", (val + 1900)); - row.push_back(Field{String{time_buff}}); - break; - } - case MYSQL_TYPE_TIME2: { - UInt32 val = 0, frac_part = 0; - - readBigEndianStrict(payload, reinterpret_cast(&val), 3); - if (readBits(val, 0, 1, 24) == 0) - { - val = ~val + 1; - } - UInt32 hour = readBits(val, 2, 10, 24); - UInt32 minute = readBits(val, 12, 6, 24); - UInt32 second = readBits(val, 18, 6, 24); - readTimeFractionalPart(payload, reinterpret_cast(&frac_part), meta); - - if (frac_part != 0) - { - String time_buff; - time_buff.resize(15); - sprintf( - time_buff.data(), - "%02d:%02d:%02d.%06d", - static_cast(hour), - static_cast(minute), - static_cast(second), - static_cast(frac_part)); - row.push_back(Field{String{time_buff}}); - } - else - { - String time_buff; - time_buff.resize(8); - sprintf( - time_buff.data(), - "%02d:%02d:%02d", - static_cast(hour), - static_cast(minute), - static_cast(second)); - row.push_back(Field{String{time_buff}}); - } - break; - } - case MYSQL_TYPE_DATETIME2: { - Int64 val = 0, fsp = 0; + case MYSQL_TYPE_DATETIME2: + { + Int64 val = 0; + UInt32 fsp = 0; readBigEndianStrict(payload, reinterpret_cast(&val), 5); - readTimeFractionalPart(payload, reinterpret_cast(&fsp), meta); + readTimeFractionalPart(payload, fsp, meta); UInt32 year_month = readBits(val, 1, 17, 40); time_t date_time = DateLUT::instance().makeDateTime( @@ -493,138 +432,130 @@ namespace MySQLReplication , readBits(val, 23, 5, 40), readBits(val, 28, 6, 40), readBits(val, 34, 6, 40) ); - row.push_back(Field{UInt32(date_time)}); + if (!meta) + row.push_back(Field{UInt32(date_time)}); + else + { + DB::DecimalUtils::DecimalComponents components{ + static_cast(date_time), 0}; + + components.fractional = fsp; + row.push_back(Field(DecimalUtils::decimalFromComponents(components, meta))); + } + break; } - case MYSQL_TYPE_TIMESTAMP2: { + case MYSQL_TYPE_TIMESTAMP2: + { UInt32 sec = 0, fsp = 0; readBigEndianStrict(payload, reinterpret_cast(&sec), 4); - readTimeFractionalPart(payload, reinterpret_cast(&fsp), meta); - row.push_back(Field{sec}); + readTimeFractionalPart(payload, fsp, meta); + + if (!meta) + row.push_back(Field{sec}); + else + { + DB::DecimalUtils::DecimalComponents components{ + static_cast(sec), 0}; + + components.fractional = fsp; + row.push_back(Field(DecimalUtils::decimalFromComponents(components, meta))); + } + break; } - case MYSQL_TYPE_NEWDECIMAL: { - Int8 digits_per_integer = 9; - Int8 precision = meta >> 8; - Int8 decimals = meta & 0xff; - const char 
compressed_byte_map[] = {0, 1, 1, 2, 2, 3, 3, 4, 4, 4}; - - Int8 integral = (precision - decimals); - UInt32 uncompressed_integers = integral / digits_per_integer; - UInt32 uncompressed_decimals = decimals / digits_per_integer; - UInt32 compressed_integers = integral - (uncompressed_integers * digits_per_integer); - UInt32 compressed_decimals = decimals - (uncompressed_decimals * digits_per_integer); - - String buff; - UInt32 bytes_to_read = uncompressed_integers * 4 + compressed_byte_map[compressed_integers] - + uncompressed_decimals * 4 + compressed_byte_map[compressed_decimals]; - buff.resize(bytes_to_read); - payload.readStrict(reinterpret_cast(buff.data()), bytes_to_read); - - String format; - format.resize(0); - - bool is_negative = ((buff[0] & 0x80) == 0); - if (is_negative) + case MYSQL_TYPE_NEWDECIMAL: + { + const auto & dispatch = [](const size_t & precision, const size_t & scale, const auto & function) -> Field { - format += "-"; - } - buff[0] ^= 0x80; + if (precision <= DecimalUtils::maxPrecision()) + return Field(function(precision, scale, Decimal32())); + else if (precision <= DecimalUtils::maxPrecision()) + return Field(function(precision, scale, Decimal64())); + else if (precision <= DecimalUtils::maxPrecision()) + return Field(function(precision, scale, Decimal128())); - ReadBufferFromString reader(buff); - /// Compressed part. - if (compressed_integers != 0) - { - Int64 val = 0; - UInt8 to_read = compressed_byte_map[compressed_integers]; - readBigEndianStrict(reader, reinterpret_cast(&val), to_read); - format += std::to_string(val); - } + return Field(function(precision, scale, Decimal256())); + }; - for (auto k = 0U; k < uncompressed_integers; k++) + const auto & read_decimal = [&](const size_t & precision, const size_t & scale, auto decimal) { - UInt32 val = 0; - readBigEndianStrict(reader, reinterpret_cast(&val), 4); - format += std::to_string(val); - } - format += "."; - for (auto k = 0U; k < uncompressed_decimals; k++) - { - UInt32 val = 0; - reader.readStrict(reinterpret_cast(&val), 4); - format += std::to_string(val); - } + using DecimalType = decltype(decimal); + static constexpr size_t digits_per_integer = 9; + static const size_t compressed_bytes_map[] = {0, 1, 1, 2, 2, 3, 3, 4, 4, 4}; + static const size_t compressed_integer_align_numbers[] = { + 0x0, 0xFF, 0xFF, 0xFFFF, 0xFFFF, 0xFFFFFF, 0xFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; + + UInt32 mask = 0; + DecimalType res(0); + + if ((*payload.position() & 0x80) == 0) + mask = UInt32(-1); + + *payload.position() ^= 0x80; - /// Compressed part. 
- if (compressed_decimals != 0) - { - Int64 val = 0; - String compressed_buff; - UInt8 to_read = compressed_byte_map[compressed_decimals]; - switch (to_read) { - case 1: { - reader.readStrict(reinterpret_cast(&val), 1); - break; - } - case 2: { - readBigEndianStrict(reader, reinterpret_cast(&val), 2); - break; - } - case 3: { - readBigEndianStrict(reader, reinterpret_cast(&val), 3); - break; - } - case 4: { - readBigEndianStrict(reader, reinterpret_cast(&val), 4); - break; - } - default: - break; - } - format += std::to_string(val); - } - row.push_back(Field{String{format}}); - break; - } - case MYSQL_TYPE_ENUM: { - Int32 val = 0; - Int32 len = (meta & 0xff); - switch (len) - { - case 1: { - payload.readStrict(reinterpret_cast(&val), 1); - break; - } - case 2: { - payload.readStrict(reinterpret_cast(&val), 2); - break; - } - default: - break; - } - row.push_back(Field{Int32{val}}); - break; - } - case MYSQL_TYPE_BIT: { - UInt32 bits = ((meta >> 8) * 8) + (meta & 0xff); - UInt32 size = (bits + 7) / 8; + size_t integral = (precision - scale); + size_t uncompressed_integers = integral / digits_per_integer; + size_t compressed_integers = integral - (uncompressed_integers * digits_per_integer); - Bitmap bitmap1; - readBitmap(payload, bitmap1, size); - row.push_back(Field{UInt64{bitmap1.to_ulong()}}); - break; - } - case MYSQL_TYPE_SET: { - UInt32 size = (meta & 0xff); + /// Compressed part. + if (compressed_integers != 0) + { + UInt32 val = 0; + size_t to_read = compressed_bytes_map[compressed_integers]; + readBigEndianStrict(payload, reinterpret_cast(&val), to_read); + res += (val ^ (mask & compressed_integer_align_numbers[compressed_integers])); + } - Bitmap bitmap1; - readBitmap(payload, bitmap1, size); - row.push_back(Field{UInt64{bitmap1.to_ulong()}}); + for (auto k = 0U; k < uncompressed_integers; k++) + { + UInt32 val = 0; + readBigEndianStrict(payload, reinterpret_cast(&val), 4); + res *= intExp10OfSize(digits_per_integer); + res += (val ^ mask); + } + } + + { + size_t uncompressed_decimals = scale / digits_per_integer; + size_t compressed_decimals = scale - (uncompressed_decimals * digits_per_integer); + + for (auto k = 0U; k < uncompressed_decimals; k++) + { + UInt32 val = 0; + readBigEndianStrict(payload, reinterpret_cast(&val), 4); + res *= intExp10OfSize(digits_per_integer); + res += (val ^ mask); + } + + /// Compressed part. 
+ if (compressed_decimals != 0) + { + UInt32 val = 0; + size_t to_read = compressed_bytes_map[compressed_decimals]; + + if (to_read) + { + readBigEndianStrict(payload, reinterpret_cast(&val), to_read); + res *= intExp10OfSize(compressed_decimals); + res += (val ^ (mask & compressed_integer_align_numbers[compressed_decimals])); + } + } + } + + if (mask != 0) + res *= -1; + + return res; + }; + + row.push_back(dispatch((meta >> 8) & 0xFF, meta & 0xFF, read_decimal)); break; } case MYSQL_TYPE_VARCHAR: - case MYSQL_TYPE_VAR_STRING: { + case MYSQL_TYPE_VAR_STRING: + { uint32_t size = 0; if (meta < 256) { @@ -641,7 +572,8 @@ namespace MySQLReplication row.push_back(Field{String{val}}); break; } - case MYSQL_TYPE_STRING: { + case MYSQL_TYPE_STRING: + { UInt32 size = 0; if (field_len < 256) { @@ -658,8 +590,8 @@ namespace MySQLReplication row.push_back(Field{String{val}}); break; } - case MYSQL_TYPE_GEOMETRY: - case MYSQL_TYPE_BLOB: { + case MYSQL_TYPE_BLOB: + { UInt32 size = 0; switch (meta) { @@ -689,16 +621,6 @@ namespace MySQLReplication row.push_back(Field{String{val}}); break; } - case MYSQL_TYPE_JSON: { - UInt32 size = 0; - payload.readStrict(reinterpret_cast(&size), meta); - - String val; - val.resize(size); - payload.readStrict(reinterpret_cast(val.data()), size); - row.push_back(Field{String{val}}); - break; - } default: throw ReplicationError( "ParseRow: Unhandled MySQL field type:" + std::to_string(field_type), ErrorCodes::UNKNOWN_EXCEPTION); @@ -738,7 +660,7 @@ namespace MySQLReplication payload.readStrict(reinterpret_cast(>id.seq_no), 8); /// Skip others. - payload.ignore(payload.available() - CHECKSUM_CRC32_SIGNATURE_LENGTH); + payload.ignoreAll(); } void GTIDEvent::dump(std::ostream & out) const @@ -751,7 +673,7 @@ namespace MySQLReplication out << "GTID Next: " << gtid_next << std::endl; } - void DryRunEvent::parseImpl(ReadBuffer & payload) { payload.ignore(header.event_size - EVENT_HEADER_LENGTH); } + void DryRunEvent::parseImpl(ReadBuffer & payload) { payload.ignoreAll(); } void DryRunEvent::dump(std::ostream & out) const { @@ -804,6 +726,9 @@ namespace MySQLReplication void MySQLFlavor::readPayloadImpl(ReadBuffer & payload) { + if (payload.eof()) + throw Exception("Attempt to read after EOF.", ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF); + UInt16 header = static_cast(*payload.position()); switch (header) { @@ -814,37 +739,42 @@ namespace MySQLReplication err.readPayloadWithUnpacked(payload); throw ReplicationError(err.error_message, ErrorCodes::UNKNOWN_EXCEPTION); } - // skip the header flag. + // skip the generic response packets header flag. 
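To make the new NEWDECIMAL reader above easier to follow, here is a worked example of the byte layout it assumes (illustration only, values chosen for the example): for a DECIMAL(10, 4) column, meta carries precision 10 and scale 4, so the integral part has 6 digits (no full 9-digit group, one 6-digit compressed group, compressed_bytes_map[6] = 3 bytes) and the fractional part has 4 digits (compressed_bytes_map[4] = 2 bytes), 5 bytes in total; negative values arrive bit-inverted, which the mask XOR undoes before the final sign flip. A self-contained sketch of that size computation:

#include <cstddef>
#include <iostream>

int main()
{
    /// Same tables as in parseRow() above.
    constexpr size_t digits_per_integer = 9;
    constexpr size_t compressed_bytes_map[] = {0, 1, 1, 2, 2, 3, 3, 4, 4, 4};

    const size_t precision = 10;
    const size_t scale = 4;                       /// DECIMAL(10, 4)
    const size_t integral = precision - scale;    /// 6 integral digits

    const size_t bytes =
        (integral / digits_per_integer) * 4                       /// full 9-digit integral groups: 0
        + compressed_bytes_map[integral % digits_per_integer]     /// 6 leftover digits: 3 bytes
        + (scale / digits_per_integer) * 4                        /// full 9-digit fractional groups: 0
        + compressed_bytes_map[scale % digits_per_integer];       /// 4 leftover digits: 2 bytes

    std::cout << bytes << '\n';                                   /// prints 5
    return 0;
}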
payload.ignore(1); - EventType event_type = static_cast(*(payload.position() + 4)); - switch (event_type) + MySQLBinlogEventReadBuffer event_payload(payload); + + EventHeader event_header; + event_header.parse(event_payload); + + switch (event_header.type) { - case FORMAT_DESCRIPTION_EVENT: { - event = std::make_shared(); - event->parseHeader(payload); - event->parseEvent(payload); + case FORMAT_DESCRIPTION_EVENT: + { + event = std::make_shared(std::move(event_header)); + event->parseEvent(event_payload); position.update(event); break; } - case ROTATE_EVENT: { - event = std::make_shared(); - event->parseHeader(payload); - event->parseEvent(payload); + case ROTATE_EVENT: + { + event = std::make_shared(std::move(event_header)); + event->parseEvent(event_payload); position.update(event); break; } - case QUERY_EVENT: { - event = std::make_shared(); - event->parseHeader(payload); - event->parseEvent(payload); + case QUERY_EVENT: + { + event = std::make_shared(std::move(event_header)); + event->parseEvent(event_payload); auto query = std::static_pointer_cast(event); switch (query->typ) { case QUERY_EVENT_MULTI_TXN_FLAG: - case QUERY_EVENT_XA: { - event = std::make_shared(); + case QUERY_EVENT_XA: + { + event = std::make_shared(std::move(query->header)); break; } default: @@ -852,68 +782,67 @@ namespace MySQLReplication } break; } - case XID_EVENT: { - event = std::make_shared(); - event->parseHeader(payload); - event->parseEvent(payload); + case XID_EVENT: + { + event = std::make_shared(std::move(event_header)); + event->parseEvent(event_payload); position.update(event); break; } - case TABLE_MAP_EVENT: { - event = std::make_shared(); - event->parseHeader(payload); - event->parseEvent(payload); + case TABLE_MAP_EVENT: + { + event = std::make_shared(std::move(event_header)); + event->parseEvent(event_payload); table_map = std::static_pointer_cast(event); break; } case WRITE_ROWS_EVENT_V1: - case WRITE_ROWS_EVENT_V2: { + case WRITE_ROWS_EVENT_V2: + { if (do_replicate()) - event = std::make_shared(table_map); + event = std::make_shared(table_map, std::move(event_header)); else - event = std::make_shared(); + event = std::make_shared(std::move(event_header)); - event->parseHeader(payload); - event->parseEvent(payload); + event->parseEvent(event_payload); break; } case DELETE_ROWS_EVENT_V1: - case DELETE_ROWS_EVENT_V2: { + case DELETE_ROWS_EVENT_V2: + { if (do_replicate()) - event = std::make_shared(table_map); + event = std::make_shared(table_map, std::move(event_header)); else - event = std::make_shared(); + event = std::make_shared(std::move(event_header)); - event->parseHeader(payload); - event->parseEvent(payload); + event->parseEvent(event_payload); break; } case UPDATE_ROWS_EVENT_V1: - case UPDATE_ROWS_EVENT_V2: { + case UPDATE_ROWS_EVENT_V2: + { if (do_replicate()) - event = std::make_shared(table_map); + event = std::make_shared(table_map, std::move(event_header)); else - event = std::make_shared(); + event = std::make_shared(std::move(event_header)); - event->parseHeader(payload); - event->parseEvent(payload); + event->parseEvent(event_payload); break; } - case GTID_EVENT: { - event = std::make_shared(); - event->parseHeader(payload); - event->parseEvent(payload); + case GTID_EVENT: + { + event = std::make_shared(std::move(event_header)); + event->parseEvent(event_payload); position.update(event); break; } - default: { - event = std::make_shared(); - event->parseHeader(payload); - event->parseEvent(payload); + default: + { + event = std::make_shared(std::move(event_header)); + 
event->parseEvent(event_payload); break; } } - payload.ignoreAll(); } } diff --git a/src/Core/MySQL/MySQLReplication.h b/src/Core/MySQL/MySQLReplication.h index ad5e53ed200..6f5b4cf0a1e 100644 --- a/src/Core/MySQL/MySQLReplication.h +++ b/src/Core/MySQL/MySQLReplication.h @@ -19,7 +19,6 @@ namespace MySQLReplication { static const int EVENT_VERSION_V4 = 4; static const int EVENT_HEADER_LENGTH = 19; - static const int CHECKSUM_CRC32_SIGNATURE_LENGTH = 4; using Bitmap = boost::dynamic_bitset<>; @@ -37,23 +36,41 @@ namespace MySQLReplication std::reverse(start, end); } - inline void readTimeFractionalPart(ReadBuffer & payload, char * to, UInt16 meta) + inline void readTimeFractionalPart(ReadBuffer & payload, UInt32 & factional, UInt16 meta) { switch (meta) { case 1: - case 2: { - readBigEndianStrict(payload, to, 1); + { + readBigEndianStrict(payload, reinterpret_cast(&factional), 1); + factional /= 10; + break; + } + case 2: + { + readBigEndianStrict(payload, reinterpret_cast(&factional), 1); break; } case 3: - case 4: { - readBigEndianStrict(payload, to, 2); + { + readBigEndianStrict(payload, reinterpret_cast(&factional), 2); + factional /= 10; + break; + } + case 4: + { + readBigEndianStrict(payload, reinterpret_cast(&factional), 2); break; } case 5: - case 6: { - readBigEndianStrict(payload, to, 3); + { + readBigEndianStrict(payload, reinterpret_cast(&factional), 3); + factional /= 10; + break; + } + case 6: + { + readBigEndianStrict(payload, reinterpret_cast(&factional), 3); break; } default: @@ -301,9 +318,10 @@ namespace MySQLReplication public: EventHeader header; + EventBase(EventHeader && header_) : header(std::move(header_)) {} + virtual ~EventBase() = default; virtual void dump(std::ostream & out) const = 0; - virtual void parseHeader(ReadBuffer & payload) { header.parse(payload); } virtual void parseEvent(ReadBuffer & payload) { parseImpl(payload); } virtual MySQLEventType type() const { return MYSQL_UNHANDLED_EVENT; } @@ -314,7 +332,10 @@ namespace MySQLReplication class FormatDescriptionEvent : public EventBase { public: - FormatDescriptionEvent() : binlog_version(0), create_timestamp(0), event_header_length(0) { } + FormatDescriptionEvent(EventHeader && header_) + : EventBase(std::move(header_)), binlog_version(0), create_timestamp(0), event_header_length(0) + { + } protected: UInt16 binlog_version; @@ -336,7 +357,7 @@ namespace MySQLReplication UInt64 position; String next_binlog; - RotateEvent() : position(0) { } + RotateEvent(EventHeader && header_) : EventBase(std::move(header_)), position(0) {} void dump(std::ostream & out) const override; protected: @@ -363,7 +384,11 @@ namespace MySQLReplication String query; QueryType typ = QUERY_EVENT_DDL; - QueryEvent() : thread_id(0), exec_time(0), schema_len(0), error_code(0), status_len(0) { } + QueryEvent(EventHeader && header_) + : EventBase(std::move(header_)), thread_id(0), exec_time(0), schema_len(0), error_code(0), status_len(0) + { + } + void dump(std::ostream & out) const override; MySQLEventType type() const override { return MYSQL_QUERY_EVENT; } @@ -374,7 +399,7 @@ namespace MySQLReplication class XIDEvent : public EventBase { public: - XIDEvent() : xid(0) { } + XIDEvent(EventHeader && header_) : EventBase(std::move(header_)), xid(0) {} protected: UInt64 xid; @@ -397,7 +422,7 @@ namespace MySQLReplication std::vector column_meta; Bitmap null_bitmap; - TableMapEvent() : table_id(0), flags(0), schema_len(0), table_len(0), column_count(0) { } + TableMapEvent(EventHeader && header_) : EventBase(std::move(header_)), 
table_id(0), flags(0), schema_len(0), table_len(0), column_count(0) {} void dump(std::ostream & out) const override; protected: @@ -413,8 +438,8 @@ namespace MySQLReplication String table; std::vector rows; - RowsEvent(std::shared_ptr table_map_) - : number_columns(0), table_id(0), flags(0), extra_data_len(0), table_map(table_map_) + RowsEvent(std::shared_ptr table_map_, EventHeader && header_) + : EventBase(std::move(header_)), number_columns(0), table_id(0), flags(0), extra_data_len(0), table_map(table_map_) { schema = table_map->schema; table = table_map->table; @@ -439,21 +464,21 @@ namespace MySQLReplication class WriteRowsEvent : public RowsEvent { public: - WriteRowsEvent(std::shared_ptr table_map_) : RowsEvent(table_map_) { } + WriteRowsEvent(std::shared_ptr table_map_, EventHeader && header_) : RowsEvent(table_map_, std::move(header_)) {} MySQLEventType type() const override { return MYSQL_WRITE_ROWS_EVENT; } }; class DeleteRowsEvent : public RowsEvent { public: - DeleteRowsEvent(std::shared_ptr table_map_) : RowsEvent(table_map_) { } + DeleteRowsEvent(std::shared_ptr table_map_, EventHeader && header_) : RowsEvent(table_map_, std::move(header_)) {} MySQLEventType type() const override { return MYSQL_DELETE_ROWS_EVENT; } }; class UpdateRowsEvent : public RowsEvent { public: - UpdateRowsEvent(std::shared_ptr table_map_) : RowsEvent(table_map_) { } + UpdateRowsEvent(std::shared_ptr table_map_, EventHeader && header_) : RowsEvent(table_map_, std::move(header_)) {} MySQLEventType type() const override { return MYSQL_UPDATE_ROWS_EVENT; } }; @@ -463,7 +488,7 @@ namespace MySQLReplication UInt8 commit_flag; GTID gtid; - GTIDEvent() : commit_flag(0) { } + GTIDEvent(EventHeader && header_) : EventBase(std::move(header_)), commit_flag(0) {} void dump(std::ostream & out) const override; protected: @@ -472,6 +497,8 @@ namespace MySQLReplication class DryRunEvent : public EventBase { + public: + DryRunEvent(EventHeader && header_) : EventBase(std::move(header_)) {} void dump(std::ostream & out) const override; protected: diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 7d0f54fe725..5f74cbb154d 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -88,6 +88,7 @@ class IColumn; M(UInt64, replication_alter_columns_timeout, 60, "Wait for actions to change the table structure within the specified number of seconds. 0 - wait unlimited time.", 0) \ \ M(LoadBalancing, load_balancing, LoadBalancing::RANDOM, "Which replicas (among healthy replicas) to preferably send a query to (on the first attempt) for distributed processing.", 0) \ + M(UInt64, load_balancing_first_offset, 0, "Which replica to preferably send a query when FIRST_OR_RANDOM load balancing strategy is used.", 0) \ \ M(TotalsMode, totals_mode, TotalsMode::AFTER_HAVING_EXCLUSIVE, "How to calculate TOTALS when HAVING is present, as well as when max_rows_to_group_by and group_by_overflow_mode = ‘any’ are present.", IMPORTANT) \ M(Float, totals_auto_threshold, 0.5, "The threshold for totals_mode = 'auto'.", 0) \ @@ -104,8 +105,6 @@ class IColumn; M(UInt64, parallel_replicas_count, 0, "", 0) \ M(UInt64, parallel_replica_offset, 0, "", 0) \ \ - M(SpecialSort, special_sort, SpecialSort::NOT_SPECIFIED, "Specifies a sorting algorithm which will be using in ORDER BY query.", 0) \ - \ M(Bool, skip_unavailable_shards, false, "If 1, ClickHouse silently skips unavailable shards and nodes unresolvable through DNS. 
Shard is marked as unavailable when none of the replicas can be reached.", 0) \ \ M(UInt64, parallel_distributed_insert_select, 0, "Process distributed INSERT SELECT query in the same cluster on local tables on every shard, if 1 SELECT is executed on each shard, if 2 SELECT and INSERT is executed on each shard", 0) \ @@ -233,6 +232,10 @@ class IColumn; M(UInt64, max_bytes_to_read, 0, "Limit on read bytes (after decompression) from the most 'deep' sources. That is, only in the deepest subquery. When reading from a remote server, it is only checked on a remote server.", 0) \ M(OverflowMode, read_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \ \ + M(UInt64, max_rows_to_read_leaf, 0, "Limit on read rows on the leaf nodes for distributed queries. Limit is applied for local reads only excluding the final merge stage on the root node.", 0) \ + M(UInt64, max_bytes_to_read_leaf, 0, "Limit on read bytes (after decompression) on the leaf nodes for distributed queries. Limit is applied for local reads only excluding the final merge stage on the root node.", 0) \ + M(OverflowMode, read_overflow_mode_leaf, OverflowMode::THROW, "What to do when the leaf limit is exceeded.", 0) \ + \ M(UInt64, max_rows_to_group_by, 0, "", 0) \ M(OverflowModeGroupBy, group_by_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \ M(UInt64, max_bytes_before_external_group_by, 0, "", 0) \ diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp index c0d2906e2fc..b4db51a506d 100644 --- a/src/Core/SettingsEnums.cpp +++ b/src/Core/SettingsEnums.cpp @@ -23,11 +23,6 @@ IMPLEMENT_SETTING_ENUM(LoadBalancing, ErrorCodes::UNKNOWN_LOAD_BALANCING, {"round_robin", LoadBalancing::ROUND_ROBIN}}) -IMPLEMENT_SETTING_ENUM(SpecialSort, ErrorCodes::UNKNOWN_JOIN, - {{"not_specified", SpecialSort::NOT_SPECIFIED}, - {"opencl_bitonic", SpecialSort::OPENCL_BITONIC}}) - - IMPLEMENT_SETTING_ENUM(JoinStrictness, ErrorCodes::UNKNOWN_JOIN, {{"", JoinStrictness::Unspecified}, {"ALL", JoinStrictness::ALL}, diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h index 7ed5ffb0c35..426497fff78 100644 --- a/src/Core/SettingsEnums.h +++ b/src/Core/SettingsEnums.h @@ -47,15 +47,6 @@ enum class JoinAlgorithm DECLARE_SETTING_ENUM(JoinAlgorithm) -enum class SpecialSort -{ - NOT_SPECIFIED = 0, - OPENCL_BITONIC, -}; - -DECLARE_SETTING_ENUM(SpecialSort) - - /// Which rows should be included in TOTALS. 
enum class TotalsMode { diff --git a/src/Core/SortDescription.h b/src/Core/SortDescription.h index 2198789b0b6..bd3b7bc45ff 100644 --- a/src/Core/SortDescription.h +++ b/src/Core/SortDescription.h @@ -32,22 +32,20 @@ struct SortColumnDescription std::shared_ptr collator; /// Collator for locale-specific comparison of strings bool with_fill; FillColumnDescription fill_description; - SpecialSort special_sort; - SortColumnDescription( size_t column_number_, int direction_, int nulls_direction_, - const std::shared_ptr & collator_ = nullptr, SpecialSort special_sort_ = SpecialSort::NOT_SPECIFIED, + const std::shared_ptr & collator_ = nullptr, bool with_fill_ = false, const FillColumnDescription & fill_description_ = {}) : column_number(column_number_), direction(direction_), nulls_direction(nulls_direction_), collator(collator_) - , with_fill(with_fill_), fill_description(fill_description_), special_sort(special_sort_) {} + , with_fill(with_fill_), fill_description(fill_description_) {} SortColumnDescription( const std::string & column_name_, int direction_, int nulls_direction_, - const std::shared_ptr & collator_ = nullptr, SpecialSort special_sort_ = SpecialSort::NOT_SPECIFIED, + const std::shared_ptr & collator_ = nullptr, bool with_fill_ = false, const FillColumnDescription & fill_description_ = {}) : column_name(column_name_), column_number(0), direction(direction_), nulls_direction(nulls_direction_) - , collator(collator_), with_fill(with_fill_), fill_description(fill_description_), special_sort(special_sort_) {} + , collator(collator_), with_fill(with_fill_), fill_description(fill_description_) {} bool operator == (const SortColumnDescription & other) const { diff --git a/src/DataTypes/convertMySQLDataType.cpp b/src/DataTypes/convertMySQLDataType.cpp index a509cf8b091..c67f90e6408 100644 --- a/src/DataTypes/convertMySQLDataType.cpp +++ b/src/DataTypes/convertMySQLDataType.cpp @@ -40,74 +40,75 @@ DataTypePtr convertMySQLDataType(MultiEnum type_support, { // we expect mysql_data_type to be either "basic_type" or "type_with_params(param1, param2, ...)" auto data_type = std::string_view(mysql_data_type); - const auto param_start_pos = data_type.find("("); + const auto param_start_pos = data_type.find('('); const auto type_name = data_type.substr(0, param_start_pos); - DataTypePtr res = [&]() -> DataTypePtr { - if (type_name == "tinyint") - { - if (is_unsigned) - return std::make_shared(); - else - return std::make_shared(); - } - if (type_name == "smallint") - { - if (is_unsigned) - return std::make_shared(); - else - return std::make_shared(); - } - if (type_name == "int" || type_name == "mediumint") - { - if (is_unsigned) - return std::make_shared(); - else - return std::make_shared(); - } - if (type_name == "bigint") - { - if (is_unsigned) - return std::make_shared(); - else - return std::make_shared(); - } - if (type_name == "float") - return std::make_shared(); - if (type_name == "double") - return std::make_shared(); - if (type_name == "date") - return std::make_shared(); - if (type_name == "binary") - return std::make_shared(length); - if (type_name == "datetime" || type_name == "timestamp") - { - if (!type_support.isSet(MySQLDataTypesSupport::DATETIME64)) - return std::make_shared(); + DataTypePtr res; - if (type_name == "timestamp" && scale == 0) - { - return std::make_shared(); - } - else if (type_name == "datetime" || type_name == "timestamp") - { - return std::make_shared(scale); - } - } - - if (type_support.isSet(MySQLDataTypesSupport::DECIMAL) && (type_name == "numeric" || 
type_name == "decimal")) + if (type_name == "tinyint") + { + if (is_unsigned) + res = std::make_shared(); + else + res = std::make_shared(); + } + else if (type_name == "smallint") + { + if (is_unsigned) + res = std::make_shared(); + else + res = std::make_shared(); + } + else if (type_name == "int" || type_name == "mediumint") + { + if (is_unsigned) + res = std::make_shared(); + else + res = std::make_shared(); + } + else if (type_name == "bigint") + { + if (is_unsigned) + res = std::make_shared(); + else + res = std::make_shared(); + } + else if (type_name == "float") + res = std::make_shared(); + else if (type_name == "double") + res = std::make_shared(); + else if (type_name == "date") + res = std::make_shared(); + else if (type_name == "binary") + res = std::make_shared(length); + else if (type_name == "datetime" || type_name == "timestamp") + { + if (!type_support.isSet(MySQLDataTypesSupport::DATETIME64)) { - if (precision <= DecimalUtils::maxPrecision()) - return std::make_shared>(precision, scale); - else if (precision <= DecimalUtils::maxPrecision()) - return std::make_shared>(precision, scale); - else if (precision <= DecimalUtils::maxPrecision()) - return std::make_shared>(precision, scale); + res = std::make_shared(); } + else if (type_name == "timestamp" && scale == 0) + { + res = std::make_shared(); + } + else if (type_name == "datetime" || type_name == "timestamp") + { + res = std::make_shared(scale); + } + } + else if (type_support.isSet(MySQLDataTypesSupport::DECIMAL) && (type_name == "numeric" || type_name == "decimal")) + { + if (precision <= DecimalUtils::maxPrecision()) + res = std::make_shared>(precision, scale); + else if (precision <= DecimalUtils::maxPrecision()) + res = std::make_shared>(precision, scale); + else if (precision <= DecimalUtils::maxPrecision()) + res = std::make_shared>(precision, scale); + } - /// Also String is fallback for all unknown types. - return std::make_shared(); - }(); + /// Also String is fallback for all unknown types. + if (!res) + res = std::make_shared(); if (is_nullable) res = std::make_shared(res); diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 8f4a4522c59..ed17a8eccb1 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -277,7 +277,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora void DatabaseAtomic::commitAlterTable(const StorageID & table_id, const String & table_metadata_tmp_path, const String & table_metadata_path) { - bool check_file_exists = supportsRenameat2(); + bool check_file_exists = true; SCOPE_EXIT({ std::error_code code; if (check_file_exists) std::filesystem::remove(table_metadata_tmp_path, code); }); std::unique_lock lock{mutex}; @@ -286,9 +286,8 @@ void DatabaseAtomic::commitAlterTable(const StorageID & table_id, const String & if (table_id.uuid != actual_table_id.uuid) throw Exception("Cannot alter table because it was renamed", ErrorCodes::CANNOT_ASSIGN_ALTER); - if (check_file_exists) - renameExchange(table_metadata_tmp_path, table_metadata_path); - else + check_file_exists = renameExchangeIfSupported(table_metadata_tmp_path, table_metadata_path); + if (!check_file_exists) std::filesystem::rename(table_metadata_tmp_path, table_metadata_path); } @@ -302,7 +301,7 @@ void DatabaseAtomic::assertDetachedTableNotInUse(const UUID & uuid) /// To avoid it, we remember UUIDs of detached tables and does not allow ATTACH table with such UUID until detached instance still in use. 
if (detached_tables.count(uuid)) throw Exception("Cannot attach table with UUID " + toString(uuid) + - ", because it was detached but still used by come query. Retry later.", ErrorCodes::TABLE_ALREADY_EXISTS); + ", because it was detached but still used by some query. Retry later.", ErrorCodes::TABLE_ALREADY_EXISTS); } DatabaseAtomic::DetachedTables DatabaseAtomic::cleenupDetachedTables() diff --git a/src/Databases/DatabaseMemory.cpp b/src/Databases/DatabaseMemory.cpp index 221e54ce741..5eacb846d52 100644 --- a/src/Databases/DatabaseMemory.cpp +++ b/src/Databases/DatabaseMemory.cpp @@ -75,7 +75,7 @@ ASTPtr DatabaseMemory::getCreateTableQueryImpl(const String & table_name, const else return {}; } - return it->second; + return it->second->clone(); } UUID DatabaseMemory::tryGetTableUUID(const String & table_name) const diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 0512a155418..13aeb7de148 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -291,6 +291,8 @@ void DatabaseOrdinary::alterTable(const Context & context, const StorageID & tab if (metadata.table_ttl.definition_ast) storage_ast.set(storage_ast.ttl_table, metadata.table_ttl.definition_ast); + else if (storage_ast.ttl_table != nullptr) /// TTL was removed + storage_ast.ttl_table = nullptr; if (metadata.settings_changes) storage_ast.set(storage_ast.settings, metadata.settings_changes); diff --git a/src/Databases/MySQL/MaterializeMetadata.cpp b/src/Databases/MySQL/MaterializeMetadata.cpp index 74fd59dc98e..3c5bfdec594 100644 --- a/src/Databases/MySQL/MaterializeMetadata.cpp +++ b/src/Databases/MySQL/MaterializeMetadata.cpp @@ -145,7 +145,7 @@ void MaterializeMetadata::transaction(const MySQLReplication::Position & positio String persistent_tmp_path = persistent_path + ".tmp"; { - WriteBufferFromFile out(persistent_tmp_path, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_TRUNC | O_CREAT | O_EXCL); + WriteBufferFromFile out(persistent_tmp_path, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_TRUNC | O_CREAT); /// TSV format metadata file. 
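The one-line DatabaseMemory change above (returning it->second->clone()) matters because the stored AST would otherwise be shared, mutable state handed to every caller of getCreateTableQuery(). A generic, self-contained illustration of the hazard and the fix, using a toy Node type rather than IAST:

#include <cassert>
#include <memory>
#include <string>

struct Node
{
    std::string text;
    std::shared_ptr<Node> clone() const { return std::make_shared<Node>(*this); }
};

int main()
{
    auto stored = std::make_shared<Node>(Node{"CREATE TABLE t (x Int32)"});

    auto handed_out = stored->clone();       /// caller receives an independent copy
    handed_out->text = "mutated by caller";  /// caller-side rewrites do not leak back

    assert(stored->text == "CREATE TABLE t (x Int32)");
    return 0;
}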
writeString("Version:\t" + toString(meta_version), out); diff --git a/src/Databases/MySQL/MaterializeMySQLSyncThread.cpp b/src/Databases/MySQL/MaterializeMySQLSyncThread.cpp index 465a7cb912a..e26f5c2fd52 100644 --- a/src/Databases/MySQL/MaterializeMySQLSyncThread.cpp +++ b/src/Databases/MySQL/MaterializeMySQLSyncThread.cpp @@ -9,6 +9,7 @@ # include # include # include +# include # include # include # include @@ -453,6 +454,14 @@ static void writeFieldsToColumn( write_data_to_column(casted_float32_column, Float64(), Float32()); else if (ColumnFloat64 * casted_float64_column = typeid_cast(&column_to)) write_data_to_column(casted_float64_column, Float64(), Float64()); + else if (ColumnDecimal * casted_decimal_32_column = typeid_cast *>(&column_to)) + write_data_to_column(casted_decimal_32_column, Decimal32(), Decimal32()); + else if (ColumnDecimal * casted_decimal_64_column = typeid_cast *>(&column_to)) + write_data_to_column(casted_decimal_64_column, Decimal64(), Decimal64()); + else if (ColumnDecimal * casted_decimal_128_column = typeid_cast *>(&column_to)) + write_data_to_column(casted_decimal_128_column, Decimal128(), Decimal128()); + else if (ColumnDecimal * casted_decimal_256_column = typeid_cast *>(&column_to)) + write_data_to_column(casted_decimal_256_column, Decimal256(), Decimal256()); else if (ColumnInt32 * casted_int32_column = typeid_cast(&column_to)) { for (size_t index = 0; index < rows_data.size(); ++index) diff --git a/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp index 29aee9bfc21..cb39dffeb6c 100644 --- a/src/Dictionaries/CacheDictionary.cpp +++ b/src/Dictionaries/CacheDictionary.cpp @@ -822,7 +822,24 @@ void CacheDictionary::waitForCurrentUpdateFinish(UpdateUnitPtr & update_unit_ptr if (update_unit_ptr->current_exception) - std::rethrow_exception(update_unit_ptr->current_exception); + { + // There might have been a single update unit for multiple callers in + // independent threads, and current_exception will be the same for them. + // Don't just rethrow it, because sharing the same exception object + // between multiple threads can lead to weird effects if they decide to + // modify it, for example, by adding some error context. + try + { + std::rethrow_exception(update_unit_ptr->current_exception); + } + catch (...) + { + throw DB::Exception(ErrorCodes::CACHE_DICTIONARY_UPDATE_FAIL, + "Dictionary update failed: {}", + getCurrentExceptionMessage(true /*with stack trace*/, + true /*check embedded stack trace*/)); + } + } } void CacheDictionary::tryPushToUpdateQueueOrThrow(UpdateUnitPtr & update_unit_ptr) const diff --git a/src/Disks/IVolume.h b/src/Disks/IVolume.h index 5e7f09e1d04..eaf3bf1dbd4 100644 --- a/src/Disks/IVolume.h +++ b/src/Disks/IVolume.h @@ -36,10 +36,11 @@ using Volumes = std::vector; class IVolume : public Space { public: - IVolume(String name_, Disks disks_, size_t max_data_part_size_ = 0) + IVolume(String name_, Disks disks_, size_t max_data_part_size_ = 0, bool perform_ttl_move_on_insert_ = true) : disks(std::move(disks_)) , name(name_) , max_data_part_size(max_data_part_size_) + , perform_ttl_move_on_insert(perform_ttl_move_on_insert_) { } @@ -70,6 +71,9 @@ protected: public: /// Max size of reservation, zero means unlimited size UInt64 max_data_part_size = 0; + /// Should a new data part be synchronously moved to a volume according to ttl on insert + /// or move this part in background task asynchronously after insert. + bool perform_ttl_move_on_insert = true; }; /// Reservation for multiple disks at once. 
Can be used in RAID1 implementation. diff --git a/src/Disks/S3/registerDiskS3.cpp b/src/Disks/S3/registerDiskS3.cpp index fbd19ce1cd9..1c7a5e24282 100644 --- a/src/Disks/S3/registerDiskS3.cpp +++ b/src/Disks/S3/registerDiskS3.cpp @@ -4,7 +4,6 @@ #include #include "DiskS3.h" #include "Disks/DiskCacheWrapper.h" -#include "Disks/DiskCacheWrapper.cpp" #include "Disks/DiskFactory.h" #include "ProxyConfiguration.h" #include "ProxyListConfiguration.h" diff --git a/src/Disks/VolumeJBOD.cpp b/src/Disks/VolumeJBOD.cpp index bf9dcf7f5d8..3ac8a50acfb 100644 --- a/src/Disks/VolumeJBOD.cpp +++ b/src/Disks/VolumeJBOD.cpp @@ -53,6 +53,9 @@ VolumeJBOD::VolumeJBOD( static constexpr UInt64 MIN_PART_SIZE = 8u * 1024u * 1024u; if (max_data_part_size != 0 && max_data_part_size < MIN_PART_SIZE) LOG_WARNING(logger, "Volume {} max_data_part_size is too low ({} < {})", backQuote(name), ReadableSize(max_data_part_size), ReadableSize(MIN_PART_SIZE)); + + /// Default value is 'true' due to backward compatibility. + perform_ttl_move_on_insert = config.getBool(config_prefix + ".perform_ttl_move_on_insert", true); } DiskPtr VolumeJBOD::getDisk(size_t /* index */) const diff --git a/src/Formats/MySQLBlockInputStream.cpp b/src/Formats/MySQLBlockInputStream.cpp index f85680c0031..be1e254b22f 100644 --- a/src/Formats/MySQLBlockInputStream.cpp +++ b/src/Formats/MySQLBlockInputStream.cpp @@ -90,7 +90,8 @@ namespace case ValueType::vtDateTime64:[[fallthrough]]; case ValueType::vtDecimal32: [[fallthrough]]; case ValueType::vtDecimal64: [[fallthrough]]; - case ValueType::vtDecimal128: + case ValueType::vtDecimal128:[[fallthrough]]; + case ValueType::vtDecimal256: { ReadBuffer buffer(const_cast(value.data()), value.size(), 0); data_type.deserializeAsWholeText(column, buffer, FormatSettings{}); diff --git a/src/Functions/FunctionBase64Conversion.h b/src/Functions/FunctionBase64Conversion.h index 450bad496cd..b6217f9b0bc 100644 --- a/src/Functions/FunctionBase64Conversion.h +++ b/src/Functions/FunctionBase64Conversion.h @@ -4,6 +4,7 @@ #if USE_BASE64 # include +# include # include # include # include @@ -151,6 +152,10 @@ public: } } + /// Base64 library is using AVX-512 with some shuffle operations. + /// Memory sanitizer don't understand if there was uninitialized memory in SIMD register but it was not used in the result of shuffle. 
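__msan_unpoison() is only available when building with MemorySanitizer; the include added above presumably pulls in a wrapper that turns it into a no-op otherwise. A sketch of the usual guard, assuming clang's __has_feature and <sanitizer/msan.h>:

#include <cstddef>

/// Under MemorySanitizer, marks [ptr, ptr + size) as initialized; otherwise expands to nothing.
#if defined(__has_feature)
#    if __has_feature(memory_sanitizer)
#        include <sanitizer/msan.h>
#        define UNPOISON_IF_MSAN(ptr, size) __msan_unpoison(ptr, size)
#    endif
#endif
#if !defined(UNPOISON_IF_MSAN)
#    define UNPOISON_IF_MSAN(ptr, size) ((void) (ptr), (void) (size))
#endif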
+ __msan_unpoison(dst_pos, outlen); + source += srclen + 1; dst_pos += outlen + 1; diff --git a/src/Functions/GatherUtils/ArraySourceVisitor.h b/src/Functions/GatherUtils/ArraySourceVisitor.h index 989f9ae918a..260e9a87161 100644 --- a/src/Functions/GatherUtils/ArraySourceVisitor.h +++ b/src/Functions/GatherUtils/ArraySourceVisitor.h @@ -19,12 +19,8 @@ struct ConstSource; using NumericArraySources = typename TypeListMap::Type; using BasicArraySources = typename AppendToTypeList::Type; -using NullableArraySources = typename TypeListMap::Type; -using BasicAndNullableArraySources = typename TypeListConcat::Type; -using ConstArraySources = typename TypeListMap::Type; -using TypeListArraySources = typename TypeListConcat::Type; -class ArraySourceVisitor : public ApplyTypeListForClass::Type +class ArraySourceVisitor : public ApplyTypeListForClass::Type { protected: ~ArraySourceVisitor() = default; diff --git a/src/Functions/GatherUtils/IArraySink.h b/src/Functions/GatherUtils/IArraySink.h index a41d99b5f6a..8bc3f05edd1 100644 --- a/src/Functions/GatherUtils/IArraySink.h +++ b/src/Functions/GatherUtils/IArraySink.h @@ -13,7 +13,6 @@ namespace ErrorCodes namespace GatherUtils { -#pragma GCC visibility push(hidden) struct IArraySink { @@ -25,6 +24,8 @@ struct IArraySink } }; +#pragma GCC visibility push(hidden) + template class ArraySinkImpl : public Visitable {}; diff --git a/src/Functions/GatherUtils/IArraySource.h b/src/Functions/GatherUtils/IArraySource.h index 386108f038e..c34c3ddb683 100644 --- a/src/Functions/GatherUtils/IArraySource.h +++ b/src/Functions/GatherUtils/IArraySource.h @@ -13,7 +13,6 @@ namespace ErrorCodes namespace GatherUtils { -#pragma GCC visibility push(hidden) struct IArraySource { @@ -31,6 +30,8 @@ struct IArraySource } }; +#pragma GCC visibility push(hidden) + template class ArraySourceImpl : public Visitable {}; diff --git a/src/Functions/GatherUtils/IValueSource.h b/src/Functions/GatherUtils/IValueSource.h index 55af364a3a9..2be44143c84 100644 --- a/src/Functions/GatherUtils/IValueSource.h +++ b/src/Functions/GatherUtils/IValueSource.h @@ -13,7 +13,6 @@ namespace ErrorCodes namespace GatherUtils { -#pragma GCC visibility push(hidden) struct IValueSource { @@ -27,6 +26,8 @@ struct IValueSource virtual bool isConst() const { return false; } }; +#pragma GCC visibility push(hidden) + template class ValueSourceImpl : public Visitable {}; diff --git a/src/Functions/GatherUtils/Selectors.h b/src/Functions/GatherUtils/Selectors.h index 9c96b36460d..bbe631a6a3a 100644 --- a/src/Functions/GatherUtils/Selectors.h +++ b/src/Functions/GatherUtils/Selectors.h @@ -32,20 +32,30 @@ void callSelectMemberFunctionWithTupleArgument(Tuple & tuple, Args && ... args) callSelectMemberFunctionWithTupleArgument(tuple, args ..., std::get(tuple)); } +template +void callSelectSource(bool is_const, bool is_nullable, Tuple & tuple, Args && ... args) +{ + if constexpr (index == std::tuple_size::value) + Base::selectSource(is_const, is_nullable, args ...); + else + callSelectSource(is_const, is_nullable, tuple, args ..., std::get(tuple)); +} + template struct ArraySourceSelectorVisitor final : public ArraySourceVisitorImpl> { - explicit ArraySourceSelectorVisitor(Args && ... args) : packed_args(args ...) {} + explicit ArraySourceSelectorVisitor(IArraySource & source, Args && ... 
args) : packed_args(args ...), array_source(source) {} using Tuple = std::tuple; template void visitImpl(Source & source) { - callSelectMemberFunctionWithTupleArgument(packed_args, source); + callSelectSource(array_source.isConst(), array_source.isNullable(), packed_args, source); } Tuple packed_args; + IArraySource & array_source; }; template @@ -54,7 +64,7 @@ struct ArraySourceSelector template static void select(IArraySource & source, Args && ... args) { - ArraySourceSelectorVisitor visitor(args ...); + ArraySourceSelectorVisitor visitor(source, args ...); source.accept(visitor); } }; @@ -87,56 +97,6 @@ struct ArraySinkSelector } }; - -template -struct ValueSourceSelectorVisitor final : public ValueSourceVisitorImpl> -{ - explicit ValueSourceSelectorVisitor(Args && ... args) : packed_args(args ...) {} - - using Tuple = std::tuple; - - template - void visitImpl(Source & source) - { - callSelectMemberFunctionWithTupleArgument(packed_args, source); - } - - Tuple packed_args; -}; - -template -struct ValueSourceSelector -{ - template - static void select(IValueSource & source, Args && ... args) - { - ValueSourceSelectorVisitor visitor(args ...); - source.accept(visitor); - } -}; - -template -struct ArraySinkSourceSelector -{ - template - static void select(IArraySource & source, IArraySink & sink, Args && ... args) - { - ArraySinkSelector::select(sink, source, args ...); - } - - template - static void selectImpl(Sink && sink, IArraySource & source, Args && ... args) - { - ArraySourceSelector::select(source, sink, args ...); - } - - template - static void selectImpl(Source && source, Sink && sink, Args && ... args) - { - Base::selectSourceSink(source, sink, args ...); - } -}; - template struct ArraySourcePairSelector { @@ -147,15 +107,17 @@ struct ArraySourcePairSelector } template - static void selectImpl(FirstSource && first, IArraySource & second, Args && ... args) + static void selectSource(bool is_const, bool is_nullable, FirstSource && first, IArraySource & second, Args && ... args) { - ArraySourceSelector::select(second, first, args ...); + ArraySourceSelector::select(second, is_const, is_nullable, first, args ...); } template - static void selectImpl(SecondSource && second, FirstSource && first, Args && ... args) + static void selectSource(bool is_second_const, bool is_second_nullable, SecondSource && second, + bool is_first_const, bool is_first_nullable, FirstSource && first, Args && ... 
args) { - Base::selectSourcePair(first, second, args ...); + Base::selectSourcePair(is_first_const, is_first_nullable, first, + is_second_const, is_second_nullable, second, args ...); } }; diff --git a/src/Functions/GatherUtils/concat.cpp b/src/Functions/GatherUtils/concat.cpp index 3435baf147e..d73c98a0e88 100644 --- a/src/Functions/GatherUtils/concat.cpp +++ b/src/Functions/GatherUtils/concat.cpp @@ -24,36 +24,31 @@ struct ArrayConcat : public ArraySourceSelector using Sources = std::vector>; template - static void selectImpl(Source && source, const Sources & sources, ColumnArray::MutablePtr & result) + static void selectSource(bool /*is_const*/, bool is_nullable, Source & source, const Sources & sources, ColumnArray::MutablePtr & result) { using SourceType = typename std::decay::type; using Sink = typename SourceType::SinkType; - result = ColumnArray::create(source.createValuesColumn()); - Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); - concat(sources, std::move(sink)); - } + if (is_nullable) + { + using NullableSource = NullableArraySource; + using NullableSink = typename NullableSource::SinkType; - template - static void selectImpl(ConstSource && source, const Sources & sources, ColumnArray::MutablePtr & result) - { - using SourceType = typename std::decay::type; - using Sink = typename SourceType::SinkType; - result = ColumnArray::create(source.createValuesColumn()); - Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + auto & nullable_source = static_cast(source); - concat(sources, std::move(sink)); - } - template - static void selectImpl(ConstSource & source, const Sources & sources, ColumnArray::MutablePtr & result) - { - using SourceType = typename std::decay::type; - using Sink = typename SourceType::SinkType; - result = ColumnArray::create(source.createValuesColumn()); - Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + result = ColumnArray::create(nullable_source.createValuesColumn()); + NullableSink sink(result->getData(), result->getOffsets(), source.getColumnSize()); - concat(sources, std::move(sink)); + concat(sources, std::move(sink)); + } + else + { + result = ColumnArray::create(source.createValuesColumn()); + Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + concat(sources, std::move(sink)); + } } }; diff --git a/src/Functions/GatherUtils/has_all.cpp b/src/Functions/GatherUtils/has_all.cpp index 491631d0c7c..6e34a851c02 100644 --- a/src/Functions/GatherUtils/has_all.cpp +++ b/src/Functions/GatherUtils/has_all.cpp @@ -11,9 +11,53 @@ namespace struct ArrayHasAllSelectArraySourcePair : public ArraySourcePairSelector { template - static void selectSourcePair(FirstSource && first, SecondSource && second, ColumnUInt8 & result) + static void callFunction(FirstSource && first, + bool is_second_const, bool is_second_nullable, SecondSource && second, + ColumnUInt8 & result) { - arrayAllAny(first, second, result); + using SourceType = typename std::decay::type; + + if (is_second_nullable) + { + using NullableSource = NullableArraySource; + + if (is_second_const) + arrayAllAny(first, static_cast &>(second), result); + else + arrayAllAny(first, static_cast(second), result); + } + else + { + if (is_second_const) + arrayAllAny(first, static_cast &>(second), result); + else + arrayAllAny(first, second, result); + } + } + + template + static void selectSourcePair(bool is_first_const, bool is_first_nullable, FirstSource && first, + bool is_second_const, bool 
is_second_nullable, SecondSource && second, + ColumnUInt8 & result) + { + using SourceType = typename std::decay::type; + + if (is_first_nullable) + { + using NullableSource = NullableArraySource; + + if (is_first_const) + callFunction(static_cast &>(first), is_second_const, is_second_nullable, second, result); + else + callFunction(static_cast(first), is_second_const, is_second_nullable, second, result); + } + else + { + if (is_first_const) + callFunction(static_cast &>(first), is_second_const, is_second_nullable, second, result); + else + callFunction(first, is_second_const, is_second_nullable, second, result); + } } }; diff --git a/src/Functions/GatherUtils/has_any.cpp b/src/Functions/GatherUtils/has_any.cpp index 6ad0eeb8dc0..b7a8c9f620d 100644 --- a/src/Functions/GatherUtils/has_any.cpp +++ b/src/Functions/GatherUtils/has_any.cpp @@ -11,9 +11,53 @@ namespace struct ArrayHasAnySelectArraySourcePair : public ArraySourcePairSelector { template - static void selectSourcePair(FirstSource && first, SecondSource && second, ColumnUInt8 & result) + static void callFunction(FirstSource && first, + bool is_second_const, bool is_second_nullable, SecondSource && second, + ColumnUInt8 & result) { - arrayAllAny(first, second, result); + using SourceType = typename std::decay::type; + + if (is_second_nullable) + { + using NullableSource = NullableArraySource; + + if (is_second_const) + arrayAllAny(first, static_cast &>(second), result); + else + arrayAllAny(first, static_cast(second), result); + } + else + { + if (is_second_const) + arrayAllAny(first, static_cast &>(second), result); + else + arrayAllAny(first, second, result); + } + } + + template + static void selectSourcePair(bool is_first_const, bool is_first_nullable, FirstSource && first, + bool is_second_const, bool is_second_nullable, SecondSource && second, + ColumnUInt8 & result) + { + using SourceType = typename std::decay::type; + + if (is_first_nullable) + { + using NullableSource = NullableArraySource; + + if (is_first_const) + callFunction(static_cast &>(first), is_second_const, is_second_nullable, second, result); + else + callFunction(static_cast(first), is_second_const, is_second_nullable, second, result); + } + else + { + if (is_first_const) + callFunction(static_cast &>(first), is_second_const, is_second_nullable, second, result); + else + callFunction(first, is_second_const, is_second_nullable, second, result); + } } }; diff --git a/src/Functions/GatherUtils/has_substr.cpp b/src/Functions/GatherUtils/has_substr.cpp index fe16c423428..244a1d21633 100644 --- a/src/Functions/GatherUtils/has_substr.cpp +++ b/src/Functions/GatherUtils/has_substr.cpp @@ -11,9 +11,53 @@ namespace struct ArrayHasSubstrSelectArraySourcePair : public ArraySourcePairSelector { template - static void selectSourcePair(FirstSource && first, SecondSource && second, ColumnUInt8 & result) + static void callFunction(FirstSource && first, + bool is_second_const, bool is_second_nullable, SecondSource && second, + ColumnUInt8 & result) { - arrayAllAny(first, second, result); + using SourceType = typename std::decay::type; + + if (is_second_nullable) + { + using NullableSource = NullableArraySource; + + if (is_second_const) + arrayAllAny(first, static_cast &>(second), result); + else + arrayAllAny(first, static_cast(second), result); + } + else + { + if (is_second_const) + arrayAllAny(first, static_cast &>(second), result); + else + arrayAllAny(first, second, result); + } + } + + template + static void selectSourcePair(bool is_first_const, bool is_first_nullable, 
FirstSource && first, + bool is_second_const, bool is_second_nullable, SecondSource && second, + ColumnUInt8 & result) + { + using SourceType = typename std::decay::type; + + if (is_first_nullable) + { + using NullableSource = NullableArraySource; + + if (is_first_const) + callFunction(static_cast &>(first), is_second_const, is_second_nullable, second, result); + else + callFunction(static_cast(first), is_second_const, is_second_nullable, second, result); + } + else + { + if (is_first_const) + callFunction(static_cast &>(first), is_second_const, is_second_nullable, second, result); + else + callFunction(first, is_second_const, is_second_nullable, second, result); + } } }; diff --git a/src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp b/src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp index 5222bf525cc..f8f46a2ac49 100644 --- a/src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp +++ b/src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp @@ -13,13 +13,37 @@ namespace struct SliceDynamicOffsetBoundedSelectArraySource : public ArraySourceSelector { template - static void selectImpl(Source && source, const IColumn & offset_column, const IColumn & length_column, ColumnArray::MutablePtr & result) + static void selectSource(bool is_const, bool is_nullable, Source && source, + const IColumn & offset_column, const IColumn & length_column, ColumnArray::MutablePtr & result) { using SourceType = typename std::decay::type; using Sink = typename SourceType::SinkType; - result = ColumnArray::create(source.createValuesColumn()); - Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); - sliceDynamicOffsetBounded(source, sink, offset_column, length_column); + + if (is_nullable) + { + using NullableSource = NullableArraySource; + using NullableSink = typename NullableSource::SinkType; + + auto & nullable_source = static_cast(source); + + result = ColumnArray::create(nullable_source.createValuesColumn()); + NullableSink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceDynamicOffsetBounded(static_cast &>(source), sink, offset_column, length_column); + else + sliceDynamicOffsetBounded(static_cast(source), sink, offset_column, length_column); + } + else + { + result = ColumnArray::create(source.createValuesColumn()); + Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceDynamicOffsetBounded(static_cast &>(source), sink, offset_column, length_column); + else + sliceDynamicOffsetBounded(source, sink, offset_column, length_column); + } } }; diff --git a/src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp b/src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp index fa98028f36f..2aa6a8903a4 100644 --- a/src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp +++ b/src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp @@ -14,13 +14,36 @@ struct SliceDynamicOffsetUnboundedSelectArraySource : public ArraySourceSelector { template - static void selectImpl(Source && source, const IColumn & offset_column, ColumnArray::MutablePtr & result) + static void selectSource(bool is_const, bool is_nullable, Source && source, const IColumn & offset_column, ColumnArray::MutablePtr & result) { using SourceType = typename std::decay::type; using Sink = typename SourceType::SinkType; - result = ColumnArray::create(source.createValuesColumn()); - Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); - sliceDynamicOffsetUnbounded(source, sink, offset_column); + + if 
(is_nullable) + { + using NullableSource = NullableArraySource; + using NullableSink = typename NullableSource::SinkType; + + auto & nullable_source = static_cast(source); + + result = ColumnArray::create(nullable_source.createValuesColumn()); + NullableSink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceDynamicOffsetUnbounded(static_cast &>(source), sink, offset_column); + else + sliceDynamicOffsetUnbounded(static_cast(source), sink, offset_column); + } + else + { + result = ColumnArray::create(source.createValuesColumn()); + Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceDynamicOffsetUnbounded(static_cast &>(source), sink, offset_column); + else + sliceDynamicOffsetUnbounded(source, sink, offset_column); + } } }; diff --git a/src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp b/src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp index 96ea1e076e1..404cfa1b10d 100644 --- a/src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp +++ b/src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp @@ -14,13 +14,36 @@ struct SliceFromLeftConstantOffsetBoundedSelectArraySource : public ArraySourceSelector { template - static void selectImpl(Source && source, size_t & offset, ssize_t & length, ColumnArray::MutablePtr & result) + static void selectSource(bool is_const, bool is_nullable, Source && source, size_t & offset, ssize_t & length, ColumnArray::MutablePtr & result) { using SourceType = typename std::decay::type; using Sink = typename SourceType::SinkType; - result = ColumnArray::create(source.createValuesColumn()); - Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); - sliceFromLeftConstantOffsetBounded(source, sink, offset, length); + + if (is_nullable) + { + using NullableSource = NullableArraySource; + using NullableSink = typename NullableSource::SinkType; + + auto & nullable_source = static_cast(source); + + result = ColumnArray::create(nullable_source.createValuesColumn()); + NullableSink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceFromLeftConstantOffsetBounded(static_cast &>(source), sink, offset, length); + else + sliceFromLeftConstantOffsetBounded(static_cast(source), sink, offset, length); + } + else + { + result = ColumnArray::create(source.createValuesColumn()); + Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceFromLeftConstantOffsetBounded(static_cast &>(source), sink, offset, length); + else + sliceFromLeftConstantOffsetBounded(source, sink, offset, length); + } } }; diff --git a/src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp b/src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp index a6b5f799c80..1a7fb03a275 100644 --- a/src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp +++ b/src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp @@ -14,13 +14,36 @@ struct SliceFromLeftConstantOffsetUnboundedSelectArraySource : public ArraySourceSelector { template - static void selectImpl(Source && source, size_t & offset, ColumnArray::MutablePtr & result) + static void selectSource(bool is_const, bool is_nullable, Source && source, size_t & offset, ColumnArray::MutablePtr & result) { using SourceType = typename std::decay::type; using Sink = typename SourceType::SinkType; - result = ColumnArray::create(source.createValuesColumn()); - Sink 
sink(result->getData(), result->getOffsets(), source.getColumnSize()); - sliceFromLeftConstantOffsetUnbounded(source, sink, offset); + + if (is_nullable) + { + using NullableSource = NullableArraySource; + using NullableSink = typename NullableSource::SinkType; + + auto & nullable_source = static_cast(source); + + result = ColumnArray::create(nullable_source.createValuesColumn()); + NullableSink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceFromLeftConstantOffsetUnbounded(static_cast &>(source), sink, offset); + else + sliceFromLeftConstantOffsetUnbounded(static_cast(source), sink, offset); + } + else + { + result = ColumnArray::create(source.createValuesColumn()); + Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceFromLeftConstantOffsetUnbounded(static_cast &>(source), sink, offset); + else + sliceFromLeftConstantOffsetUnbounded(source, sink, offset); + } } }; diff --git a/src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp b/src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp index 35833950cfe..faa6c6fd4e9 100644 --- a/src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp +++ b/src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp @@ -14,13 +14,36 @@ struct SliceFromRightConstantOffsetBoundedSelectArraySource : public ArraySourceSelector { template - static void selectImpl(Source && source, size_t & offset, ssize_t & length, ColumnArray::MutablePtr & result) + static void selectSource(bool is_const, bool is_nullable, Source && source, size_t & offset, ssize_t & length, ColumnArray::MutablePtr & result) { using SourceType = typename std::decay::type; using Sink = typename SourceType::SinkType; - result = ColumnArray::create(source.createValuesColumn()); - Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); - sliceFromRightConstantOffsetBounded(source, sink, offset, length); + + if (is_nullable) + { + using NullableSource = NullableArraySource; + using NullableSink = typename NullableSource::SinkType; + + auto & nullable_source = static_cast(source); + + result = ColumnArray::create(nullable_source.createValuesColumn()); + NullableSink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceFromRightConstantOffsetBounded(static_cast &>(source), sink, offset, length); + else + sliceFromRightConstantOffsetBounded(static_cast(source), sink, offset, length); + } + else + { + result = ColumnArray::create(source.createValuesColumn()); + Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceFromRightConstantOffsetBounded(static_cast &>(source), sink, offset, length); + else + sliceFromRightConstantOffsetBounded(source, sink, offset, length); + } } }; diff --git a/src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp b/src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp index 945450b4208..59bb0c21599 100644 --- a/src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp +++ b/src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp @@ -14,13 +14,36 @@ struct SliceFromRightConstantOffsetUnboundedSelectArraySource : public ArraySourceSelector { template - static void selectImpl(Source && source, size_t & offset, ColumnArray::MutablePtr & result) + static void selectSource(bool is_const, bool is_nullable, Source && source, size_t & offset, ColumnArray::MutablePtr & result) { using 
SourceType = typename std::decay::type; using Sink = typename SourceType::SinkType; - result = ColumnArray::create(source.createValuesColumn()); - Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); - sliceFromRightConstantOffsetUnbounded(source, sink, offset); + + if (is_nullable) + { + using NullableSource = NullableArraySource; + using NullableSink = typename NullableSource::SinkType; + + auto & nullable_source = static_cast(source); + + result = ColumnArray::create(nullable_source.createValuesColumn()); + NullableSink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceFromRightConstantOffsetUnbounded(static_cast &>(source), sink, offset); + else + sliceFromRightConstantOffsetUnbounded(static_cast(source), sink, offset); + } + else + { + result = ColumnArray::create(source.createValuesColumn()); + Sink sink(result->getData(), result->getOffsets(), source.getColumnSize()); + + if (is_const) + sliceFromRightConstantOffsetUnbounded(static_cast &>(source), sink, offset); + else + sliceFromRightConstantOffsetUnbounded(source, sink, offset); + } } }; diff --git a/src/Functions/appendTrailingCharIfAbsent.cpp b/src/Functions/appendTrailingCharIfAbsent.cpp index 67a3cbabe6d..eb625374707 100644 --- a/src/Functions/appendTrailingCharIfAbsent.cpp +++ b/src/Functions/appendTrailingCharIfAbsent.cpp @@ -92,7 +92,7 @@ private: src_offset = src_offsets[i]; dst_offset += src_length; - if (src_length > 1 && dst_data[dst_offset - 2] != trailing_char_str.front()) + if (src_length > 1 && dst_data[dst_offset - 2] != UInt8(trailing_char_str.front())) { dst_data[dst_offset - 1] = trailing_char_str.front(); dst_data[dst_offset] = 0; diff --git a/src/Functions/finalizeAggregation.cpp b/src/Functions/finalizeAggregation.cpp index 51afb4729dc..ae2a67dec20 100644 --- a/src/Functions/finalizeAggregation.cpp +++ b/src/Functions/finalizeAggregation.cpp @@ -34,11 +34,6 @@ public: return name; } - bool isStateful() const override - { - return true; - } - size_t getNumberOfArguments() const override { return 1; diff --git a/src/IO/MySQLBinlogEventReadBuffer.cpp b/src/IO/MySQLBinlogEventReadBuffer.cpp new file mode 100644 index 00000000000..3a2aba045d3 --- /dev/null +++ b/src/IO/MySQLBinlogEventReadBuffer.cpp @@ -0,0 +1,70 @@ +#include + + +namespace DB +{ + +MySQLBinlogEventReadBuffer::MySQLBinlogEventReadBuffer(ReadBuffer & in_) + : ReadBuffer(nullptr, 0, 0), in(in_) +{ + nextIfAtEnd(); +} + +bool MySQLBinlogEventReadBuffer::nextImpl() +{ + if (hasPendingData()) + return true; + + if (in.eof()) + return false; + + if (checksum_buff_size == checksum_buff_limit) + { + if (likely(in.available() > CHECKSUM_CRC32_SIGNATURE_LENGTH)) + { + working_buffer = ReadBuffer::Buffer(in.position(), in.buffer().end() - CHECKSUM_CRC32_SIGNATURE_LENGTH); + in.ignore(working_buffer.size()); + return true; + } + + in.readStrict(checksum_buf, CHECKSUM_CRC32_SIGNATURE_LENGTH); + checksum_buff_size = checksum_buff_limit = CHECKSUM_CRC32_SIGNATURE_LENGTH; + } + else + { + for (size_t index = 0; index < checksum_buff_size - checksum_buff_limit; ++index) + checksum_buf[index] = checksum_buf[checksum_buff_limit + index]; + + checksum_buff_size -= checksum_buff_limit; + size_t read_bytes = CHECKSUM_CRC32_SIGNATURE_LENGTH - checksum_buff_size; + in.readStrict(checksum_buf + checksum_buff_size, read_bytes); /// Minimum CHECKSUM_CRC32_SIGNATURE_LENGTH bytes + checksum_buff_size = checksum_buff_limit = CHECKSUM_CRC32_SIGNATURE_LENGTH; + } + + if (in.eof()) + return false; + + if 
(in.available() < CHECKSUM_CRC32_SIGNATURE_LENGTH) + { + size_t left_move_size = CHECKSUM_CRC32_SIGNATURE_LENGTH - in.available(); + checksum_buff_limit = checksum_buff_size - left_move_size; + } + + working_buffer = ReadBuffer::Buffer(checksum_buf, checksum_buf + checksum_buff_limit); + return true; +} + +MySQLBinlogEventReadBuffer::~MySQLBinlogEventReadBuffer() +{ + try + { + /// ignore last 4 bytes + nextIfAtEnd(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } +} + +} diff --git a/src/IO/MySQLBinlogEventReadBuffer.h b/src/IO/MySQLBinlogEventReadBuffer.h new file mode 100644 index 00000000000..e9452aa551e --- /dev/null +++ b/src/IO/MySQLBinlogEventReadBuffer.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +namespace DB +{ + +class MySQLBinlogEventReadBuffer : public ReadBuffer +{ +protected: + static const size_t CHECKSUM_CRC32_SIGNATURE_LENGTH = 4; + ReadBuffer & in; + + size_t checksum_buff_size = 0; + size_t checksum_buff_limit = 0; + char checksum_buf[CHECKSUM_CRC32_SIGNATURE_LENGTH]; + + bool nextImpl() override; + +public: + ~MySQLBinlogEventReadBuffer() override; + + MySQLBinlogEventReadBuffer(ReadBuffer & in_); + +}; + + +} diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index e068f3581bd..c34d7719131 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -1,9 +1,12 @@ +#include + +#if USE_AWS_S3 + #include "PocoHTTPClient.h" #include #include #include -#include #include #include #include @@ -15,6 +18,7 @@ #include #include + namespace ProfileEvents { extern const Event S3ReadMicroseconds; @@ -65,7 +69,7 @@ std::shared_ptr PocoHTTPClient::MakeRequest( Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const { auto response = Aws::MakeShared("PocoHTTPClient", request); - MakeRequestInternal(request, response, readLimiter, writeLimiter); + makeRequestInternal(request, response, readLimiter, writeLimiter); return response; } @@ -75,11 +79,11 @@ std::shared_ptr PocoHTTPClient::MakeRequest( Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const { auto response = Aws::MakeShared("PocoHTTPClient", request); - MakeRequestInternal(*request, response, readLimiter, writeLimiter); + makeRequestInternal(*request, response, readLimiter, writeLimiter); return response; } -void PocoHTTPClient::MakeRequestInternal( +void PocoHTTPClient::makeRequestInternal( Aws::Http::HttpRequest & request, std::shared_ptr & response, Aws::Utils::RateLimits::RateLimiterInterface *, @@ -101,7 +105,7 @@ void PocoHTTPClient::MakeRequestInternal( EnumSize, }; - auto selectMetric = [&request](S3MetricType type) + auto select_metric = [&request](S3MetricType type) { const ProfileEvents::Event events_map[][2] = { {ProfileEvents::S3ReadMicroseconds, ProfileEvents::S3WriteMicroseconds}, @@ -128,12 +132,12 @@ void PocoHTTPClient::MakeRequestInternal( throw Exception("Unsupported request method", ErrorCodes::NOT_IMPLEMENTED); }; - ProfileEvents::increment(selectMetric(S3MetricType::Count)); + ProfileEvents::increment(select_metric(S3MetricType::Count)); - const int MAX_REDIRECT_ATTEMPTS = 10; + static constexpr int max_redirect_attempts = 10; try { - for (int attempt = 0; attempt < MAX_REDIRECT_ATTEMPTS; ++attempt) + for (int attempt = 0; attempt < max_redirect_attempts; ++attempt) { Poco::URI poco_uri(uri); @@ -202,7 +206,7 @@ void PocoHTTPClient::MakeRequestInternal( auto & response_body_stream = session->receiveResponse(poco_response); watch.stop(); - ProfileEvents::increment(selectMetric(S3MetricType::Microseconds), 
watch.elapsedMicroseconds()); + ProfileEvents::increment(select_metric(S3MetricType::Microseconds), watch.elapsedMicroseconds()); int status_code = static_cast(poco_response.getStatus()); LOG_DEBUG(log, "Response status: {}, {}", status_code, poco_response.getReason()); @@ -214,7 +218,7 @@ void PocoHTTPClient::MakeRequestInternal( uri = location; LOG_DEBUG(log, "Redirecting request to new location: {}", location); - ProfileEvents::increment(selectMetric(S3MetricType::Redirects)); + ProfileEvents::increment(select_metric(S3MetricType::Redirects)); continue; } @@ -240,11 +244,11 @@ void PocoHTTPClient::MakeRequestInternal( if (status_code == 429 || status_code == 503) { // API throttling - ProfileEvents::increment(selectMetric(S3MetricType::Throttling)); + ProfileEvents::increment(select_metric(S3MetricType::Throttling)); } else { - ProfileEvents::increment(selectMetric(S3MetricType::Errors)); + ProfileEvents::increment(select_metric(S3MetricType::Errors)); } } else @@ -261,7 +265,9 @@ void PocoHTTPClient::MakeRequestInternal( response->SetClientErrorType(Aws::Client::CoreErrors::NETWORK_CONNECTION); response->SetClientErrorMessage(getCurrentExceptionMessage(false)); - ProfileEvents::increment(selectMetric(S3MetricType::Errors)); + ProfileEvents::increment(select_metric(S3MetricType::Errors)); } } } + +#endif diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index a94a08e217d..eefc85fae70 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -37,7 +37,7 @@ public: Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const override; private: - void MakeRequestInternal( + void makeRequestInternal( Aws::Http::HttpRequest & request, std::shared_ptr & response, Aws::Utils::RateLimits::RateLimiterInterface * readLimiter, diff --git a/src/IO/S3/PocoHTTPClientFactory.cpp b/src/IO/S3/PocoHTTPClientFactory.cpp index 68f6a6b9823..b257f96e383 100644 --- a/src/IO/S3/PocoHTTPClientFactory.cpp +++ b/src/IO/S3/PocoHTTPClientFactory.cpp @@ -1,3 +1,7 @@ +#include + +#if USE_AWS_S3 + #include "PocoHTTPClientFactory.h" #include @@ -32,3 +36,5 @@ std::shared_ptr PocoHTTPClientFactory::CreateHttpRequest } } + +#endif diff --git a/src/IO/S3/PocoHTTPResponseStream.cpp b/src/IO/S3/PocoHTTPResponseStream.cpp index 0a198268f2e..b35188d9498 100644 --- a/src/IO/S3/PocoHTTPResponseStream.cpp +++ b/src/IO/S3/PocoHTTPResponseStream.cpp @@ -1,3 +1,8 @@ +#include + +#if USE_AWS_S3 + + #include "PocoHTTPResponseStream.h" #include @@ -10,3 +15,5 @@ PocoHTTPResponseStream::PocoHTTPResponseStream(std::shared_ptr # include # include -# include # include -# include # include # include # include diff --git a/src/IO/tests/gtest_mysql_binlog_event_read_buffer.cpp b/src/IO/tests/gtest_mysql_binlog_event_read_buffer.cpp new file mode 100644 index 00000000000..f4d39c73a7c --- /dev/null +++ b/src/IO/tests/gtest_mysql_binlog_event_read_buffer.cpp @@ -0,0 +1,82 @@ +#include +#include +#include +#include +#include + +using namespace DB; + +TEST(MySQLBinlogEventReadBuffer, CheckBoundary) +{ + for (size_t index = 1; index < 4; ++index) + { + std::vector memory_data(index, 0x01); + ReadBufferFromMemory nested_in(memory_data.data(), index); + + EXPECT_THROW({ MySQLBinlogEventReadBuffer binlog_in(nested_in); }, Exception); + } +} + +TEST(MySQLBinlogEventReadBuffer, NiceBufferSize) +{ + char res[2]; + std::vector memory_data(6, 0x01); + ReadBufferFromMemory nested_in(memory_data.data(), 6); + + MySQLBinlogEventReadBuffer binlog_in(nested_in); + binlog_in.readStrict(res, 2); + ASSERT_EQ(res[0], 
0x01); + ASSERT_EQ(res[1], 0x01); + ASSERT_TRUE(binlog_in.eof()); +} + +TEST(MySQLBinlogEventReadBuffer, BadBufferSizes) +{ + char res[4]; + std::vector buffers; + std::vector nested_buffers; + std::vector>> memory_buffers_data; + std::vector bad_buffers_size = {2, 1, 2, 3}; + + for (const auto & bad_buffer_size : bad_buffers_size) + { + memory_buffers_data.emplace_back(std::make_shared>(bad_buffer_size, 0x01)); + buffers.emplace_back(std::make_shared(memory_buffers_data.back()->data(), bad_buffer_size)); + nested_buffers.emplace_back(buffers.back().get()); + } + + ConcatReadBuffer concat_buffer(nested_buffers); + MySQLBinlogEventReadBuffer binlog_in(concat_buffer); + binlog_in.readStrict(res, 4); + + for (const auto & res_byte : res) + ASSERT_EQ(res_byte, 0x01); + + ASSERT_TRUE(binlog_in.eof()); +} + +TEST(MySQLBinlogEventReadBuffer, NiceAndBadBufferSizes) +{ + char res[12]; + std::vector buffers; + std::vector nested_buffers; + std::vector>> memory_buffers_data; + std::vector buffers_size = {6, 1, 3, 6}; + + for (const auto & bad_buffer_size : buffers_size) + { + memory_buffers_data.emplace_back(std::make_shared>(bad_buffer_size, 0x01)); + buffers.emplace_back(std::make_shared(memory_buffers_data.back()->data(), bad_buffer_size)); + nested_buffers.emplace_back(buffers.back().get()); + } + + ConcatReadBuffer concat_buffer(nested_buffers); + MySQLBinlogEventReadBuffer binlog_in(concat_buffer); + binlog_in.readStrict(res, 12); + + for (const auto & res_byte : res) + ASSERT_EQ(res_byte, 0x01); + + ASSERT_TRUE(binlog_in.eof()); +} + diff --git a/src/IO/ya.make b/src/IO/ya.make index 0c939588a9b..28099818b46 100644 --- a/src/IO/ya.make +++ b/src/IO/ya.make @@ -28,6 +28,7 @@ SRCS( MemoryReadWriteBuffer.cpp MMapReadBufferFromFile.cpp MMapReadBufferFromFileDescriptor.cpp + MySQLBinlogEventReadBuffer.cpp MySQLPacketPayloadReadBuffer.cpp MySQLPacketPayloadWriteBuffer.cpp NullWriteBuffer.cpp diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 466370a22a2..5b9169a878b 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -367,6 +367,14 @@ AggregatedDataVariants::Type Aggregator::chooseAggregationMethod() throw Exception("Logical error: numeric column has sizeOfField not in 1, 2, 4, 8, 16, 32.", ErrorCodes::LOGICAL_ERROR); } + if (params.keys_size == 1 && isFixedString(types_removed_nullable[0])) + { + if (has_low_cardinality) + return AggregatedDataVariants::Type::low_cardinality_key_fixed_string; + else + return AggregatedDataVariants::Type::key_fixed_string; + } + /// If all keys fits in N bits, will use hash table with all keys packed (placed contiguously) to single N-bit key. 
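// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): minimal standalone usage of the new
// MySQLBinlogEventReadBuffer, mirroring the NiceBufferSize test above. Only the
// interfaces visible in this diff are relied on; the include paths and the
// function name are assumptions based on the file locations shown here.
// ---------------------------------------------------------------------------
#include <IO/ReadBufferFromMemory.h>
#include <IO/MySQLBinlogEventReadBuffer.h>
#include <cassert>
#include <vector>

void readEventPayload()
{
    std::vector<char> event(6, 0x01);                /// 2 payload bytes followed by a 4-byte CRC32
    DB::ReadBufferFromMemory nested(event.data(), event.size());
    DB::MySQLBinlogEventReadBuffer payload(nested);  /// hides the trailing checksum of each event

    char res[2];
    payload.readStrict(res, 2);                      /// only the payload bytes are readable
    assert(payload.eof());                           /// the checksum bytes were consumed internally
}
// ---------------------------------------------------------------------------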
if (params.keys_size == num_fixed_contiguous_keys) { @@ -399,14 +407,6 @@ AggregatedDataVariants::Type Aggregator::chooseAggregationMethod() return AggregatedDataVariants::Type::key_string; } - if (params.keys_size == 1 && isFixedString(types_removed_nullable[0])) - { - if (has_low_cardinality) - return AggregatedDataVariants::Type::low_cardinality_key_fixed_string; - else - return AggregatedDataVariants::Type::key_fixed_string; - } - return AggregatedDataVariants::Type::serialized; } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 8dce011c435..fac7889d6fc 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -228,7 +228,7 @@ struct AggregationMethodString static void insertKeyIntoColumns(const StringRef & key, MutableColumns & key_columns, const Sizes &) { - key_columns[0]->insertData(key.data, key.size); + static_cast(key_columns[0].get())->insertData(key.data, key.size); } }; @@ -254,7 +254,7 @@ struct AggregationMethodStringNoCache static void insertKeyIntoColumns(const StringRef & key, MutableColumns & key_columns, const Sizes &) { - key_columns[0]->insertData(key.data, key.size); + static_cast(key_columns[0].get())->insertData(key.data, key.size); } }; @@ -280,7 +280,7 @@ struct AggregationMethodFixedString static void insertKeyIntoColumns(const StringRef & key, MutableColumns & key_columns, const Sizes &) { - key_columns[0]->insertData(key.data, key.size); + static_cast(key_columns[0].get())->insertData(key.data, key.size); } }; @@ -305,7 +305,7 @@ struct AggregationMethodFixedStringNoCache static void insertKeyIntoColumns(const StringRef & key, MutableColumns & key_columns, const Sizes &) { - key_columns[0]->insertData(key.data, key.size); + static_cast(key_columns[0].get())->insertData(key.data, key.size); } }; diff --git a/src/Interpreters/ApplyWithSubqueryVisitor.cpp b/src/Interpreters/ApplyWithSubqueryVisitor.cpp new file mode 100644 index 00000000000..e03682dafb3 --- /dev/null +++ b/src/Interpreters/ApplyWithSubqueryVisitor.cpp @@ -0,0 +1,90 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ +void ApplyWithSubqueryVisitor::visit(ASTPtr & ast, const Data & data) +{ + if (auto * node_select = ast->as()) + { + auto with = node_select->with(); + std::optional new_data; + if (with) + { + for (auto & child : with->children) + visit(child, data); + for (auto & child : with->children) + { + if (auto * ast_with_elem = child->as()) + { + if (!new_data) + new_data = data; + new_data->subqueries[ast_with_elem->name] = ast_with_elem->subquery; + } + } + } + + for (auto & child : node_select->children) + { + if (child != with) + visit(child, new_data ? 
*new_data : data); + } + return; + } + + for (auto & child : ast->children) + visit(child, data); + if (auto * node_func = ast->as()) + visit(*node_func, data); + else if (auto * node_table = ast->as()) + visit(*node_table, data); +} + +void ApplyWithSubqueryVisitor::visit(ASTTableExpression & table, const Data & data) +{ + if (table.database_and_table_name) + { + auto table_id = IdentifierSemantic::extractDatabaseAndTable(table.database_and_table_name->as()); + if (table_id.database_name.empty()) + { + auto subquery_it = data.subqueries.find(table_id.table_name); + if (subquery_it != data.subqueries.end()) + { + table.children.clear(); + table.database_and_table_name.reset(); + table.subquery = subquery_it->second->clone(); + dynamic_cast(*table.subquery).alias = table_id.table_name; + table.children.emplace_back(table.subquery); + } + } + } +} + +void ApplyWithSubqueryVisitor::visit(ASTFunction & func, const Data & data) +{ + if (checkFunctionIsInOrGlobalInOperator(func)) + { + auto & ast = func.arguments->children.at(1); + if (const auto * ident = ast->as()) + { + auto table_id = IdentifierSemantic::extractDatabaseAndTable(*ident); + if (table_id.database_name.empty()) + { + auto subquery_it = data.subqueries.find(table_id.table_name); + if (subquery_it != data.subqueries.end()) + { + func.arguments->children[1] = subquery_it->second->clone(); + dynamic_cast(*func.arguments->children[1]).alias = table_id.table_name; + } + } + } + } +} + +} diff --git a/src/Interpreters/ApplyWithSubqueryVisitor.h b/src/Interpreters/ApplyWithSubqueryVisitor.h new file mode 100644 index 00000000000..2aecd6aee01 --- /dev/null +++ b/src/Interpreters/ApplyWithSubqueryVisitor.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +#include + +namespace DB +{ +// TODO After we support `union_with_global`, this visitor should also be extended to match ASTSelectQueryWithUnion. +class ASTSelectQuery; +class ASTFunction; +struct ASTTableExpression; + +class ApplyWithSubqueryVisitor +{ +public: + struct Data + { + std::map subqueries; + }; + + static void visit(ASTPtr & ast) { visit(ast, {}); } + +private: + static void visit(ASTPtr & ast, const Data & data); + static void visit(ASTTableExpression & table, const Data & data); + static void visit(ASTFunction & func, const Data & data); +}; + +} diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index ac71a88dc00..feb2036a0d6 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -332,7 +332,7 @@ void AsynchronousMetrics::update() ReadBufferFromFile buf("/proc/cpuinfo", 32768 /* buf_size */); // We need the following lines: - // core id : 4 + // processor : 4 // cpu MHz : 4052.941 // They contain tabs and are interspersed with other info. int core_id = 0; @@ -346,7 +346,7 @@ void AsynchronousMetrics::update() // It doesn't read the EOL itself. 
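// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the surrounding AsynchronousMetrics hunk
// switches the /proc/cpuinfo key from "core id" to "processor", so every logical CPU
// gets its own frequency reading even when hyperthread siblings share a core id.
// A standalone illustration of the same parsing idea, using plain iostreams instead
// of ClickHouse's ReadBufferFromFile (function name hypothetical):
// ---------------------------------------------------------------------------
#include <fstream>
#include <map>
#include <string>

std::map<int, double> readCpuMHzByProcessor()
{
    std::ifstream cpuinfo("/proc/cpuinfo");
    std::map<int, double> mhz;
    int processor = 0;
    for (std::string line; std::getline(cpuinfo, line);)
    {
        auto colon = line.find(':');
        if (colon == std::string::npos)
            continue;
        if (line.rfind("processor", 0) == 0)
            processor = std::stoi(line.substr(colon + 1));   /// "processor : 4"
        else if (line.rfind("cpu MHz", 0) == 0)
            mhz[processor] = std::stod(line.substr(colon + 1));   /// "cpu MHz : 4052.941"
    }
    return mhz;
}
// ---------------------------------------------------------------------------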
++buf.position(); - if (s.rfind("core id", 0) == 0) + if (s.rfind("processor", 0) == 0) { if (auto colon = s.find_first_of(':')) { diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index caf93355c1d..024027c7c75 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -106,7 +106,6 @@ namespace ErrorCodes extern const int SESSION_NOT_FOUND; extern const int SESSION_IS_LOCKED; extern const int LOGICAL_ERROR; - extern const int AUTHENTICATION_FAILED; extern const int NOT_IMPLEMENTED; } @@ -356,6 +355,7 @@ struct ContextShared mutable std::shared_ptr merge_tree_storage_policy_selector; std::optional merge_tree_settings; /// Settings of MergeTree* engines. + std::optional replicated_merge_tree_settings; /// Settings of ReplicatedMergeTree* engines. std::atomic_size_t max_table_size_to_drop = 50000000000lu; /// Protects MergeTree tables from accidental DROP (50GB by default) std::atomic_size_t max_partition_size_to_drop = 50000000000lu; /// Protects MergeTree partitions from accidental DROP (50GB by default) String format_schema_path; /// Path to a directory that contains schema files used by input formats. @@ -689,22 +689,20 @@ void Context::setUserImpl(const String & name, const std::optional & pas client_info.current_password = password.value_or(""); #endif - auto new_user_id = getAccessControlManager().find(name); - std::shared_ptr new_access; - if (new_user_id) + /// Find a user with such name and check the password. + UUID new_user_id; + if (password) + new_user_id = getAccessControlManager().login(name, *password, address.host()); + else { - new_access = getAccessControlManager().getContextAccess(*new_user_id, {}, true, settings, current_database, client_info); /// Access w/o password is done under interserver-secret (remote_servers.secret) - /// So it is okay not to check client's host (since there is trust). - if (password && (!new_access->isClientHostAllowed() || !new_access->isCorrectPassword(*password))) - { - new_user_id = {}; - new_access = nullptr; - } + /// So it is okay not to check client's host in this case (since there is trust). 
+ new_user_id = getAccessControlManager().getIDOfLoggedUser(name); } - if (!new_user_id || !new_access) - throw Exception(name + ": Authentication failed: password is incorrect or there is no user with such name", ErrorCodes::AUTHENTICATION_FAILED); + auto new_access = getAccessControlManager().getContextAccess( + new_user_id, /* current_roles = */ {}, /* use_default_roles = */ true, + settings, current_database, client_info); user_id = new_user_id; access = std::move(new_access); @@ -716,7 +714,7 @@ void Context::setUserImpl(const String & name, const std::optional & pas void Context::setUser(const String & name, const String & password, const Poco::Net::SocketAddress & address) { - setUserImpl(name, std::make_optional(password), address); + setUserImpl(name, password, address); } void Context::setUserWithoutCheckingPassword(const String & name, const Poco::Net::SocketAddress & address) @@ -1926,6 +1924,22 @@ const MergeTreeSettings & Context::getMergeTreeSettings() const return *shared->merge_tree_settings; } +const MergeTreeSettings & Context::getReplicatedMergeTreeSettings() const +{ + auto lock = getLock(); + + if (!shared->replicated_merge_tree_settings) + { + const auto & config = getConfigRef(); + MergeTreeSettings mt_settings; + mt_settings.loadFromConfig("merge_tree", config); + mt_settings.loadFromConfig("replicated_merge_tree", config); + shared->replicated_merge_tree_settings.emplace(mt_settings); + } + + return *shared->replicated_merge_tree_settings; +} + const StorageS3Settings & Context::getStorageS3Settings() const { #if !defined(ARCADIA_BUILD) diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 369b10f8b9a..956abd4e864 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -259,11 +259,13 @@ public: /// Sets the current user, checks the password and that the specified host is allowed. /// Must be called before getClientInfo. void setUser(const String & name, const String & password, const Poco::Net::SocketAddress & address); - /// Sets the current user, *do not checks the password* but check that the specified host is allowed. + + /// Sets the current user, *do not checks the password and that the specified host is allowed*. /// Must be called before getClientInfo. 
/// /// (Used only internally in cluster, if the secret matches) void setUserWithoutCheckingPassword(const String & name, const Poco::Net::SocketAddress & address); + void setQuotaKey(String quota_key_); UserPtr getUser() const; @@ -545,6 +547,7 @@ public: std::shared_ptr getPartLog(const String & part_database); const MergeTreeSettings & getMergeTreeSettings() const; + const MergeTreeSettings & getReplicatedMergeTreeSettings() const; const StorageS3Settings & getStorageS3Settings() const; /// Prevents DROP TABLE if its size is greater than max_size (50GB by default, max_size=0 turn off this check) diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index b9b52e2f3fe..5b346eec54a 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -38,10 +37,11 @@ #include #include #include +#include #include +#include #include #include -#include namespace DB @@ -144,7 +144,7 @@ struct DDLLogEntry rb >> "version: " >> version >> "\n"; if (version != CURRENT_VERSION) - throw Exception("Unknown DDLLogEntry format version: " + DB::toString(version), ErrorCodes::UNKNOWN_FORMAT_VERSION); + throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Unknown DDLLogEntry format version: {}", version); Strings host_id_strings; rb >> "query: " >> escape >> query >> "\n"; @@ -308,9 +308,14 @@ static bool isSupportedAlterType(int type) } -DDLWorker::DDLWorker(const std::string & zk_root_dir, Context & context_, const Poco::Util::AbstractConfiguration * config, const String & prefix) - : context(context_), log(&Poco::Logger::get("DDLWorker")) +DDLWorker::DDLWorker(int pool_size_, const std::string & zk_root_dir, Context & context_, const Poco::Util::AbstractConfiguration * config, const String & prefix) + : context(context_) + , log(&Poco::Logger::get("DDLWorker")) + , pool_size(pool_size_) + , worker_pool(pool_size_) { + last_tasks.reserve(pool_size); + queue_dir = zk_root_dir; if (queue_dir.back() == '/') queue_dir.resize(queue_dir.size() - 1); @@ -343,6 +348,7 @@ DDLWorker::~DDLWorker() stop_flag = true; queue_updated_event->set(); cleanup_event->set(); + worker_pool.wait(); main_thread.join(); cleanup_thread.join(); } @@ -364,8 +370,27 @@ DDLWorker::ZooKeeperPtr DDLWorker::getAndSetZooKeeper() return current_zookeeper; } +void DDLWorker::recoverZooKeeper() +{ + LOG_DEBUG(log, "Recovering ZooKeeper session after: {}", getCurrentExceptionMessage(false)); -bool DDLWorker::initAndCheckTask(const String & entry_name, String & out_reason, const ZooKeeperPtr & zookeeper) + while (!stop_flag) + { + try + { + getAndSetZooKeeper(); + break; + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + sleepForSeconds(5); + } + } +} + + +DDLTaskPtr DDLWorker::initAndCheckTask(const String & entry_name, String & out_reason, const ZooKeeperPtr & zookeeper) { String node_data; String entry_path = queue_dir + "/" + entry_name; @@ -374,7 +399,7 @@ bool DDLWorker::initAndCheckTask(const String & entry_name, String & out_reason, { /// It is Ok that node could be deleted just now. It means that there are no current host in node's host list. 
out_reason = "The task was deleted"; - return false; + return {}; } auto task = std::make_unique(); @@ -405,7 +430,7 @@ bool DDLWorker::initAndCheckTask(const String & entry_name, String & out_reason, } out_reason = "Incorrect task format"; - return false; + return {}; } bool host_in_hostlist = false; @@ -433,12 +458,13 @@ bool DDLWorker::initAndCheckTask(const String & entry_name, String & out_reason, } } - if (host_in_hostlist) - current_task = std::move(task); - else + if (!host_in_hostlist) + { out_reason = "There is no a local address in host list"; + return {}; + } - return host_in_hostlist; + return task; } @@ -448,10 +474,9 @@ static void filterAndSortQueueNodes(Strings & all_nodes) std::sort(all_nodes.begin(), all_nodes.end()); } - -void DDLWorker::processTasks() +void DDLWorker::scheduleTasks() { - LOG_DEBUG(log, "Processing tasks"); + LOG_DEBUG(log, "Scheduling tasks"); auto zookeeper = tryGetZooKeeper(); Strings queue_nodes = zookeeper->getChildren(queue_dir, nullptr, queue_updated_event); @@ -459,86 +484,61 @@ void DDLWorker::processTasks() if (queue_nodes.empty()) return; - bool server_startup = last_processed_task_name.empty(); + bool server_startup = last_tasks.empty(); auto begin_node = server_startup ? queue_nodes.begin() - : std::upper_bound(queue_nodes.begin(), queue_nodes.end(), last_processed_task_name); + : std::upper_bound(queue_nodes.begin(), queue_nodes.end(), last_tasks.back()); for (auto it = begin_node; it != queue_nodes.end(); ++it) { String entry_name = *it; - if (current_task) + String reason; + auto task = initAndCheckTask(entry_name, reason, zookeeper); + if (!task) { - if (current_task->entry_name == entry_name) - { - LOG_INFO(log, "Trying to process task {} again", entry_name); - } - else - { - LOG_INFO(log, "Task {} was deleted from ZooKeeper before current host committed it", current_task->entry_name); - current_task = nullptr; - } + LOG_DEBUG(log, "Will not execute task {}: {}", entry_name, reason); + saveTask(entry_name); + continue; } - if (!current_task) + bool already_processed = zookeeper->exists(task->entry_path + "/finished/" + task->host_id_str); + if (!server_startup && !task->was_executed && already_processed) { - String reason; - if (!initAndCheckTask(entry_name, reason, zookeeper)) - { - LOG_DEBUG(log, "Will not execute task {}: {}", entry_name, reason); - last_processed_task_name = entry_name; - continue; - } - } - - DDLTask & task = *current_task; - - bool already_processed = zookeeper->exists(task.entry_path + "/finished/" + task.host_id_str); - if (!server_startup && !task.was_executed && already_processed) - { - throw Exception( - "Server expects that DDL task " + task.entry_name + " should be processed, but it was already processed according to ZK", - ErrorCodes::LOGICAL_ERROR); + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Server expects that DDL task {} should be processed, but it was already processed according to ZK", + entry_name); } if (!already_processed) { - try + worker_pool.scheduleOrThrowOnError([this, task_ptr = task.release()]() { - processTask(task, zookeeper); - } - catch (const Coordination::Exception & e) - { - if (server_startup && e.code == Coordination::Error::ZNONODE) - { - LOG_WARNING(log, "ZooKeeper NONODE error during startup. Ignoring entry {} ({}) : {}", task.entry_name, task.entry.query, getCurrentExceptionMessage(true)); - } - else - { - throw; - } - } - catch (...) 
- { - LOG_WARNING(log, "An error occurred while processing task {} ({}) : {}", task.entry_name, task.entry.query, getCurrentExceptionMessage(true)); - throw; - } + setThreadName("DDLWorkerExec"); + enqueueTask(DDLTaskPtr(task_ptr)); + }); } else { - LOG_DEBUG(log, "Task {} ({}) has been already processed", task.entry_name, task.entry.query); + LOG_DEBUG(log, "Task {} ({}) has been already processed", entry_name, task->entry.query); } - last_processed_task_name = task.entry_name; - current_task.reset(); + saveTask(entry_name); if (stop_flag) break; } } +void DDLWorker::saveTask(const String & entry_name) +{ + if (last_tasks.size() == pool_size) + { + last_tasks.erase(last_tasks.begin()); + } + last_tasks.emplace_back(entry_name); +} /// Parses query and resolves cluster and host in cluster void DDLWorker::parseQueryAndResolveHost(DDLTask & task) @@ -559,10 +559,9 @@ void DDLWorker::parseQueryAndResolveHost(DDLTask & task) task.cluster_name = task.query_on_cluster->cluster; task.cluster = context.tryGetCluster(task.cluster_name); if (!task.cluster) - { - throw Exception("DDL task " + task.entry_name + " contains current host " + task.host_id.readableString() - + " in cluster " + task.cluster_name + ", but there are no such cluster here.", ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION); - } + throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION, + "DDL task {} contains current host {} in cluster {}, but there are no such cluster here.", + task.entry_name, task.host_id.readableString(), task.cluster_name); /// Try to find host from task host list in cluster /// At the first, try find exact match (host name and ports should be literally equal) @@ -583,10 +582,9 @@ void DDLWorker::parseQueryAndResolveHost(DDLTask & task) { if (default_database == address.default_database) { - throw Exception( - "There are two exactly the same ClickHouse instances " + address.readableString() + " in cluster " - + task.cluster_name, - ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION); + throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION, + "There are two exactly the same ClickHouse instances {} in cluster {}", + address.readableString(), task.cluster_name); } else { @@ -600,9 +598,8 @@ void DDLWorker::parseQueryAndResolveHost(DDLTask & task) auto * query_with_table = dynamic_cast(task.query.get()); if (!query_with_table || query_with_table->database.empty()) { - throw Exception( - "For a distributed DDL on circular replicated cluster its table name must be qualified by database name.", - ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION); + throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION, + "For a distributed DDL on circular replicated cluster its table name must be qualified by database name."); } if (default_database == query_with_table->database) return; @@ -635,8 +632,9 @@ void DDLWorker::parseQueryAndResolveHost(DDLTask & task) { if (found_via_resolving) { - throw Exception("There are two the same ClickHouse instances in cluster " + task.cluster_name + " : " - + task.address_in_cluster.readableString() + " and " + address.readableString(), ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION); + throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION, + "There are two the same ClickHouse instances in cluster {} : {} and {}", + task.cluster_name, task.address_in_cluster.readableString(), address.readableString()); } else { @@ -651,8 +649,9 @@ void DDLWorker::parseQueryAndResolveHost(DDLTask & task) if (!found_via_resolving) { - throw Exception("Not found host " + task.host_id.readableString() + " in 
definition of cluster " + task.cluster_name, - ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION); + throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION, + "Not found host {} in definition of cluster {}", + task.host_id.readableString(), task.cluster_name); } else { @@ -673,7 +672,7 @@ bool DDLWorker::tryExecuteQuery(const String & query, const DDLTask & task, Exec try { - current_context = std::make_unique(context); + auto current_context = std::make_unique(context); current_context->getClientInfo().query_kind = ClientInfo::QueryKind::SECONDARY_QUERY; current_context->setCurrentQueryId(""); // generate random query_id executeQuery(istr, ostr, false, *current_context, {}); @@ -707,8 +706,44 @@ void DDLWorker::attachToThreadGroup() } -void DDLWorker::processTask(DDLTask & task, const ZooKeeperPtr & zookeeper) +void DDLWorker::enqueueTask(DDLTaskPtr task_ptr) { + auto & task = *task_ptr; + + while (!stop_flag) + { + try + { + processTask(task); + return; + } + catch (const Coordination::Exception & e) + { + if (Coordination::isHardwareError(e.code)) + { + recoverZooKeeper(); + } + else if (e.code == Coordination::Error::ZNONODE) + { + LOG_ERROR(log, "ZooKeeper error: {}", getCurrentExceptionMessage(true)); + // TODO: retry? + } + else + { + LOG_ERROR(log, "Unexpected ZooKeeper error: {}.", getCurrentExceptionMessage(true)); + return; + } + } + catch (...) + { + LOG_WARNING(log, "An error occurred while processing task {} ({}) : {}", task.entry_name, task.entry.query, getCurrentExceptionMessage(true)); + } + } +} +void DDLWorker::processTask(DDLTask & task) +{ + auto zookeeper = tryGetZooKeeper(); + LOG_DEBUG(log, "Processing task {} ({})", task.entry_name, task.entry.query); String dummy; @@ -816,16 +851,17 @@ void DDLWorker::checkShardConfig(const String & table, const DDLTask & task, Sto if (storage->supportsReplication() && !config_is_replicated_shard) { - throw Exception("Table " + backQuote(table) + " is replicated, but shard #" + toString(task.host_shard_num + 1) + - " isn't replicated according to its cluster definition." - " Possibly true is forgotten in the cluster config.", - ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION); + throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION, + "Table {} is replicated, but shard #{} isn't replicated according to its cluster definition. 
" + "Possibly true is forgotten in the cluster config.", + backQuote(table), task.host_shard_num + 1); } if (!storage->supportsReplication() && config_is_replicated_shard) { - throw Exception("Table " + backQuote(table) + " isn't replicated, but shard #" + toString(task.host_shard_num + 1) + - " is replicated according to its cluster definition", ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION); + throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION, + "Table {} isn't replicated, but shard #{} is replicated according to its cluster definition", + backQuote(table), task.host_shard_num + 1); } } @@ -841,7 +877,7 @@ bool DDLWorker::tryExecuteQueryOnLeaderReplica( /// If we will develop new replicated storage if (!replicated_storage) - throw Exception("Storage type '" + storage->getName() + "' is not supported by distributed DDL", ErrorCodes::NOT_IMPLEMENTED); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Storage type '{}' is not supported by distributed DDL", storage->getName()); /// Generate unique name for shard node, it will be used to execute the query by only single host /// Shard node name has format 'replica_name1,replica_name2,...,replica_nameN' @@ -1118,7 +1154,7 @@ void DDLWorker::runMainThread() attachToThreadGroup(); cleanup_event->set(); - processTasks(); + scheduleTasks(); LOG_DEBUG(log, "Waiting a watch"); queue_updated_event->wait(); @@ -1127,23 +1163,7 @@ void DDLWorker::runMainThread() { if (Coordination::isHardwareError(e.code)) { - LOG_DEBUG(log, "Recovering ZooKeeper session after: {}", getCurrentExceptionMessage(false)); - - while (!stop_flag) - { - try - { - getAndSetZooKeeper(); - break; - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - - using namespace std::chrono_literals; - std::this_thread::sleep_for(5s); - } - } + recoverZooKeeper(); } else if (e.code == Coordination::Error::ZNONODE) { @@ -1260,28 +1280,24 @@ public: size_t num_unfinished_hosts = waiting_hosts.size() - num_hosts_finished; size_t num_active_hosts = current_active_hosts.size(); - std::stringstream msg; - msg << "Watching task " << node_path << " is executing longer than distributed_ddl_task_timeout" - << " (=" << timeout_seconds << ") seconds." - << " There are " << num_unfinished_hosts << " unfinished hosts" - << " (" << num_active_hosts << " of them are currently active)" - << ", they are going to execute the query in background"; - throw Exception(msg.str(), ErrorCodes::TIMEOUT_EXCEEDED); + throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, + "Watching task {} is executing longer than distributed_ddl_task_timeout (={}) seconds. " + "There are {} unfinished hosts ({} of them are currently active), they are going to execute the query in background", + node_path, timeout_seconds, num_unfinished_hosts, num_active_hosts); } if (num_hosts_finished != 0 || try_number != 0) { - auto current_sleep_for = std::chrono::milliseconds(std::min(static_cast(1000), 50 * (try_number + 1))); - std::this_thread::sleep_for(current_sleep_for); + sleepForMilliseconds(std::min(1000, 50 * (try_number + 1))); } /// TODO: add shared lock if (!zookeeper->exists(node_path)) { - throw Exception("Cannot provide query execution status. The query's node " + node_path - + " has been deleted by the cleaner since it was finished (or its lifetime is expired)", - ErrorCodes::UNFINISHED); + throw Exception(ErrorCodes::UNFINISHED, + "Cannot provide query execution status. 
The query's node {} has been deleted by the cleaner since it was finished (or its lifetime is expired)", + node_path); } Strings new_hosts = getNewAndUpdate(getChildrenAllowNoNode(zookeeper, node_path + "/finished")); @@ -1304,7 +1320,7 @@ public: auto [host, port] = Cluster::Address::fromString(host_id); if (status.code != 0 && first_exception == nullptr) - first_exception = std::make_unique("There was an error on [" + host + ":" + toString(port) + "]: " + status.message, status.code); + first_exception = std::make_unique(status.code, "There was an error on [{}:{}]: {}", host, port, status.message); ++num_hosts_finished; diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index 544fb3da27d..f6b4dd00684 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -26,6 +26,7 @@ class ASTAlterQuery; class AccessRightsElements; struct DDLLogEntry; struct DDLTask; +using DDLTaskPtr = std::unique_ptr; /// Pushes distributed DDL query to the queue @@ -37,7 +38,7 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & conte class DDLWorker { public: - DDLWorker(const std::string & zk_root_dir, Context & context_, const Poco::Util::AbstractConfiguration * config, const String & prefix); + DDLWorker(int pool_size_, const std::string & zk_root_dir, Context & context_, const Poco::Util::AbstractConfiguration * config, const String & prefix); ~DDLWorker(); /// Pushes query into DDL queue, returns path to created node @@ -57,14 +58,19 @@ private: ZooKeeperPtr tryGetZooKeeper() const; /// If necessary, creates a new session and caches it. ZooKeeperPtr getAndSetZooKeeper(); + /// ZooKeeper recover loop (while not stopped). + void recoverZooKeeper(); - void processTasks(); + void checkCurrentTasks(); + void scheduleTasks(); + void saveTask(const String & entry_name); /// Reads entry and check that the host belongs to host list of the task - /// Returns true and sets current_task if entry parsed and the check is passed - bool initAndCheckTask(const String & entry_name, String & out_reason, const ZooKeeperPtr & zookeeper); + /// Returns non-empty DDLTaskPtr if entry parsed and the check is passed + DDLTaskPtr initAndCheckTask(const String & entry_name, String & out_reason, const ZooKeeperPtr & zookeeper); - void processTask(DDLTask & task, const ZooKeeperPtr & zookeeper); + void enqueueTask(DDLTaskPtr task); + void processTask(DDLTask & task); /// Check that query should be executed on leader replica only static bool taskShouldBeExecutedOnLeader(const ASTPtr ast_ddl, StoragePtr storage); @@ -101,32 +107,31 @@ private: void attachToThreadGroup(); private: - bool is_circular_replicated; + std::atomic is_circular_replicated = false; Context & context; Poco::Logger * log; - std::unique_ptr current_context; std::string host_fqdn; /// current host domain name std::string host_fqdn_id; /// host_name:port std::string queue_dir; /// dir with queue of queries - /// Name of last task that was skipped or successfully executed - std::string last_processed_task_name; - mutable std::mutex zookeeper_mutex; ZooKeeperPtr current_zookeeper; /// Save state of executed task to avoid duplicate execution on ZK error - using DDLTaskPtr = std::unique_ptr; - DDLTaskPtr current_task; + std::vector last_tasks; std::shared_ptr queue_updated_event = std::make_shared(); std::shared_ptr cleanup_event = std::make_shared(); - std::atomic stop_flag{false}; + std::atomic stop_flag = false; ThreadFromGlobalPool main_thread; ThreadFromGlobalPool cleanup_thread; + /// Size of the 
pool for query execution. + size_t pool_size = 1; + ThreadPool worker_pool; + /// Cleaning starts after new node event is received if the last cleaning wasn't made sooner than N seconds ago Int64 cleanup_delay_period = 60; // minute (in seconds) /// Delete node if its age is greater than that diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index ee29d301c6b..f7a1fc83182 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -40,6 +41,10 @@ bool ExecuteScalarSubqueriesMatcher::needChildVisit(ASTPtr & node, const ASTPtr if (node->as()) return false; + /// Do not go to subqueries defined in with statement + if (node->as()) + return false; + if (node->as()) { /// Do not go to FROM, JOIN, UNION. diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 33fa6215160..0c287e4026d 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -607,8 +607,16 @@ void ExpressionActions::execute(Block & block, bool dry_run) const { for (const auto & action : actions) { - action.execute(block, dry_run); - checkLimits(block); + try + { + action.execute(block, dry_run); + checkLimits(block); + } + catch (Exception & e) + { + e.addMessage(fmt::format("while executing '{}'", action.toString())); + throw; + } } } diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index d9fc44d9b8f..8d67672612c 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1142,8 +1142,8 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( if (hasJoin()) { /// You may find it strange but we support read_in_order for HashJoin and do not support for MergeJoin. 
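/// A minimal standalone sketch (not part of the patch) of the error-context pattern introduced in ExpressionActions::execute() above: each action runs inside a try/catch and the failing action's description is attached to the exception before rethrowing. `Action`, `executeAll` and the use of std::runtime_error are illustrative stand-ins, not ClickHouse's DB::Exception/addMessage API.
#include <functional>
#include <stdexcept>
#include <string>
#include <vector>

struct Action
{
    std::string description;       /// e.g. "FUNCTION plus(a, b) -> c"
    std::function<void()> body;    /// the actual work on a block

    void execute() const { body(); }
    std::string toString() const { return description; }
};

void executeAll(const std::vector<Action> & actions)
{
    for (const auto & action : actions)
    {
        try
        {
            action.execute();
        }
        catch (const std::exception & e)
        {
            /// DB::Exception::addMessage() appends context in place; with a plain
            /// std::exception we rethrow a new exception that carries the context.
            throw std::runtime_error(std::string(e.what()) + ": while executing '" + action.toString() + "'");
        }
    }
}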
- bool has_delayed_stream = query_analyzer.analyzedJoin().needStreamWithNonJoinedRows(); - join_allow_read_in_order = typeid_cast(join.get()) && !has_delayed_stream; + join_has_delayed_stream = query_analyzer.analyzedJoin().needStreamWithNonJoinedRows(); + join_allow_read_in_order = typeid_cast(join.get()) && !join_has_delayed_stream; } optimize_read_in_order = diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index 1cc9d75b19f..cbfebafa439 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -176,6 +176,7 @@ struct ExpressionAnalysisResult bool remove_where_filter = false; bool optimize_read_in_order = false; bool optimize_aggregation_in_order = false; + bool join_has_delayed_stream = false; ExpressionActionsPtr before_array_join; ArrayJoinActionPtr array_join; diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index 8cf581eb463..73251cad991 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -222,6 +222,11 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS required_access.emplace_back(AccessType::ALTER_TTL, database, table); break; } + case ASTAlterCommand::REMOVE_TTL: + { + required_access.emplace_back(AccessType::ALTER_TTL, database, table); + break; + } case ASTAlterCommand::MATERIALIZE_TTL: { required_access.emplace_back(AccessType::ALTER_MATERIALIZE_TTL, database, table); diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 887f4795bcb..22106387fc4 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -14,6 +14,7 @@ #include +#include #include #include #include @@ -25,7 +26,7 @@ #include #include #include -#include +#include #include #include @@ -249,6 +250,8 @@ InterpreterSelectQuery::InterpreterSelectQuery( source_header = input_pipe->getHeader(); } + ApplyWithSubqueryVisitor().visit(query_ptr); + JoinedTables joined_tables(getSubqueryContext(*context), getSelectQuery()); if (!has_input && !storage) @@ -618,7 +621,6 @@ static SortDescription getSortDescription(const ASTSelectQuery & query, const Co { SortDescription order_descr; order_descr.reserve(query.orderBy()->children.size()); - SpecialSort special_sort = context.getSettings().special_sort.value; for (const auto & elem : query.orderBy()->children) { String name = elem->children.front()->getColumnName(); @@ -632,10 +634,10 @@ static SortDescription getSortDescription(const ASTSelectQuery & query, const Co { FillColumnDescription fill_desc = getWithFillDescription(order_by_elem, context); order_descr.emplace_back(name, order_by_elem.direction, - order_by_elem.nulls_direction, collator, special_sort, true, fill_desc); + order_by_elem.nulls_direction, collator, true, fill_desc); } else - order_descr.emplace_back(name, order_by_elem.direction, order_by_elem.nulls_direction, collator, special_sort); + order_descr.emplace_back(name, order_by_elem.direction, order_by_elem.nulls_direction, collator); } return order_descr; @@ -925,8 +927,9 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu join_step->setStepDescription("JOIN"); query_plan.addStep(std::move(join_step)); - if (auto stream = join->createStreamWithNonJoinedRows(join_result_sample, settings.max_block_size)) + if (expressions.join_has_delayed_stream) { + auto stream = std::make_shared(*join, join_result_sample, 
settings.max_block_size); auto source = std::make_shared(std::move(stream)); auto add_non_joined_rows_step = std::make_unique( query_plan.getCurrentDataStream(), std::move(source)); @@ -1441,16 +1444,22 @@ void InterpreterSelectQuery::executeFetchColumns( } StreamLocalLimits limits; + SizeLimits leaf_limits; std::shared_ptr quota; + /// Set the limits and quota for reading data, the speed and time of the query. if (!options.ignore_limits) + { limits = getLimitsForStorage(settings, options); + leaf_limits = SizeLimits(settings.max_rows_to_read_leaf, settings.max_bytes_to_read_leaf, + settings.read_overflow_mode_leaf); + } if (!options.ignore_quota && (options.to_stage == QueryProcessingStage::Complete)) quota = context->getQuota(); - storage->read(query_plan, table_lock, metadata_snapshot, limits, std::move(quota), + storage->read(query_plan, table_lock, metadata_snapshot, limits, leaf_limits, std::move(quota), required_columns, query_info, context, processing_stage, max_block_size, max_streams); } else diff --git a/src/Interpreters/InterpreterShowTablesQuery.cpp b/src/Interpreters/InterpreterShowTablesQuery.cpp index 4b0d4c21ad1..ef7fd840ac5 100644 --- a/src/Interpreters/InterpreterShowTablesQuery.cpp +++ b/src/Interpreters/InterpreterShowTablesQuery.cpp @@ -31,7 +31,24 @@ String InterpreterShowTablesQuery::getRewrittenQuery() /// SHOW DATABASES if (query.databases) - return "SELECT name FROM system.databases"; + { + std::stringstream rewritten_query; + rewritten_query << "SELECT name FROM system.databases"; + + if (!query.like.empty()) + { + rewritten_query + << " WHERE name " + << (query.not_like ? "NOT " : "") + << (query.case_insensitive_like ? "ILIKE " : "LIKE ") + << std::quoted(query.like, '\''); + } + + if (query.limit_length) + rewritten_query << " LIMIT " << query.limit_length; + + return rewritten_query.str(); + } /// SHOW CLUSTER/CLUSTERS if (query.clusters) @@ -41,7 +58,11 @@ String InterpreterShowTablesQuery::getRewrittenQuery() if (!query.like.empty()) { - rewritten_query << " WHERE cluster " << (query.not_like ? "NOT " : "") << "LIKE " << std::quoted(query.like, '\''); + rewritten_query + << " WHERE cluster " + << (query.not_like ? "NOT " : "") + << (query.case_insensitive_like ? "ILIKE " : "LIKE ") + << std::quoted(query.like, '\''); } if (query.limit_length) @@ -85,7 +106,11 @@ String InterpreterShowTablesQuery::getRewrittenQuery() rewritten_query << "database = " << std::quoted(database, '\''); if (!query.like.empty()) - rewritten_query << " AND name " << (query.not_like ? "NOT " : "") << "LIKE " << std::quoted(query.like, '\''); + rewritten_query + << " AND name " + << (query.not_like ? "NOT " : "") + << (query.case_insensitive_like ? "ILIKE " : "LIKE ") + << std::quoted(query.like, '\''); else if (query.where_expression) rewritten_query << " AND (" << query.where_expression << ")"; diff --git a/src/Interpreters/JoinSwitcher.h b/src/Interpreters/JoinSwitcher.h index ea9b94546ef..23f5dff54d7 100644 --- a/src/Interpreters/JoinSwitcher.h +++ b/src/Interpreters/JoinSwitcher.h @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { @@ -75,4 +76,38 @@ private: void switchJoin(); }; + +/// Creates NonJoinedBlockInputStream on the first read. Allows to swap join algo before it. 
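/// A minimal standalone sketch (not part of the patch) of the rewrite that InterpreterShowTablesQuery now performs for SHOW DATABASES above: the statement becomes a SELECT over system.databases with optional [NOT] [I]LIKE and LIMIT clauses. `buildShowDatabasesQuery` and its parameters are illustrative names, not the real interface.
#include <iomanip>
#include <sstream>
#include <string>

std::string buildShowDatabasesQuery(const std::string & like, bool not_like, bool case_insensitive, int limit)
{
    std::stringstream rewritten_query;
    rewritten_query << "SELECT name FROM system.databases";

    if (!like.empty())
    {
        rewritten_query
            << " WHERE name "
            << (not_like ? "NOT " : "")
            << (case_insensitive ? "ILIKE " : "LIKE ")
            << std::quoted(like, '\'');   /// quotes the pattern, escaping embedded quotes
    }

    if (limit > 0)
        rewritten_query << " LIMIT " << limit;

    return rewritten_query.str();
}

/// buildShowDatabasesQuery("%test%", false, true, 5) produces
/// "SELECT name FROM system.databases WHERE name ILIKE '%test%' LIMIT 5".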
+class LazyNonJoinedBlockInputStream : public IBlockInputStream +{ +public: + LazyNonJoinedBlockInputStream(const IJoin & join_, const Block & block, UInt64 max_block_size_) + : join(join_) + , result_sample_block(block) + , max_block_size(max_block_size_) + {} + + String getName() const override { return "LazyNonMergeJoined"; } + Block getHeader() const override { return result_sample_block; } + +protected: + Block readImpl() override + { + if (!stream) + { + stream = join.createStreamWithNonJoinedRows(result_sample_block, max_block_size); + if (!stream) + return {}; + } + + return stream->read(); + } + +private: + BlockInputStreamPtr stream; + const IJoin & join; + Block result_sample_block; + UInt64 max_block_size; +}; + } diff --git a/src/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp index 0154f8453b3..c9072ec3480 100644 --- a/src/Interpreters/MergeJoin.cpp +++ b/src/Interpreters/MergeJoin.cpp @@ -602,7 +602,7 @@ void MergeJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) { JoinCommon::checkTypesOfKeys(block, table_join->keyNamesLeft(), right_table_keys, table_join->keyNamesRight()); materializeBlockInplace(block); - JoinCommon::removeLowCardinalityInplace(block, table_join->keyNamesLeft()); + JoinCommon::removeLowCardinalityInplace(block, table_join->keyNamesLeft(), false); sortBlock(block, left_sort_description); @@ -636,6 +636,8 @@ void MergeJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) /// Back thread even with no data. We have some unfinished data in buffer. if (!not_processed && left_blocks_buffer) not_processed = std::make_shared(NotProcessed{{}, 0, 0, 0}); + + JoinCommon::restoreLowCardinalityInplace(block); } template diff --git a/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp b/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp index dd63093493f..765e7b1fa3d 100644 --- a/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp +++ b/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp @@ -33,6 +33,9 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & v { if (const auto * expr_list = function->arguments->as()) { + if (expr_list->children.size() != 2) + throw Exception("Function CAST must have exactly two arguments", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + const ASTPtr & type_ast = expr_list->children.at(1); if (const auto * type_literal = type_ast->as()) { diff --git a/src/Interpreters/QueryParameterVisitor.cpp b/src/Interpreters/QueryParameterVisitor.cpp new file mode 100644 index 00000000000..297bc211712 --- /dev/null +++ b/src/Interpreters/QueryParameterVisitor.cpp @@ -0,0 +1,53 @@ +#include +#include +#include +#include +#include + + +namespace DB +{ + +class QueryParameterVisitor +{ +public: + explicit QueryParameterVisitor(NameSet & parameters_name) + : query_parameters(parameters_name) + { + } + + void visit(const ASTPtr & ast) + { + for (const auto & child : ast->children) + { + if (const auto & query_parameter = child->as()) + visitQueryParameter(*query_parameter); + else + visit(child); + } + } + +private: + NameSet & query_parameters; + + void visitQueryParameter(const ASTQueryParameter & query_parameter) + { + query_parameters.insert(query_parameter.name); + } +}; + + +NameSet analyzeReceiveQueryParams(const std::string & query) +{ + NameSet query_params; + const char * query_begin = query.data(); + const char * query_end = query.data() + query.size(); + + ParserQuery parser(query_end, false); + ASTPtr extract_query_ast = parseQuery(parser, query_begin, 
query_end, "analyzeReceiveQueryParams", 0, 0); + QueryParameterVisitor(query_params).visit(extract_query_ast); + return query_params; +} + +} + diff --git a/src/Interpreters/QueryParameterVisitor.h b/src/Interpreters/QueryParameterVisitor.h index d3e618058c0..531de2ddafa 100644 --- a/src/Interpreters/QueryParameterVisitor.h +++ b/src/Interpreters/QueryParameterVisitor.h @@ -1,49 +1,13 @@ #pragma once +#include #include -#include -#include -#include -#include + namespace DB { -class QueryParameterVisitor -{ -public: - QueryParameterVisitor(NameSet & parameters_name) : query_parameters(parameters_name) {} - - void visit(const ASTPtr & ast) - { - for (const auto & child : ast->children) - { - if (const auto & query_parameter = child->as()) - visitQueryParameter(*query_parameter); - else - visit(child); - } - } - -private: - NameSet & query_parameters; - - void visitQueryParameter(const ASTQueryParameter & query_parameter) - { - query_parameters.insert(query_parameter.name); - } -}; - -NameSet analyzeReceiveQueryParams(const std::string & query) -{ - NameSet query_params; - const char * query_begin = query.data(); - const char * query_end = query.data() + query.size(); - - ParserQuery parser(query_end, false); - ASTPtr extract_query_ast = parseQuery(parser, query_begin, query_end, "analyzeReceiveQueryParams", 0, 0); - QueryParameterVisitor(query_params).visit(extract_query_ast); - return query_params; -} +/// Find parameters in a query and collect them into set. +NameSet analyzeReceiveQueryParams(const std::string & query); } diff --git a/src/Interpreters/ReplaceQueryParameterVisitor.cpp b/src/Interpreters/ReplaceQueryParameterVisitor.cpp index ec824ed0cce..3dabfb06770 100644 --- a/src/Interpreters/ReplaceQueryParameterVisitor.cpp +++ b/src/Interpreters/ReplaceQueryParameterVisitor.cpp @@ -1,13 +1,10 @@ -#include #include #include #include -#include #include #include #include #include -#include #include #include #include @@ -63,11 +60,14 @@ void ReplaceQueryParameterVisitor::visitQueryParameter(ASTPtr & ast) data_type->deserializeAsTextEscaped(temp_column, read_buffer, format_settings); if (!read_buffer.eof()) - throw Exception("Value " + value + " cannot be parsed as " + type_name + " for query parameter '" + ast_param.name + "'" - " because it isn't parsed completely: only " + toString(read_buffer.count()) + " of " + toString(value.size()) + " bytes was parsed: " - + value.substr(0, read_buffer.count()), ErrorCodes::BAD_QUERY_PARAMETER); + throw Exception(ErrorCodes::BAD_QUERY_PARAMETER, + "Value {} cannot be parsed as {} for query parameter '{}'" + " because it isn't parsed completely: only {} of {} bytes was parsed: {}", + value, type_name, ast_param.name, read_buffer.count(), value.size(), value.substr(0, read_buffer.count())); ast = addTypeConversionToAST(std::make_shared(temp_column[0]), type_name); + + /// Keep the original alias. 
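/// The new QueryParameterVisitor.cpp above collects the names of query parameters ({name:Type} substitutions) by parsing the query and visiting ASTQueryParameter nodes. The standalone sketch below approximates only the result of analyzeReceiveQueryParams with a regular expression; it is an illustration, not the real AST-based implementation.
#include <regex>
#include <set>
#include <string>

std::set<std::string> collectQueryParameters(const std::string & query)
{
    std::set<std::string> names;
    static const std::regex param_re(R"(\{\s*(\w+)\s*:[^{}]+\})");

    for (auto it = std::sregex_iterator(query.begin(), query.end(), param_re); it != std::sregex_iterator(); ++it)
        names.insert((*it)[1].str());

    return names;
}

/// collectQueryParameters("SELECT * FROM t WHERE id = {id:UInt32} AND name = {name:String}")
/// returns {"id", "name"}.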
ast->setAlias(alias); } diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 27236c99a23..2f779b9d7c1 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -64,8 +64,7 @@ std::shared_ptr createSystemLog( engine = "ENGINE = MergeTree"; if (!partition_by.empty()) engine += " PARTITION BY (" + partition_by + ")"; - engine += " ORDER BY (event_date, event_time)" - "SETTINGS min_bytes_for_wide_part = '10M'"; /// Use polymorphic parts for log tables by default + engine += " ORDER BY (event_date, event_time)"; } size_t flush_interval_milliseconds = config.getUInt64(config_prefix + ".flush_interval_milliseconds", diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 2d624922d2a..c0e9d7edc13 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -191,7 +191,20 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID else if (which_type.isStringOrFixedString()) { if (src.getType() == Field::Types::String) + { + if (which_type.isFixedString()) + { + size_t n = assert_cast(type).getN(); + const auto & src_str = src.get(); + if (src_str.size() < n) + { + String src_str_extended = src_str; + src_str_extended.resize(n); + return src_str_extended; + } + } return src; + } } else if (const DataTypeArray * type_array = typeid_cast(&type)) { diff --git a/src/Interpreters/join_common.cpp b/src/Interpreters/join_common.cpp index 866893fa359..17c289b151d 100644 --- a/src/Interpreters/join_common.cpp +++ b/src/Interpreters/join_common.cpp @@ -185,13 +185,24 @@ void removeLowCardinalityInplace(Block & block) } } -void removeLowCardinalityInplace(Block & block, const Names & names) +void removeLowCardinalityInplace(Block & block, const Names & names, bool change_type) { for (const String & column_name : names) { auto & col = block.getByName(column_name); col.column = recursiveRemoveLowCardinality(col.column); - col.type = recursiveRemoveLowCardinality(col.type); + if (change_type) + col.type = recursiveRemoveLowCardinality(col.type); + } +} + +void restoreLowCardinalityInplace(Block & block) +{ + for (size_t i = 0; i < block.columns(); ++i) + { + auto & col = block.getByPosition(i); + if (col.type->lowCardinality() && col.column && !col.column->lowCardinality()) + col.column = changeLowCardinality(col.column, col.type->createColumn()); } } diff --git a/src/Interpreters/join_common.h b/src/Interpreters/join_common.h index 11fecd4e3fb..cfd727704a0 100644 --- a/src/Interpreters/join_common.h +++ b/src/Interpreters/join_common.h @@ -23,7 +23,8 @@ Columns materializeColumns(const Block & block, const Names & names); ColumnRawPtrs materializeColumnsInplace(Block & block, const Names & names); ColumnRawPtrs getRawPointers(const Columns & columns); void removeLowCardinalityInplace(Block & block); -void removeLowCardinalityInplace(Block & block, const Names & names); +void removeLowCardinalityInplace(Block & block, const Names & names, bool change_type = true); +void restoreLowCardinalityInplace(Block & block); ColumnRawPtrs extractKeysForJoin(const Block & block_keys, const Names & key_names_right); diff --git a/src/Interpreters/sortBlock.cpp b/src/Interpreters/sortBlock.cpp index cb3c36e5356..c2436806fcd 100644 --- a/src/Interpreters/sortBlock.cpp +++ b/src/Interpreters/sortBlock.cpp @@ -132,12 +132,7 @@ void sortBlock(Block & block, const SortDescription & description, UInt64 limit) else if (!isColumnConst(*column)) { int nan_direction_hint = 
description[0].nulls_direction; - auto special_sort = description[0].special_sort; - - if (special_sort == SpecialSort::OPENCL_BITONIC) - column->getSpecialPermutation(reverse, limit, nan_direction_hint, perm, IColumn::SpecialSort::OPENCL_BITONIC); - else - column->getPermutation(reverse, limit, nan_direction_hint, perm); + column->getPermutation(reverse, limit, nan_direction_hint, perm); } else /// we don't need to do anything with const column @@ -211,13 +206,11 @@ void sortBlock(Block & block, const SortDescription & description, UInt64 limit) for (const auto & column : columns_with_sort_desc) { while (!ranges.empty() && limit && limit <= ranges.back().first) - { ranges.pop_back(); - } + if (ranges.empty()) - { break; - } + column.column->updatePermutation( column.description.direction < 0, limit, column.description.nulls_direction, perm, ranges); } @@ -225,9 +218,7 @@ void sortBlock(Block & block, const SortDescription & description, UInt64 limit) size_t columns = block.columns(); for (size_t i = 0; i < columns; ++i) - { block.getByPosition(i).column = block.getByPosition(i).column->permute(perm, limit); - } } } diff --git a/src/Interpreters/ya.make b/src/Interpreters/ya.make index 540ae7f9f2f..c308fdf5bc3 100644 --- a/src/Interpreters/ya.make +++ b/src/Interpreters/ya.make @@ -23,6 +23,7 @@ SRCS( addTypeConversionToAST.cpp AggregateDescription.cpp Aggregator.cpp + ApplyWithSubqueryVisitor.cpp ArithmeticOperationsInAgrFuncOptimize.cpp ArrayJoinAction.cpp AsynchronousMetricLog.cpp @@ -126,6 +127,7 @@ SRCS( QueryAliasesVisitor.cpp QueryLog.cpp QueryNormalizer.cpp + QueryParameterVisitor.cpp QueryThreadLog.cpp RemoveInjectiveFunctionsVisitor.cpp RenameColumnVisitor.cpp diff --git a/src/Parsers/ASTAlterQuery.cpp b/src/Parsers/ASTAlterQuery.cpp index d033cdc79a2..d74156d11d8 100644 --- a/src/Parsers/ASTAlterQuery.cpp +++ b/src/Parsers/ASTAlterQuery.cpp @@ -99,12 +99,19 @@ void ASTAlterCommand::formatImpl( settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "MODIFY COLUMN " << (if_exists ? "IF EXISTS " : "") << (settings.hilite ? hilite_none : ""); col_decl->formatImpl(settings, state, frame); - if (first) - settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " FIRST " << (settings.hilite ? hilite_none : ""); - else if (column) /// AFTER + if (!remove_property.empty()) { - settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " AFTER " << (settings.hilite ? hilite_none : ""); - column->formatImpl(settings, state, frame); + settings.ostr << (settings.hilite ? hilite_keyword : "") << " REMOVE " << remove_property; + } + else + { + if (first) + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " FIRST " << (settings.hilite ? hilite_none : ""); + else if (column) /// AFTER + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " AFTER " << (settings.hilite ? hilite_none : ""); + column->formatImpl(settings, state, frame); + } } } else if (type == ASTAlterCommand::COMMENT_COLUMN) @@ -280,6 +287,10 @@ void ASTAlterCommand::formatImpl( settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "MODIFY TTL " << (settings.hilite ? hilite_none : ""); ttl->formatImpl(settings, state, frame); } + else if (type == ASTAlterCommand::REMOVE_TTL) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "REMOVE TTL" << (settings.hilite ? hilite_none : ""); + } else if (type == ASTAlterCommand::MATERIALIZE_TTL) { settings.ostr << (settings.hilite ? 
hilite_keyword : "") << indent_str << "MATERIALIZE TTL" diff --git a/src/Parsers/ASTAlterQuery.h b/src/Parsers/ASTAlterQuery.h index df27ba0a3b0..78e0c726ddf 100644 --- a/src/Parsers/ASTAlterQuery.h +++ b/src/Parsers/ASTAlterQuery.h @@ -36,6 +36,7 @@ public: MATERIALIZE_TTL, MODIFY_SETTING, MODIFY_QUERY, + REMOVE_TTL, ADD_INDEX, DROP_INDEX, @@ -167,6 +168,9 @@ public: /// Target column name ASTPtr rename_to; + /// Which property user want to remove + String remove_property; + String getID(char delim) const override { return "AlterCommand" + (delim + std::to_string(static_cast(type))); } ASTPtr clone() const override; diff --git a/src/Parsers/ASTShowTablesQuery.cpp b/src/Parsers/ASTShowTablesQuery.cpp index 5a284109cf2..1e8dad13ad3 100644 --- a/src/Parsers/ASTShowTablesQuery.cpp +++ b/src/Parsers/ASTShowTablesQuery.cpp @@ -13,29 +13,41 @@ ASTPtr ASTShowTablesQuery::clone() const return res; } +void ASTShowTablesQuery::formatLike(const FormatSettings & settings) const +{ + if (!like.empty()) + settings.ostr + << (settings.hilite ? hilite_keyword : "") + << (not_like ? " NOT" : "") + << (case_insensitive_like ? " ILIKE " : " LIKE ") + << (settings.hilite ? hilite_none : "") + << std::quoted(like, '\''); +} + +void ASTShowTablesQuery::formatLimit(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const +{ + if (limit_length) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << " LIMIT " << (settings.hilite ? hilite_none : ""); + limit_length->formatImpl(settings, state, frame); + } +} + void ASTShowTablesQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { if (databases) { settings.ostr << (settings.hilite ? hilite_keyword : "") << "SHOW DATABASES" << (settings.hilite ? hilite_none : ""); + formatLike(settings); + formatLimit(settings, state, frame); + } else if (clusters) { settings.ostr << (settings.hilite ? hilite_keyword : "") << "SHOW CLUSTERS" << (settings.hilite ? hilite_none : ""); + formatLike(settings); + formatLimit(settings, state, frame); - if (!like.empty()) - settings.ostr - << (settings.hilite ? hilite_keyword : "") - << (not_like ? " NOT" : "") - << (case_insensitive_like ? " ILIKE " : " LIKE ") - << (settings.hilite ? hilite_none : "") - << std::quoted(like, '\''); - - if (limit_length) - { - settings.ostr << (settings.hilite ? hilite_keyword : "") << " LIMIT " << (settings.hilite ? hilite_none : ""); - limit_length->formatImpl(settings, state, frame); - } } else if (cluster) { @@ -51,25 +63,15 @@ void ASTShowTablesQuery::formatQueryImpl(const FormatSettings & settings, Format settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(from); - if (!like.empty()) - settings.ostr - << (settings.hilite ? hilite_keyword : "") - << (not_like ? " NOT" : "") - << (case_insensitive_like ? " ILIKE " : " LIKE ") - << (settings.hilite ? hilite_none : "") - << std::quoted(like, '\''); + formatLike(settings); - else if (where_expression) + if (where_expression) { settings.ostr << (settings.hilite ? hilite_keyword : "") << " WHERE " << (settings.hilite ? hilite_none : ""); where_expression->formatImpl(settings, state, frame); } - if (limit_length) - { - settings.ostr << (settings.hilite ? hilite_keyword : "") << " LIMIT " << (settings.hilite ? 
hilite_none : ""); - limit_length->formatImpl(settings, state, frame); - } + formatLimit(settings, state, frame); } } diff --git a/src/Parsers/ASTShowTablesQuery.h b/src/Parsers/ASTShowTablesQuery.h index acf365be91a..43976e8a958 100644 --- a/src/Parsers/ASTShowTablesQuery.h +++ b/src/Parsers/ASTShowTablesQuery.h @@ -36,6 +36,8 @@ public: ASTPtr clone() const override; protected: + void formatLike(const FormatSettings & settings) const; + void formatLimit(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const; void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; }; diff --git a/src/Parsers/ASTWithElement.cpp b/src/Parsers/ASTWithElement.cpp new file mode 100644 index 00000000000..e8dd4ff0498 --- /dev/null +++ b/src/Parsers/ASTWithElement.cpp @@ -0,0 +1,21 @@ +#include + +namespace DB +{ + +ASTPtr ASTWithElement::clone() const +{ + const auto res = std::make_shared(*this); + res->name = name; + res->subquery = subquery->clone(); + res->children.emplace_back(res->subquery); + return res; +} + +void ASTWithElement::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const +{ + settings.writeIdentifier(name); + settings.ostr << (settings.hilite ? hilite_keyword : "") << " AS " << (settings.hilite ? hilite_none : ""); + subquery->formatImpl(settings, state, frame); +} +} diff --git a/src/Parsers/ASTWithElement.h b/src/Parsers/ASTWithElement.h new file mode 100644 index 00000000000..97c68579fa1 --- /dev/null +++ b/src/Parsers/ASTWithElement.h @@ -0,0 +1,25 @@ +#pragma once + +#include + + +namespace DB +{ +/** subquery in with statement + */ +class ASTWithElement : public IAST +{ +public: + String name; + ASTPtr subquery; + + /** Get the text that identifies this element. 
*/ + String getID(char) const override { return "WithElement"; } + + ASTPtr clone() const override; + +protected: + void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; +}; + +} diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp index 9930bb649b4..0739f24a688 100644 --- a/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -82,12 +82,23 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ParserKeyword s_where("WHERE"); ParserKeyword s_to("TO"); + ParserKeyword s_remove("REMOVE"); + ParserKeyword s_default("DEFAULT"); + ParserKeyword s_materialized("MATERIALIZED"); + ParserKeyword s_alias("ALIAS"); + ParserKeyword s_comment("COMMENT"); + ParserKeyword s_codec("CODEC"); + ParserKeyword s_ttl("TTL"); + + ParserKeyword s_remove_ttl("REMOVE TTL"); + ParserCompoundIdentifier parser_name; ParserStringLiteral parser_string_literal; + ParserIdentifier parser_remove_property; ParserCompoundColumnDeclaration parser_col_decl; ParserIndexDeclaration parser_idx_decl; ParserConstraintDeclaration parser_constraint_decl; - ParserCompoundColumnDeclaration parser_modify_col_decl(false); + ParserCompoundColumnDeclaration parser_modify_col_decl(false, false, true); ParserPartition parser_partition; ParserExpression parser_exp_elem; ParserList parser_assignment_list( @@ -433,14 +444,33 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_modify_col_decl.parse(pos, command->col_decl, expected)) return false; - if (s_first.ignore(pos, expected)) - command->first = true; - else if (s_after.ignore(pos, expected)) + if (s_remove.ignore(pos, expected)) { - if (!parser_name.parse(pos, command->column, expected)) + if (s_default.ignore(pos, expected)) + command->remove_property = "DEFAULT"; + else if (s_materialized.ignore(pos, expected)) + command->remove_property = "MATERIALIZED"; + else if (s_alias.ignore(pos, expected)) + command->remove_property = "ALIAS"; + else if (s_comment.ignore(pos, expected)) + command->remove_property = "COMMENT"; + else if (s_codec.ignore(pos, expected)) + command->remove_property = "CODEC"; + else if (s_ttl.ignore(pos, expected)) + command->remove_property = "TTL"; + else return false; } - + else + { + if (s_first.ignore(pos, expected)) + command->first = true; + else if (s_after.ignore(pos, expected)) + { + if (!parser_name.parse(pos, command->column, expected)) + return false; + } + } command->type = ASTAlterCommand::MODIFY_COLUMN; } else if (s_modify_order_by.ignore(pos, expected)) @@ -496,6 +526,10 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected return false; command->type = ASTAlterCommand::MODIFY_TTL; } + else if (s_remove_ttl.ignore(pos, expected)) + { + command->type = ASTAlterCommand::REMOVE_TTL; + } else if (s_materialize_ttl.ignore(pos, expected)) { command->type = ASTAlterCommand::MATERIALIZE_TTL; diff --git a/src/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h index 953f657a04e..60933f7384d 100644 --- a/src/Parsers/ParserCreateQuery.h +++ b/src/Parsers/ParserCreateQuery.h @@ -90,8 +90,10 @@ template class IParserColumnDeclaration : public IParserBase { public: - explicit IParserColumnDeclaration(bool require_type_ = true, bool allow_null_modifiers_ = false) - : require_type(require_type_), allow_null_modifiers(allow_null_modifiers_) + explicit IParserColumnDeclaration(bool require_type_ = true, bool allow_null_modifiers_ = false, bool 
check_keywords_after_name_ = false) + : require_type(require_type_) + , allow_null_modifiers(allow_null_modifiers_) + , check_keywords_after_name(check_keywords_after_name_) { } @@ -104,6 +106,7 @@ protected: bool require_type = true; bool allow_null_modifiers = false; + bool check_keywords_after_name = false; }; using ParserColumnDeclaration = IParserColumnDeclaration; @@ -122,6 +125,7 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E ParserKeyword s_comment{"COMMENT"}; ParserKeyword s_codec{"CODEC"}; ParserKeyword s_ttl{"TTL"}; + ParserKeyword s_remove{"REMOVE"}; ParserTernaryOperatorExpression expr_parser; ParserStringLiteral string_literal_parser; ParserCodec codec_parser; @@ -132,6 +136,24 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E if (!name_parser.parse(pos, name, expected)) return false; + const auto column_declaration = std::make_shared(); + tryGetIdentifierNameInto(name, column_declaration->name); + + /// This keyword may occur only in MODIFY COLUMN query. We check it here + /// because ParserDataType parses types as an arbitrary identifiers and + /// doesn't check that parsed string is existing data type. In this way + /// REMOVE keyword can be parsed as data type and further parsing will fail. + /// So we just check this keyword and in case of success return column + /// column declaration with name only. + if (s_remove.checkWithoutMoving(pos, expected)) + { + if (!check_keywords_after_name) + return false; + + node = column_declaration; + return true; + } + /** column name should be followed by type name if it * is not immediately followed by {DEFAULT, MATERIALIZED, ALIAS, COMMENT} */ @@ -197,9 +219,7 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E return false; } - const auto column_declaration = std::make_shared(); node = column_declaration; - tryGetIdentifierNameInto(name, column_declaration->name); if (type) { diff --git a/src/Parsers/ParserSelectQuery.cpp b/src/Parsers/ParserSelectQuery.cpp index d2d7bbf9f21..9f2df82b4b4 100644 --- a/src/Parsers/ParserSelectQuery.cpp +++ b/src/Parsers/ParserSelectQuery.cpp @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB @@ -74,7 +75,10 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { if (s_with.ignore(pos, expected)) { - if (!exp_list_for_with_clause.parse(pos, with_expression_list, expected)) + if (!ParserList(std::make_unique(), std::make_unique(TokenType::Comma)) + .parse(pos, with_expression_list, expected)) + return false; + if (with_expression_list->children.empty()) return false; } } diff --git a/src/Parsers/ParserShowTablesQuery.cpp b/src/Parsers/ParserShowTablesQuery.cpp index 66ecdf61c58..4586e10a8a3 100644 --- a/src/Parsers/ParserShowTablesQuery.cpp +++ b/src/Parsers/ParserShowTablesQuery.cpp @@ -46,6 +46,25 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (s_databases.ignore(pos)) { query->databases = true; + + if (s_not.ignore(pos, expected)) + query->not_like = true; + + if (bool insensitive = s_ilike.ignore(pos, expected); insensitive || s_like.ignore(pos, expected)) + { + if (insensitive) + query->case_insensitive_like = true; + + if (!like_p.parse(pos, like, expected)) + return false; + } + else if (query->not_like) + return false; + if (s_limit.ignore(pos, expected)) + { + if (!exp_elem.parse(pos, query->limit_length, expected)) + return false; + } } else if (s_clusters.ignore(pos)) { diff --git a/src/Parsers/ParserWithElement.cpp 
b/src/Parsers/ParserWithElement.cpp new file mode 100644 index 00000000000..048e891f0df --- /dev/null +++ b/src/Parsers/ParserWithElement.cpp @@ -0,0 +1,39 @@ +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ +bool ParserWithElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + ParserIdentifier s_ident; + ParserKeyword s_as("AS"); + ParserSubquery s_subquery; + + auto old_pos = pos; + if (ASTPtr name, subquery; + s_ident.parse(pos, name, expected) && s_as.ignore(pos, expected) && s_subquery.parse(pos, subquery, expected)) + { + auto with_element = std::make_shared(); + tryGetIdentifierNameInto(name, with_element->name); + with_element->subquery = subquery; + node = with_element; + } + else + { + pos = old_pos; + ParserExpressionWithOptionalAlias s_expr(false); + if (!s_expr.parse(pos, node, expected)) + return false; + } + return true; +} + + +} diff --git a/src/Parsers/ParserWithElement.h b/src/Parsers/ParserWithElement.h new file mode 100644 index 00000000000..75ad11f5deb --- /dev/null +++ b/src/Parsers/ParserWithElement.h @@ -0,0 +1,18 @@ +#pragma once + +#include + + +namespace DB +{ +/** WITH (scalar query) AS identifier + * or WITH identifier AS (subquery) + */ +class ParserWithElement : public IParserBase +{ +protected: + const char * getName() const override { return "WITH element"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; +}; + +} diff --git a/src/Parsers/formatSettingName.cpp b/src/Parsers/formatSettingName.cpp index 3f30142716e..c305496fdb3 100644 --- a/src/Parsers/formatSettingName.cpp +++ b/src/Parsers/formatSettingName.cpp @@ -2,6 +2,7 @@ #include #include #include +#include namespace DB diff --git a/src/Parsers/formatSettingName.h b/src/Parsers/formatSettingName.h index c9ed94dcc7d..a700d347a5f 100644 --- a/src/Parsers/formatSettingName.h +++ b/src/Parsers/formatSettingName.h @@ -1,5 +1,6 @@ #pragma once +#include #include diff --git a/src/Parsers/ya.make b/src/Parsers/ya.make index fabf2bbb8fd..0a0c301b722 100644 --- a/src/Parsers/ya.make +++ b/src/Parsers/ya.make @@ -61,6 +61,7 @@ SRCS( ASTTTLElement.cpp ASTUserNameWithHost.cpp ASTWithAlias.cpp + ASTWithElement.cpp CommonParsers.cpp ExpressionElementParsers.cpp ExpressionListParsers.cpp @@ -133,6 +134,7 @@ SRCS( ParserUseQuery.cpp ParserUserNameWithHost.cpp ParserWatchQuery.cpp + ParserWithElement.cpp parseUserName.cpp queryToString.cpp QueryWithOutputSettingsPushDownVisitor.cpp diff --git a/src/Processors/Pipe.cpp b/src/Processors/Pipe.cpp index 90a8a65ff25..9e9c9cab385 100644 --- a/src/Processors/Pipe.cpp +++ b/src/Processors/Pipe.cpp @@ -788,6 +788,15 @@ void Pipe::setLimits(const StreamLocalLimits & limits) } } +void Pipe::setLeafLimits(const SizeLimits & leaf_limits) +{ + for (auto & processor : processors) + { + if (auto * source_with_progress = dynamic_cast(processor.get())) + source_with_progress->setLeafLimits(leaf_limits); + } +} + void Pipe::setQuota(const std::shared_ptr & quota) { for (auto & processor : processors) diff --git a/src/Processors/Pipe.h b/src/Processors/Pipe.h index 4adb529bb1e..f674663154d 100644 --- a/src/Processors/Pipe.h +++ b/src/Processors/Pipe.h @@ -97,6 +97,7 @@ public: /// Specify quotas and limits for every ISourceWithProgress. void setLimits(const StreamLocalLimits & limits); + void setLeafLimits(const SizeLimits & leaf_limits); void setQuota(const std::shared_ptr & quota); /// Do not allow to change the table while the processors of pipe are alive. 
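/// ParserWithElement above first tries the newly supported form `identifier AS (subquery)` and, if that fails, restores the position and falls back to an ordinary expression with an optional alias. The standalone sketch below illustrates the same two-branch fallback on plain strings; WithElement/parseWithElement are illustrative names, and the real parser backtracks over a token stream rather than searching text.
#include <optional>
#include <string>

struct WithElement
{
    std::string name;   /// empty for the plain-expression form
    std::string body;   /// "(subquery)" text or the expression itself
};

/// Branch 1: WITH name AS (subquery)
std::optional<WithElement> tryParseNamedSubquery(const std::string & text)
{
    auto as_pos = text.find(" AS (");
    if (as_pos == std::string::npos || text.empty() || text.back() != ')')
        return std::nullopt;
    return WithElement{text.substr(0, as_pos), text.substr(as_pos + 4)};
}

WithElement parseWithElement(const std::string & text)
{
    if (auto named = tryParseNamedSubquery(text))
        return *named;

    /// Branch 2 (fallback): the previously supported form, e.g.
    /// WITH (SELECT 1) AS x, or any scalar expression with an alias.
    return WithElement{"", text};
}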
diff --git a/src/Processors/QueryPlan/ReadFromStorageStep.cpp b/src/Processors/QueryPlan/ReadFromStorageStep.cpp index 2f305e7220c..b085c177ad4 100644 --- a/src/Processors/QueryPlan/ReadFromStorageStep.cpp +++ b/src/Processors/QueryPlan/ReadFromStorageStep.cpp @@ -15,6 +15,7 @@ ReadFromStorageStep::ReadFromStorageStep( TableLockHolder table_lock_, StorageMetadataPtr metadata_snapshot_, StreamLocalLimits & limits_, + SizeLimits & leaf_limits_, std::shared_ptr quota_, StoragePtr storage_, const Names & required_columns_, @@ -26,6 +27,7 @@ ReadFromStorageStep::ReadFromStorageStep( : table_lock(std::move(table_lock_)) , metadata_snapshot(std::move(metadata_snapshot_)) , limits(limits_) + , leaf_limits(leaf_limits_) , quota(std::move(quota_)) , storage(std::move(storage_)) , required_columns(required_columns_) @@ -86,6 +88,16 @@ ReadFromStorageStep::ReadFromStorageStep( pipe.setLimits(limits); + /** + * Leaf size limits should be applied only for local processing of distributed queries. + * Such limits allow to control the read stage on leaf nodes and exclude the merging stage. + * Consider the case when distributed query needs to read from multiple shards. Then leaf + * limits will be applied on the shards only (including the root node) but will be ignored + * on the results merging stage. + */ + if (!storage->isRemote()) + pipe.setLeafLimits(leaf_limits); + if (quota) pipe.setQuota(quota); diff --git a/src/Processors/QueryPlan/ReadFromStorageStep.h b/src/Processors/QueryPlan/ReadFromStorageStep.h index 9c2b9e56450..98cde63a863 100644 --- a/src/Processors/QueryPlan/ReadFromStorageStep.h +++ b/src/Processors/QueryPlan/ReadFromStorageStep.h @@ -26,6 +26,7 @@ public: TableLockHolder table_lock, StorageMetadataPtr metadata_snapshot, StreamLocalLimits & limits, + SizeLimits & leaf_limits, std::shared_ptr quota, StoragePtr storage, const Names & required_columns, @@ -47,6 +48,7 @@ private: TableLockHolder table_lock; StorageMetadataPtr metadata_snapshot; StreamLocalLimits limits; + SizeLimits leaf_limits; std::shared_ptr quota; StoragePtr storage; diff --git a/src/Processors/Sources/SourceFromInputStream.h b/src/Processors/Sources/SourceFromInputStream.h index 630c712daef..2e8cf007623 100644 --- a/src/Processors/Sources/SourceFromInputStream.h +++ b/src/Processors/Sources/SourceFromInputStream.h @@ -33,6 +33,7 @@ public: /// Implementation for methods from ISourceWithProgress. 
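/// Pipe::setLeafLimits() and ReadFromStorageStep above push the new leaf limits down to every progress-reporting source, and only when the storage is not remote, so the limits constrain reading on the shards but not the merging stage on the initiator. A sketch of that propagation with stub types follows; SizeLimits, IProcessor and ISourceWithProgress here are simplified stand-ins for the real classes.
#include <cstddef>
#include <memory>
#include <vector>

struct SizeLimits { size_t max_rows = 0; size_t max_bytes = 0; };

struct IProcessor { virtual ~IProcessor() = default; };

struct ISourceWithProgress : IProcessor
{
    virtual void setLeafLimits(const SizeLimits & leaf_limits) = 0;
};

/// Mirrors Pipe::setLeafLimits: only sources that report progress receive the
/// limits; all other processors in the pipe are left untouched.
void setLeafLimits(std::vector<std::shared_ptr<IProcessor>> & processors, const SizeLimits & leaf_limits)
{
    for (auto & processor : processors)
        if (auto * source = dynamic_cast<ISourceWithProgress *>(processor.get()))
            source->setLeafLimits(leaf_limits);
}

/// As in ReadFromStorageStep, the caller would skip this for remote storages:
///     if (!storage->isRemote())
///         pipe.setLeafLimits(leaf_limits);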
void setLimits(const StreamLocalLimits & limits_) final { stream->setLimits(limits_); } + void setLeafLimits(const SizeLimits &) final { } void setQuota(const std::shared_ptr & quota_) final { stream->setQuota(quota_); } void setProcessListElement(QueryStatus * elem) final { stream->setProcessListElement(elem); } void setProgressCallback(const ProgressCallback & callback) final { stream->setProgressCallback(callback); } diff --git a/src/Processors/Sources/SourceWithProgress.cpp b/src/Processors/Sources/SourceWithProgress.cpp index d6972f99369..021bf04d86d 100644 --- a/src/Processors/Sources/SourceWithProgress.cpp +++ b/src/Processors/Sources/SourceWithProgress.cpp @@ -93,6 +93,12 @@ void SourceWithProgress::progress(const Progress & value) } } + if (!leaf_limits.check(rows_to_check_limit, progress.read_bytes, "rows or bytes to read on leaf node", + ErrorCodes::TOO_MANY_ROWS, ErrorCodes::TOO_MANY_BYTES)) + { + cancel(); + } + size_t total_rows = progress.total_rows_to_read; constexpr UInt64 profile_events_update_period_microseconds = 10 * 1000; // 10 milliseconds diff --git a/src/Processors/Sources/SourceWithProgress.h b/src/Processors/Sources/SourceWithProgress.h index fdab345548b..3aa7a81f418 100644 --- a/src/Processors/Sources/SourceWithProgress.h +++ b/src/Processors/Sources/SourceWithProgress.h @@ -17,6 +17,9 @@ public: /// Set limitations that checked on each chunk. virtual void setLimits(const StreamLocalLimits & limits_) = 0; + /// Set limitations that checked on each chunk for distributed queries on leaf nodes. + virtual void setLeafLimits(const SizeLimits & leaf_limits_) = 0; + /// Set the quota. If you set a quota on the amount of raw data, /// then you should also set mode = LIMITS_TOTAL to LocalLimits with setLimits. virtual void setQuota(const std::shared_ptr & quota_) = 0; @@ -46,6 +49,7 @@ public: SourceWithProgress(Block header, bool enable_auto_progress); void setLimits(const StreamLocalLimits & limits_) final { limits = limits_; } + void setLeafLimits(const SizeLimits & leaf_limits_) final {leaf_limits = leaf_limits_; } void setQuota(const std::shared_ptr & quota_) final { quota = quota_; } void setProcessListElement(QueryStatus * elem) final { process_list_elem = elem; } void setProgressCallback(const ProgressCallback & callback) final { progress_callback = callback; } @@ -59,6 +63,7 @@ protected: private: StreamLocalLimits limits; + SizeLimits leaf_limits; std::shared_ptr quota; ProgressCallback progress_callback; QueryStatus * process_list_elem = nullptr; diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index de68635d26e..bc5436f00ee 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -33,7 +33,7 @@ void ReplicasStatusHandler::handleRequest(Poco::Net::HTTPServerRequest & request /// Even if lag is small, output detailed information about the lag. bool verbose = params.get("verbose", "") == "1"; - const MergeTreeSettings & settings = context.getMergeTreeSettings(); + const MergeTreeSettings & settings = context.getReplicatedMergeTreeSettings(); bool ok = true; std::stringstream message; diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index eee85f3e3ae..b1476f388d4 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -962,7 +962,7 @@ void TCPHandler::receiveQuery() /// i.e. when the INSERT is done with the global context (w/o user). 
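/// The check added to SourceWithProgress::progress() above cancels the source once the rows or bytes read on the leaf node exceed the configured leaf limits. Below is a simplified standalone sketch of that check-and-cancel behaviour; the real SizeLimits::check() can also throw TOO_MANY_ROWS / TOO_MANY_BYTES depending on the overflow mode, which is not modelled here, and LeafSizeLimits/LeafLimitedSource are illustrative stand-ins.
#include <atomic>
#include <cstddef>

struct LeafSizeLimits   /// simplified stand-in for DB::SizeLimits
{
    size_t max_rows = 0;    /// 0 means unlimited
    size_t max_bytes = 0;

    bool check(size_t rows, size_t bytes) const
    {
        return (max_rows == 0 || rows <= max_rows)
            && (max_bytes == 0 || bytes <= max_bytes);
    }
};

struct LeafLimitedSource
{
    LeafSizeLimits leaf_limits;
    std::atomic<bool> is_cancelled{false};

    void progress(size_t total_read_rows, size_t total_read_bytes)
    {
        /// Analogous to the cancel() call in SourceWithProgress::progress():
        /// stop reading on this node instead of failing the whole query.
        if (!leaf_limits.check(total_read_rows, total_read_bytes))
            is_cancelled = true;
    }
};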
if (!client_info.initial_user.empty()) { - query_context->setUserWithoutCheckingPassword(client_info.initial_user, socket().peerAddress()); + query_context->setUserWithoutCheckingPassword(client_info.initial_user, client_info.initial_address); LOG_DEBUG(log, "User (initial): {}", query_context->getUserName()); } /// No need to update connection_context, since it does not requires user (it will not be used for query execution) diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index caf98e911ab..3b062c2d689 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -43,6 +43,30 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } +namespace +{ + +AlterCommand::RemoveProperty removePropertyFromString(const String & property) +{ + if (property.empty()) + return AlterCommand::RemoveProperty::NO_PROPERTY; + else if (property == "DEFAULT") + return AlterCommand::RemoveProperty::DEFAULT; + else if (property == "MATERIALIZED") + return AlterCommand::RemoveProperty::MATERIALIZED; + else if (property == "ALIAS") + return AlterCommand::RemoveProperty::ALIAS; + else if (property == "COMMENT") + return AlterCommand::RemoveProperty::COMMENT; + else if (property == "CODEC") + return AlterCommand::RemoveProperty::CODEC; + else if (property == "TTL") + return AlterCommand::RemoveProperty::TTL; + + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot remove unknown property '{}'", property); +} + +} std::optional AlterCommand::parse(const ASTAlterCommand * command_ast) { @@ -111,8 +135,9 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.type = AlterCommand::MODIFY_COLUMN; const auto & ast_col_decl = command_ast->col_decl->as(); - command.column_name = ast_col_decl.name; + command.to_remove = removePropertyFromString(command_ast->remove_property); + if (ast_col_decl.type) { command.data_type = data_type_factory.get(ast_col_decl.type); @@ -237,6 +262,13 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.ttl = command_ast->ttl; return command; } + else if (command_ast->type == ASTAlterCommand::REMOVE_TTL) + { + AlterCommand command; + command.ast = command_ast->clone(); + command.type = AlterCommand::REMOVE_TTL; + return command; + } else if (command_ast->type == ASTAlterCommand::MODIFY_SETTING) { AlterCommand command; @@ -301,24 +333,45 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, const Context & con { metadata.columns.modify(column_name, after_column, first, [&](ColumnDescription & column) { - if (codec) - column.codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(codec, data_type ? data_type : column.type, false); - - if (comment) - column.comment = *comment; - - if (ttl) - column.ttl = ttl; - - if (data_type) - column.type = data_type; - - /// User specified default expression or changed - /// datatype. We have to replace default. 
- if (default_expression || data_type) + if (to_remove == RemoveProperty::DEFAULT + || to_remove == RemoveProperty::MATERIALIZED + || to_remove == RemoveProperty::ALIAS) { - column.default_desc.kind = default_kind; - column.default_desc.expression = default_expression; + column.default_desc = ColumnDefault{}; + } + else if (to_remove == RemoveProperty::CODEC) + { + column.codec.reset(); + } + else if (to_remove == RemoveProperty::COMMENT) + { + column.comment = String{}; + } + else if (to_remove == RemoveProperty::TTL) + { + column.ttl.reset(); + } + else + { + if (codec) + column.codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(codec, data_type ? data_type : column.type, false); + + if (comment) + column.comment = *comment; + + if (ttl) + column.ttl = ttl; + + if (data_type) + column.type = data_type; + + /// User specified default expression or changed + /// datatype. We have to replace default. + if (default_expression || data_type) + { + column.default_desc.kind = default_kind; + column.default_desc.expression = default_expression; + } } }); @@ -450,6 +503,10 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, const Context & con { metadata.table_ttl = TTLTableDescription::getTTLForTableFromAST(ttl, metadata.columns, context, metadata.primary_key); } + else if (type == REMOVE_TTL) + { + metadata.table_ttl = TTLTableDescription{}; + } else if (type == MODIFY_QUERY) { metadata.select = SelectQueryDescription::getSelectQueryFromASTForMatView(select, context); @@ -584,6 +641,10 @@ bool AlterCommand::isRequireMutationStage(const StorageInMemoryMetadata & metada if (ignore) return false; + /// We remove properties on metadata level + if (isRemovingProperty() || type == REMOVE_TTL) + return false; + if (type == DROP_COLUMN || type == DROP_INDEX || type == RENAME_COLUMN) return true; @@ -636,6 +697,11 @@ bool AlterCommand::isTTLAlter(const StorageInMemoryMetadata & metadata) const return ttl_changed; } +bool AlterCommand::isRemovingProperty() const +{ + return to_remove != RemoveProperty::NO_PROPERTY; +} + std::optional AlterCommand::tryConvertToMutationCommand(StorageInMemoryMetadata & metadata, const Context & context) const { if (!isRequireMutationStage(metadata)) @@ -716,6 +782,8 @@ String alterTypeToString(const AlterCommand::Type type) return "MODIFY QUERY"; case AlterCommand::Type::RENAME_COLUMN: return "RENAME COLUMN"; + case AlterCommand::Type::REMOVE_TTL: + return "REMOVE TTL"; } __builtin_unreachable(); } @@ -783,14 +851,15 @@ void AlterCommands::prepare(const StorageInMemoryMetadata & metadata) if (!has_column && command.if_exists) command.ignore = true; - if (has_column && command.data_type) + if (has_column) { auto column_from_table = columns.get(command.column_name); - if (!command.default_expression && column_from_table.default_desc.expression) + if (command.data_type && !command.default_expression && column_from_table.default_desc.expression) { command.default_kind = column_from_table.default_desc.kind; command.default_expression = column_from_table.default_desc.expression; } + } } else if (command.type == AlterCommand::ADD_COLUMN) @@ -857,6 +926,70 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con if (command.codec) CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(command.codec, command.data_type, !context.getSettingsRef().allow_suspicious_codecs); + auto column_default = all_columns.getDefault(column_name); + if (column_default) + { + if (command.to_remove == 
AlterCommand::RemoveProperty::DEFAULT && column_default->kind != ColumnDefaultKind::Default) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Cannot remove DEFAULT from column {}, because column default type is {}. Use REMOVE {} to delete it", + backQuote(column_name), toString(column_default->kind), toString(column_default->kind)); + } + if (command.to_remove == AlterCommand::RemoveProperty::MATERIALIZED && column_default->kind != ColumnDefaultKind::Materialized) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Cannot remove MATERIALIZED from column {}, because column default type is {}. Use REMOVE {} to delete it", + backQuote(column_name), toString(column_default->kind), toString(column_default->kind)); + } + if (command.to_remove == AlterCommand::RemoveProperty::ALIAS && column_default->kind != ColumnDefaultKind::Alias) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Cannot remove ALIAS from column {}, because column default type is {}. Use REMOVE {} to delete it", + backQuote(column_name), toString(column_default->kind), toString(column_default->kind)); + } + } + + if (command.isRemovingProperty()) + { + if (!column_default && command.to_remove == AlterCommand::RemoveProperty::DEFAULT) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Column {} doesn't have DEFAULT, cannot remove it", + backQuote(column_name)); + + if (!column_default && command.to_remove == AlterCommand::RemoveProperty::ALIAS) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Column {} doesn't have ALIAS, cannot remove it", + backQuote(column_name)); + + if (!column_default && command.to_remove == AlterCommand::RemoveProperty::MATERIALIZED) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Column {} doesn't have MATERIALIZED, cannot remove it", + backQuote(column_name)); + + auto column_from_table = all_columns.get(column_name); + if (command.to_remove == AlterCommand::RemoveProperty::TTL && column_from_table.ttl == nullptr) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Column {} doesn't have TTL, cannot remove it", + backQuote(column_name)); + if (command.to_remove == AlterCommand::RemoveProperty::CODEC && column_from_table.codec == nullptr) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Column {} doesn't have CODEC, cannot remove it", + backQuote(column_name)); + if (command.to_remove == AlterCommand::RemoveProperty::COMMENT && column_from_table.comment.empty()) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Column {} doesn't have COMMENT, cannot remove it", + backQuote(column_name)); + + } modified_columns.emplace(column_name); } @@ -966,6 +1099,10 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con throw Exception{"Cannot rename column from nested struct to normal column and vice versa", ErrorCodes::BAD_ARGUMENTS}; } } + else if (command.type == AlterCommand::REMOVE_TTL && !metadata.hasAnyTableTTL()) + { + throw Exception{"Table doesn't have any table TTL expression, cannot remove", ErrorCodes::BAD_ARGUMENTS}; + } /// Collect default expressions for MODIFY and ADD comands if (command.type == AlterCommand::MODIFY_COLUMN || command.type == AlterCommand::ADD_COLUMN) diff --git a/src/Storages/AlterCommands.h b/src/Storages/AlterCommands.h index 3578507a361..c973b0b6a6f 100644 --- a/src/Storages/AlterCommands.h +++ b/src/Storages/AlterCommands.h @@ -37,6 +37,22 @@ struct AlterCommand MODIFY_SETTING, MODIFY_QUERY, RENAME_COLUMN, + REMOVE_TTL, + }; + + /// Which property user wants to remove from column + enum class RemoveProperty + { + NO_PROPERTY, + ///
Default specifiers + DEFAULT, + MATERIALIZED, + ALIAS, + + /// Other properties + COMMENT, + CODEC, + TTL }; Type type; @@ -107,16 +123,13 @@ struct AlterCommand /// Target column name String rename_to; + /// What to remove from column (or TTL) + RemoveProperty to_remove = RemoveProperty::NO_PROPERTY; + static std::optional parse(const ASTAlterCommand * command); void apply(StorageInMemoryMetadata & metadata, const Context & context) const; - /// Checks that alter query changes data. For MergeTree: - /// * column files (data and marks) - /// * each part meta (columns.txt) - /// in each part on disk (it's not lightweight alter). - bool isModifyingData(const StorageInMemoryMetadata & metadata) const; - /// Check that alter command require data modification (mutation) to be /// executed. For example, cast from Date to UInt16 type can be executed /// without any data modifications. But column drop or modify from UInt16 to @@ -132,6 +145,9 @@ struct AlterCommand /// Checks that any TTL changed by alter bool isTTLAlter(const StorageInMemoryMetadata & metadata) const; + /// Command removing some property from column or table + bool isRemovingProperty() const; + /// If possible, convert alter command to mutation command. In other case /// return empty optional. Some storages may execute mutations after /// metadata changes. @@ -164,9 +180,6 @@ public: /// Commands have to be prepared before apply. void apply(StorageInMemoryMetadata & metadata, const Context & context) const; - /// At least one command modify data on disk. - bool isModifyingData(const StorageInMemoryMetadata & metadata) const; - /// At least one command modify settings. bool isSettingsAlter() const; diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 0711d32d802..50b36ced19c 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -97,6 +97,7 @@ void IStorage::read( TableLockHolder table_lock, StorageMetadataPtr metadata_snapshot, StreamLocalLimits & limits, + SizeLimits & leaf_limits, std::shared_ptr quota, const Names & column_names, const SelectQueryInfo & query_info, @@ -106,7 +107,7 @@ void IStorage::read( unsigned num_streams) { auto read_step = std::make_unique( - std::move(table_lock), std::move(metadata_snapshot), limits, std::move(quota), shared_from_this(), + std::move(table_lock), std::move(metadata_snapshot), limits, leaf_limits, std::move(quota), shared_from_this(), column_names, query_info, std::move(context), processed_stage, max_block_size, num_streams); read_step->setStepDescription("Read from " + getName()); diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index dc7c684d5b4..dbd18c9558e 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -288,6 +288,7 @@ public: TableLockHolder table_lock, StorageMetadataPtr metadata_snapshot, StreamLocalLimits & limits, + SizeLimits & leaf_limits, std::shared_ptr quota, const Names & column_names, const SelectQueryInfo & query_info, diff --git a/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp b/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp index 0f7c1039d72..1159a93d2ef 100644 --- a/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp +++ b/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp @@ -69,20 +69,29 @@ TemporaryLiveViewCleaner::~TemporaryLiveViewCleaner() void TemporaryLiveViewCleaner::addView(const std::shared_ptr & view) { - if (!view->isTemporary()) + if (!view->isTemporary() || background_thread_should_exit) return; auto current_time = std::chrono::system_clock::now(); auto time_of_next_check 
= current_time + view->getTimeout(); std::lock_guard lock{mutex}; + if (background_thread_should_exit) + return; + + /// If views.empty() the background thread isn't running or it's going to stop right now. + bool background_thread_is_running = !views.empty(); /// Keep the vector `views` sorted by time of next check. StorageAndTimeOfCheck storage_and_time_of_check{view, time_of_next_check}; views.insert(std::upper_bound(views.begin(), views.end(), storage_and_time_of_check), storage_and_time_of_check); - if (!background_thread.joinable()) + if (!background_thread_is_running) + { + if (background_thread.joinable()) + background_thread.join(); background_thread = ThreadFromGlobalPool{&TemporaryLiveViewCleaner::backgroundThreadFunc, this}; + } background_thread_wake_up.notify_one(); } @@ -95,7 +104,7 @@ void TemporaryLiveViewCleaner::backgroundThreadFunc() { background_thread_wake_up.wait_until(lock, views.front().time_of_check); if (background_thread_should_exit) - return; + break; auto current_time = std::chrono::system_clock::now(); std::vector storages_to_drop; @@ -112,18 +121,22 @@ void TemporaryLiveViewCleaner::backgroundThreadFunc() continue; } - ++it; - if (current_time < time_of_check) break; /// It's not the time to check it yet. + auto storage_id = storage->getStorageID(); + if (!storage->hasUsers() && DatabaseCatalog::instance().getDependencies(storage_id).empty()) + { + /// No users and no dependencies so we can remove the storage. + storages_to_drop.emplace_back(storage_id); + it = views.erase(it); + continue; + } + + /// Calculate time of the next check. time_of_check = current_time + storage->getTimeout(); - auto storage_id = storage->getStorageID(); - if (storage->hasUsers() || !DatabaseCatalog::instance().getDependencies(storage_id).empty()) - continue; - - storages_to_drop.emplace_back(storage_id); + ++it; } lock.unlock(); diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 0e42d267729..f9fb157942a 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -276,7 +276,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( ReadBufferFromString ttl_infos_buffer(ttl_infos_string); assertString("ttl format version: 1\n", ttl_infos_buffer); ttl_infos.read(ttl_infos_buffer); - reservation = data.reserveSpacePreferringTTLRules(sum_files_size, ttl_infos, std::time(nullptr)); + reservation = data.reserveSpacePreferringTTLRules(sum_files_size, ttl_infos, std::time(nullptr), 0, true); } else reservation = data.reserveSpace(sum_files_size); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 5969ecc5baf..97f481eee8a 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -511,7 +511,7 @@ void MergeTreeData::checkTTLExpressions(const StorageInMemoryMetadata & new_meta { for (const auto & move_ttl : new_table_ttl.move_ttl) { - if (!getDestinationForTTL(move_ttl)) + if (!getDestinationForMoveTTL(move_ttl)) { String message; if (move_ttl.destination_type == DataDestinationType::DISK) @@ -1823,16 +1823,27 @@ MergeTreeData::DataPartsVector MergeTreeData::getActivePartsToReplace( } -void MergeTreeData::renameTempPartAndAdd(MutableDataPartPtr & part, SimpleIncrement * increment, Transaction * out_transaction) +bool MergeTreeData::renameTempPartAndAdd(MutableDataPartPtr & part, SimpleIncrement * increment, Transaction * out_transaction) { - auto removed = 
renameTempPartAndReplace(part, increment, out_transaction); - if (!removed.empty()) - throw Exception("Added part " + part->name + " covers " + toString(removed.size()) - + " existing part(s) (including " + removed[0]->name + ")", ErrorCodes::LOGICAL_ERROR); + if (out_transaction && &out_transaction->data != this) + throw Exception("MergeTreeData::Transaction for one table cannot be used with another. It is a bug.", + ErrorCodes::LOGICAL_ERROR); + + DataPartsVector covered_parts; + { + auto lock = lockParts(); + if (!renameTempPartAndReplace(part, increment, out_transaction, lock, &covered_parts)) + return false; + } + if (!covered_parts.empty()) + throw Exception("Added part " + part->name + " covers " + toString(covered_parts.size()) + + " existing part(s) (including " + covered_parts[0]->name + ")", ErrorCodes::LOGICAL_ERROR); + + return true; } -void MergeTreeData::renameTempPartAndReplace( +bool MergeTreeData::renameTempPartAndReplace( MutableDataPartPtr & part, SimpleIncrement * increment, Transaction * out_transaction, std::unique_lock & lock, DataPartsVector * out_covered_parts) { @@ -1863,7 +1874,7 @@ void MergeTreeData::renameTempPartAndReplace( part_info.mutation = 0; /// it's equal to min_block by default part_name = part->getNewName(part_info); } - else + else /// Parts from ReplicatedMergeTree already have names part_name = part->name; LOG_TRACE(log, "Renaming temporary part {} to {}.", part->relative_path, part_name); @@ -1886,7 +1897,7 @@ void MergeTreeData::renameTempPartAndReplace( if (covering_part) { LOG_WARNING(log, "Tried to add obsolete part {} covered by {}", part_name, covering_part->getNameWithState()); - return; + return false; } /// All checks are passed. Now we can rename the part on disk. @@ -1931,6 +1942,8 @@ void MergeTreeData::renameTempPartAndReplace( for (DataPartPtr & covered_part : covered_parts) out_covered_parts->emplace_back(std::move(covered_part)); } + + return true; } MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace( @@ -1968,6 +1981,22 @@ void MergeTreeData::removePartsFromWorkingSet(const MergeTreeData::DataPartsVect } } +void MergeTreeData::removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove) +{ + auto lock = lockParts(); + + for (const auto & part : remove) + { + auto it_part = data_parts_by_info.find(part->info); + if (it_part == data_parts_by_info.end()) + throw Exception("Part " + part->getNameWithState() + " not found in data_parts", ErrorCodes::LOGICAL_ERROR); + + modifyPartState(part, IMergeTreeDataPart::State::Temporary); + /// Erase immediately + data_parts_indexes.erase(it_part); + } +} + void MergeTreeData::removePartsFromWorkingSet(const DataPartsVector & remove, bool clear_without_timeout, DataPartsLock * acquired_lock) { auto lock = (acquired_lock) ? 
DataPartsLock() : lockParts(); @@ -2961,11 +2990,12 @@ ReservationPtr MergeTreeData::tryReserveSpace(UInt64 expected_size, SpacePtr spa ReservationPtr MergeTreeData::reserveSpacePreferringTTLRules(UInt64 expected_size, const IMergeTreeDataPart::TTLInfos & ttl_infos, time_t time_of_move, - size_t min_volume_index) const + size_t min_volume_index, + bool is_insert) const { expected_size = std::max(RESERVATION_MIN_ESTIMATION_SIZE, expected_size); - ReservationPtr reservation = tryReserveSpacePreferringTTLRules(expected_size, ttl_infos, time_of_move, min_volume_index); + ReservationPtr reservation = tryReserveSpacePreferringTTLRules(expected_size, ttl_infos, time_of_move, min_volume_index, is_insert); return checkAndReturnReservation(expected_size, std::move(reservation)); } @@ -2973,24 +3003,27 @@ ReservationPtr MergeTreeData::reserveSpacePreferringTTLRules(UInt64 expected_siz ReservationPtr MergeTreeData::tryReserveSpacePreferringTTLRules(UInt64 expected_size, const IMergeTreeDataPart::TTLInfos & ttl_infos, time_t time_of_move, - size_t min_volume_index) const + size_t min_volume_index, + bool is_insert) const { expected_size = std::max(RESERVATION_MIN_ESTIMATION_SIZE, expected_size); auto metadata_snapshot = getInMemoryMetadataPtr(); ReservationPtr reservation; - auto ttl_entry = selectTTLDescriptionForTTLInfos(metadata_snapshot->getMoveTTLs(), ttl_infos.moves_ttl, time_of_move, true); + auto move_ttl_entry = selectTTLDescriptionForTTLInfos(metadata_snapshot->getMoveTTLs(), ttl_infos.moves_ttl, time_of_move, true); - if (ttl_entry) + if (move_ttl_entry) { - SpacePtr destination_ptr = getDestinationForTTL(*ttl_entry); + SpacePtr destination_ptr = getDestinationForMoveTTL(*move_ttl_entry, is_insert); if (!destination_ptr) { - if (ttl_entry->destination_type == DataDestinationType::VOLUME) - LOG_WARNING(log, "Would like to reserve space on volume '{}' by TTL rule of table '{}' but volume was not found", ttl_entry->destination_name, log_name); - else if (ttl_entry->destination_type == DataDestinationType::DISK) - LOG_WARNING(log, "Would like to reserve space on disk '{}' by TTL rule of table '{}' but disk was not found", ttl_entry->destination_name, log_name); + if (move_ttl_entry->destination_type == DataDestinationType::VOLUME) + LOG_WARNING(log, "Would like to reserve space on volume '{}' by TTL rule of table '{}' but volume was not found or rule is not applicable at the moment", + move_ttl_entry->destination_name, log_name); + else if (move_ttl_entry->destination_type == DataDestinationType::DISK) + LOG_WARNING(log, "Would like to reserve space on disk '{}' by TTL rule of table '{}' but disk was not found or rule is not applicable at the moment", + move_ttl_entry->destination_name, log_name); } else { @@ -2998,10 +3031,12 @@ ReservationPtr MergeTreeData::tryReserveSpacePreferringTTLRules(UInt64 expected_ if (reservation) return reservation; else - if (ttl_entry->destination_type == DataDestinationType::VOLUME) - LOG_WARNING(log, "Would like to reserve space on volume '{}' by TTL rule of table '{}' but there is not enough space", ttl_entry->destination_name, log_name); - else if (ttl_entry->destination_type == DataDestinationType::DISK) - LOG_WARNING(log, "Would like to reserve space on disk '{}' by TTL rule of table '{}' but there is not enough space", ttl_entry->destination_name, log_name); + if (move_ttl_entry->destination_type == DataDestinationType::VOLUME) + LOG_WARNING(log, "Would like to reserve space on volume '{}' by TTL rule of table '{}' but there is not enough space", + 
move_ttl_entry->destination_name, log_name); + else if (move_ttl_entry->destination_type == DataDestinationType::DISK) + LOG_WARNING(log, "Would like to reserve space on disk '{}' by TTL rule of table '{}' but there is not enough space", + move_ttl_entry->destination_name, log_name); } } @@ -3010,13 +3045,36 @@ ReservationPtr MergeTreeData::tryReserveSpacePreferringTTLRules(UInt64 expected_ return reservation; } -SpacePtr MergeTreeData::getDestinationForTTL(const TTLDescription & ttl) const +SpacePtr MergeTreeData::getDestinationForMoveTTL(const TTLDescription & move_ttl, bool is_insert) const { auto policy = getStoragePolicy(); - if (ttl.destination_type == DataDestinationType::VOLUME) - return policy->getVolumeByName(ttl.destination_name); - else if (ttl.destination_type == DataDestinationType::DISK) - return policy->getDiskByName(ttl.destination_name); + if (move_ttl.destination_type == DataDestinationType::VOLUME) + { + auto volume = policy->getVolumeByName(move_ttl.destination_name); + + if (!volume) + return {}; + + if (is_insert && !volume->perform_ttl_move_on_insert) + return {}; + + return volume; + } + else if (move_ttl.destination_type == DataDestinationType::DISK) + { + auto disk = policy->getDiskByName(move_ttl.destination_name); + if (!disk) + return {}; + + auto volume = policy->getVolume(policy->getVolumeIndexByDisk(disk)); + if (!volume) + return {}; + + if (is_insert && !volume->perform_ttl_move_on_insert) + return {}; + + return disk; + } else return {}; } @@ -3087,6 +3145,25 @@ MergeTreeData::DataPartPtr MergeTreeData::getAnyPartInPartition( return nullptr; } + +void MergeTreeData::Transaction::rollbackPartsToTemporaryState() +{ + if (!isEmpty()) + { + std::stringstream ss; + ss << " Rolling back parts state to temporary and removing from working set:"; + for (const auto & part : precommitted_parts) + ss << " " << part->relative_path; + ss << "."; + LOG_DEBUG(data.log, "Undoing transaction.{}", ss.str()); + + data.removePartsFromWorkingSetImmediatelyAndSetTemporaryState( + DataPartsVector(precommitted_parts.begin(), precommitted_parts.end())); + } + + clear(); +} + void MergeTreeData::Transaction::rollback() { if (!isEmpty()) @@ -3251,7 +3328,8 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::cloneAndLoadDataPartOnSameDisk( } if (!does_storage_policy_allow_same_disk) throw Exception( - "Could not clone and load part " + quoteString(src_part->getFullPath()) + " because disk does not belong to storage policy", ErrorCodes::BAD_ARGUMENTS); + "Could not clone and load part " + quoteString(src_part->getFullPath()) + " because disk does not belong to storage policy", + ErrorCodes::BAD_ARGUMENTS); String dst_part_name = src_part->getNewName(dst_part_info); String tmp_dst_part_name = tmp_part_prefix + dst_part_name; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 82f118a4c0f..0fc5ec43048 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -225,6 +225,10 @@ public: void rollback(); + /// Immediately remove parts from table's data_parts set and change part + /// state to temporary. Useful for new parts which are not present in the table. + void rollbackPartsToTemporaryState(); + size_t size() const { return precommitted_parts.size(); } bool isEmpty() const { return precommitted_parts.empty(); } @@ -426,7 +430,8 @@ public: /// If out_transaction != nullptr, adds the part in the PreCommitted state (the part will be added to the /// active set later with out_transaction->commit()).
/// Else, commits the part immediately. - void renameTempPartAndAdd(MutableDataPartPtr & part, SimpleIncrement * increment = nullptr, Transaction * out_transaction = nullptr); + /// Returns true if the part was added. Returns false if the part is covered by a bigger part. + bool renameTempPartAndAdd(MutableDataPartPtr & part, SimpleIncrement * increment = nullptr, Transaction * out_transaction = nullptr); /// The same as renameTempPartAndAdd but the block range of the part can contain existing parts. /// Returns all parts covered by the added part (in ascending order). @@ -435,10 +440,16 @@ public: MutableDataPartPtr & part, SimpleIncrement * increment = nullptr, Transaction * out_transaction = nullptr); /// Low-level version of previous one, doesn't lock mutex - void renameTempPartAndReplace( + bool renameTempPartAndReplace( MutableDataPartPtr & part, SimpleIncrement * increment, Transaction * out_transaction, DataPartsLock & lock, DataPartsVector * out_covered_parts = nullptr); + + /// Remove parts from the working set immediately (without waiting for the background + /// process). Transfers part state to temporary. Has very limited usage, only + /// for new parts which are not already present in the table. + void removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove); + /// Removes parts from the working set parts. /// Parts in add must already be in data_parts with PreCommitted, Committed, or Outdated states. /// If clear_without_timeout is true, the parts will be deleted at once, or during the next call to @@ -624,13 +635,15 @@ public: UInt64 expected_size, const IMergeTreeDataPart::TTLInfos & ttl_infos, time_t time_of_move, - size_t min_volume_index = 0) const; + size_t min_volume_index = 0, + bool is_insert = false) const; ReservationPtr tryReserveSpacePreferringTTLRules( UInt64 expected_size, const IMergeTreeDataPart::TTLInfos & ttl_infos, time_t time_of_move, - size_t min_volume_index = 0) const; + size_t min_volume_index = 0, + bool is_insert = false) const; /// Choose disk with max available free space /// Reserves 0 bytes @@ -638,9 +651,9 @@ public: /// Return alter conversions for part which must be applied on fly. AlterConversions getAlterConversionsForPart(const MergeTreeDataPartPtr part) const; - /// Returns destination disk or volume for the TTL rule according to current - /// storage policy - SpacePtr getDestinationForTTL(const TTLDescription & ttl) const; + /// Returns destination disk or volume for the TTL rule according to current storage policy + /// 'is_insert' - whether the TTL move is performed on a new data part insert. + SpacePtr getDestinationForMoveTTL(const TTLDescription & move_ttl, bool is_insert = false) const; /// Checks if given part already belongs destination disk or volume for the /// TTL rule. diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 89631b713ed..91a34efc2b5 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -1079,6 +1079,9 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor auto context_for_reading = context; context_for_reading.setSetting("max_streams_to_max_threads_ratio", 1); context_for_reading.setSetting("max_threads", 1); + /// Allow mutations to work when force_index_by_date or force_primary_key is on.
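
// The two setSetting calls that follow relax query-level guards for the internal read that a mutation
// performs. A minimal, self-contained sketch of the idea (QuerySettings and its members are illustrative
// names, not the actual Context/Settings API): copy the settings, relax the guards only in the copy, and
// the user's session settings stay untouched.

#include <cassert>
#include <map>
#include <string>

struct QuerySettings
{
    std::map<std::string, int> values;
    void set(const std::string & name, int value) { values[name] = value; }
    int get(const std::string & name) const
    {
        auto it = values.find(name);
        return it == values.end() ? 0 : it->second;
    }
};

int main()
{
    QuerySettings session_settings;
    session_settings.set("force_index_by_date", 1);

    QuerySettings settings_for_reading = session_settings;  // private copy for the mutation's read
    settings_for_reading.set("force_index_by_date", 0);
    settings_for_reading.set("force_primary_key", 0);

    assert(session_settings.get("force_index_by_date") == 1);   // the session is unaffected
    assert(settings_for_reading.get("force_index_by_date") == 0);
}
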
+ context_for_reading.setSetting("force_index_by_date", Field(0)); + context_for_reading.setSetting("force_primary_key", Field(0)); MutationCommands commands_for_part; for (const auto & command : commands) diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index e780ebda111..ffd5d616cb0 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -583,6 +583,14 @@ Pipe MergeTreeDataSelectExecutor::readFromParts( { std::atomic total_rows {0}; + SizeLimits limits; + if (settings.read_overflow_mode == OverflowMode::THROW && settings.max_rows_to_read) + limits = SizeLimits(settings.max_rows_to_read, 0, settings.read_overflow_mode); + + SizeLimits leaf_limits; + if (settings.read_overflow_mode_leaf == OverflowMode::THROW && settings.max_rows_to_read_leaf) + leaf_limits = SizeLimits(settings.max_rows_to_read_leaf, 0, settings.read_overflow_mode_leaf); + auto process_part = [&](size_t part_index) { auto & part = parts[part_index]; @@ -610,18 +618,14 @@ Pipe MergeTreeDataSelectExecutor::readFromParts( if (!ranges.ranges.empty()) { - if (settings.read_overflow_mode == OverflowMode::THROW && settings.max_rows_to_read) + if (limits.max_rows || leaf_limits.max_rows) { /// Fail fast if estimated number of rows to read exceeds the limit auto current_rows_estimate = ranges.getRowsCount(); size_t prev_total_rows_estimate = total_rows.fetch_add(current_rows_estimate); size_t total_rows_estimate = current_rows_estimate + prev_total_rows_estimate; - if (total_rows_estimate > settings.max_rows_to_read) - throw Exception( - "Limit for rows (controlled by 'max_rows_to_read' setting) exceeded, max rows: " - + formatReadableQuantity(settings.max_rows_to_read) - + ", estimated rows to read (at least): " + formatReadableQuantity(total_rows_estimate), - ErrorCodes::TOO_MANY_ROWS); + limits.check(total_rows_estimate, 0, "rows (controlled by 'max_rows_to_read' setting)", ErrorCodes::TOO_MANY_ROWS); + leaf_limits.check(total_rows_estimate, 0, "rows (controlled by 'max_rows_to_read_leaf' setting)", ErrorCodes::TOO_MANY_ROWS); } parts_with_ranges[part_index] = std::move(ranges); @@ -1498,79 +1502,56 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( } else { - // Do inclusion search, where we only look for one range + /// In case when SELECT's predicate defines a single continuous interval of keys, + /// we can use binary search algorithm to find the left and right endpoint key marks of such interval. 
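
// A standalone sketch of that boundary search (illustrative names; the real code below uses KeyCondition,
// MarkRange and may_be_true_in_range). It assumes the predicate is monotonic in the sense that enlarging
// a range can only turn "cannot match" into "may match".

#include <cstddef>
#include <functional>
#include <iostream>
#include <optional>

// Half-open range of marks [begin, end).
struct Range { size_t begin; size_t end; };

// Finds the minimal [left, right) outside of which the predicate can never hold.
std::optional<Range> findBoundaryMarks(size_t marks_count, const std::function<bool(Range)> & may_be_true_in_range)
{
    // Left boundary: smallest prefix [0, m) that may still contain matching keys.
    size_t lo = 0, hi = marks_count;
    while (lo + 1 < hi)
    {
        const size_t middle = (lo + hi) / 2;
        if (may_be_true_in_range({0, middle}))
            hi = middle;
        else
            lo = middle;
    }
    const size_t left = lo;

    // Right boundary: largest suffix [m, marks_count) that may still contain matching keys.
    lo = left;
    hi = marks_count;
    while (lo + 1 < hi)
    {
        const size_t middle = (lo + hi) / 2;
        if (may_be_true_in_range({middle, marks_count}))
            lo = middle;
        else
            hi = middle;
    }
    const size_t right = hi;

    Range result{left, right};
    if (result.begin < result.end && may_be_true_in_range(result))
        return result;
    return std::nullopt;
}

int main()
{
    // Pretend keys equal mark numbers and the predicate is "range intersects [10, 20)".
    auto pred = [](Range r) { return r.begin < 20 && r.end > 10; };
    if (auto r = findBoundaryMarks(100, pred))
        std::cout << "[" << r->begin << ", " << r->end << ")\n";  // prints [10, 20)
}
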
+ /// The returned value is the minimum range of marks containing all keys for which KeyCondition holds + + LOG_TRACE(log, "Running binary search on index range for part {} ({} marks)", part->name, marks_count); size_t steps = 0; - auto find_leaf = [&](bool left) -> std::optional + MarkRange result_range; + + size_t searched_left = 0; + size_t searched_right = marks_count; + + while (searched_left + 1 < searched_right) { - std::vector stack = {}; - - MarkRange range = {0, marks_count}; - - steps++; - + const size_t middle = (searched_left + searched_right) / 2; + MarkRange range(0, middle); if (may_be_true_in_range(range)) - stack.emplace_back(range.begin, range.end); + searched_right = middle; + else + searched_left = middle; + ++steps; + } + result_range.begin = searched_left; + LOG_TRACE(log, "Found (LEFT) boundary mark: {}", searched_left); - while (!stack.empty()) - { - range = stack.back(); - stack.pop_back(); + searched_right = marks_count; + while (searched_left + 1 < searched_right) + { + const size_t middle = (searched_left + searched_right) / 2; + MarkRange range(middle, marks_count); + if (may_be_true_in_range(range)) + searched_left = middle; + else + searched_right = middle; + ++steps; + } + result_range.end = searched_right; + LOG_TRACE(log, "Found (RIGHT) boundary mark: {}", searched_right); - if (range.end == range.begin + 1) - { - if (left) - return range.begin; - else - return range.end; - } - else - { - std::vector check_order = {}; + if (result_range.begin < result_range.end && may_be_true_in_range(result_range)) + res.emplace_back(std::move(result_range)); - MarkRange left_range = {range.begin, (range.begin + range.end) / 2}; - MarkRange right_range = {(range.begin + range.end) / 2, range.end}; - - if (left) - { - check_order.emplace_back(left_range.begin, left_range.end); - check_order.emplace_back(right_range.begin, right_range.end); - } - else - { - check_order.emplace_back(right_range.begin, right_range.end); - check_order.emplace_back(left_range.begin, left_range.end); - } - - steps++; - - if (may_be_true_in_range(check_order[0])) - { - stack.emplace_back(check_order[0].begin, check_order[0].end); - continue; - } - - if (may_be_true_in_range(check_order[1])) - stack.emplace_back(check_order[1].begin, check_order[1].end); - else - break; // No mark range would suffice - } - } - - return std::nullopt; - }; - - auto left_leaf = find_leaf(true); - if (left_leaf) - res.emplace_back(left_leaf.value(), find_leaf(false).value()); - - LOG_TRACE(log, "Used optimized inclusion search over index for part {} with {} steps", part->name, steps); + LOG_TRACE(log, "Found {} range in {} steps", res.empty() ?
"empty" : "continuous", steps); } return res; } + MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex( MergeTreeIndexPtr index_helper, MergeTreeIndexConditionPtr condition, diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index e5b684a1361..739aff31a06 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -237,7 +237,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataWriter::writeTempPart(BlockWithPa updateTTL(ttl_entry, move_ttl_infos, move_ttl_infos.moves_ttl[ttl_entry.result_column], block, false); NamesAndTypesList columns = metadata_snapshot->getColumns().getAllPhysical().filter(block.getNames()); - ReservationPtr reservation = data.reserveSpacePreferringTTLRules(expected_size, move_ttl_infos, time(nullptr)); + ReservationPtr reservation = data.reserveSpacePreferringTTLRules(expected_size, move_ttl_infos, time(nullptr), 0, true); VolumePtr volume = data.getStoragePolicy()->getVolume(0); auto new_data_part = data.createPart( diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index 586c4393dfb..c5c6a63abc4 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -136,9 +136,9 @@ bool MergeTreePartsMover::selectPartsForMove( ReservationPtr reservation; if (ttl_entry) { - auto destination = data->getDestinationForTTL(*ttl_entry); + auto destination = data->getDestinationForMoveTTL(*ttl_entry); if (destination && !data->isPartInTTLDestination(*ttl_entry, *part)) - reservation = data->tryReserveSpace(part->getBytesOnDisk(), data->getDestinationForTTL(*ttl_entry)); + reservation = data->tryReserveSpace(part->getBytesOnDisk(), data->getDestinationForMoveTTL(*ttl_entry)); } if (reservation) /// Found reservation by TTL rule. diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 7f537ec330a..00580c8d8bb 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -77,24 +77,24 @@ void MergeTreeSettings::loadFromQuery(ASTStorage & storage_def) void MergeTreeSettings::sanityCheck(const Settings & query_settings) const { - if (number_of_free_entries_in_pool_to_execute_mutation >= query_settings.background_pool_size) + if (number_of_free_entries_in_pool_to_execute_mutation > query_settings.background_pool_size) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of 'number_of_free_entries_in_pool_to_execute_mutation' setting" " ({}) (default values are defined in section of config.xml" " or the value can be specified per table in SETTINGS section of CREATE TABLE query)" - " is greater or equals to the value of 'background_pool_size'" + " is greater than the value of 'background_pool_size'" " ({}) (the value is defined in users.xml for default profile)." 
" This indicates incorrect configuration because mutations cannot work with these settings.", number_of_free_entries_in_pool_to_execute_mutation, query_settings.background_pool_size); } - if (number_of_free_entries_in_pool_to_lower_max_size_of_merge >= query_settings.background_pool_size) + if (number_of_free_entries_in_pool_to_lower_max_size_of_merge > query_settings.background_pool_size) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of 'number_of_free_entries_in_pool_to_lower_max_size_of_merge' setting" " ({}) (default values are defined in section of config.xml" " or the value can be specified per table in SETTINGS section of CREATE TABLE query)" - " is greater or equals to the value of 'background_pool_size'" + " is greater than the value of 'background_pool_size'" " ({}) (the value is defined in users.xml for default profile)." " This indicates incorrect configuration because the maximum size of merge will be always lowered.", number_of_free_entries_in_pool_to_lower_max_size_of_merge, diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 8652a6ef691..d26ff5c79bd 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -20,7 +20,7 @@ struct Settings; M(UInt64, index_granularity, 8192, "How many rows correspond to one primary key value.", 0) \ \ /** Data storing format settings. */ \ - M(UInt64, min_bytes_for_wide_part, 0, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \ + M(UInt64, min_bytes_for_wide_part, 10485760, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \ M(UInt64, min_rows_for_wide_part, 0, "Minimal number of rows to create part in wide format instead of compact", 0) \ M(UInt64, min_bytes_for_compact_part, 0, "Experimental. Minimal uncompressed size in bytes to create part in compact format instead of saving it in RAM", 0) \ M(UInt64, min_rows_for_compact_part, 0, "Experimental. Minimal number of rows to create part in compact format instead of saving it in RAM", 0) \ @@ -105,6 +105,9 @@ struct Settings; M(String, storage_policy, "default", "Name of storage disk policy", 0) \ M(Bool, allow_nullable_key, false, "Allow Nullable types as primary keys.", 0) \ \ + /** Settings for testing purposes */ \ + M(Bool, randomize_part_type, false, "For testing purposes only. Randomizes part type between wide and compact", 0) \ + \ /** Obsolete settings. Kept for backward compatibility only. 
*/ \ M(UInt64, min_relative_delay_to_yield_leadership, 120, "Obsolete setting, does nothing.", 0) \ M(UInt64, check_delay_period, 60, "Obsolete setting, does nothing.", 0) \ diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp index 1ea40989dfc..5696a9cf890 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp @@ -27,6 +27,9 @@ namespace ErrorCodes extern const int INSERT_WAS_DEDUPLICATED; extern const int TIMEOUT_EXCEEDED; extern const int NO_ACTIVE_REPLICAS; + extern const int DUPLICATE_DATA_PART; + extern const int PART_IS_TEMPORARILY_LOCKED; + extern const int LOGICAL_ERROR; } @@ -96,7 +99,8 @@ void ReplicatedMergeTreeBlockOutputStream::checkQuorumPrecondition(zkutil::ZooKe auto quorum_status = quorum_status_future.get(); if (quorum_status.error != Coordination::Error::ZNONODE) - throw Exception("Quorum for previous write has not been satisfied yet. Status: " + quorum_status.data, ErrorCodes::UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE); + throw Exception("Quorum for previous write has not been satisfied yet. Status: " + quorum_status.data, + ErrorCodes::UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE); /// Both checks are implicitly made also later (otherwise there would be a race condition). @@ -116,7 +120,6 @@ void ReplicatedMergeTreeBlockOutputStream::write(const Block & block) { last_block_is_duplicate = false; - /// TODO Is it possible to not lock the table structure here? storage.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event); auto zookeeper = storage.getZooKeeper(); @@ -214,170 +217,258 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart( metadata_snapshot->check(part->getColumns()); assertSessionIsNotExpired(zookeeper); - /// Obtain incremental block number and lock it. The lock holds our intention to add the block to the filesystem. - /// We remove the lock just after renaming the part. In case of exception, block number will be marked as abandoned. - /// Also, make deduplication check. If a duplicate is detected, no nodes are created. + String temporary_part_relative_path = part->relative_path; - /// Allocate new block number and check for duplicates - bool deduplicate_block = !block_id.empty(); - String block_id_path = deduplicate_block ? storage.zookeeper_path + "/blocks/" + block_id : ""; - auto block_number_lock = storage.allocateBlockNumber(part->info.partition_id, zookeeper, block_id_path); + /// There is one case when we need to retry transaction in a loop. + /// But don't do it too many times - just as defensive measure. + size_t loop_counter = 0; + constexpr size_t max_iterations = 10; - if (!block_number_lock) + bool is_already_existing_part = false; + + while (true) { - LOG_INFO(log, "Block with ID {} already exists; ignoring it.", block_id); - part->is_duplicate = true; - last_block_is_duplicate = true; - ProfileEvents::increment(ProfileEvents::DuplicatedInsertedBlocks); - return; - } + /// Obtain incremental block number and lock it. The lock holds our intention to add the block to the filesystem. + /// We remove the lock just after renaming the part. In case of exception, block number will be marked as abandoned. + /// Also, make deduplication check. If a duplicate is detected, no nodes are created. 
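
// A minimal standalone model of that deduplication check. In the real code the registry is the set of
// /blocks/<block_id> nodes in ZooKeeper and the block id is derived from the inserted data; here an
// in-memory set and a plain hash stand in for both.

#include <functional>
#include <iostream>
#include <string>
#include <unordered_set>

// Returns true if the block was inserted, false if an identical block was seen before.
bool insertDeduplicated(std::unordered_set<std::string> & registry, const std::string & block_data)
{
    const std::string block_id = std::to_string(std::hash<std::string>{}(block_data));
    if (!registry.insert(block_id).second)
        return false;  // duplicate: no nodes are created, the insert is silently ignored
    // ... here the real code commits the part and the block id node in one ZooKeeper transaction ...
    return true;
}

int main()
{
    std::unordered_set<std::string> registry;
    std::cout << insertDeduplicated(registry, "1,2,3") << '\n';  // 1: first insert goes through
    std::cout << insertDeduplicated(registry, "1,2,3") << '\n';  // 0: a retry of the same data is deduplicated
}
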
- Int64 block_number = block_number_lock->getNumber(); + /// Allocate new block number and check for duplicates + bool deduplicate_block = !block_id.empty(); + String block_id_path = deduplicate_block ? storage.zookeeper_path + "/blocks/" + block_id : ""; + auto block_number_lock = storage.allocateBlockNumber(part->info.partition_id, zookeeper, block_id_path); - /// Set part attributes according to part_number. Prepare an entry for log. + /// Prepare transaction to ZooKeeper + /// It will simultaneously add information about the part to all the necessary places in ZooKeeper and remove block_number_lock. + Coordination::Requests ops; - part->info.min_block = block_number; - part->info.max_block = block_number; - part->info.level = 0; - - String part_name = part->getNewName(part->info); - part->name = part_name; - - StorageReplicatedMergeTree::LogEntry log_entry; - log_entry.type = StorageReplicatedMergeTree::LogEntry::GET_PART; - log_entry.create_time = time(nullptr); - log_entry.source_replica = storage.replica_name; - log_entry.new_part_name = part_name; - log_entry.quorum = quorum; - log_entry.block_id = block_id; - log_entry.new_part_type = part->getType(); - - /// Simultaneously add information about the part to all the necessary places in ZooKeeper and remove block_number_lock. - - /// Information about the part. - Coordination::Requests ops; - - storage.getCommitPartOps(ops, part, block_id_path); - - /// Replication log. - ops.emplace_back(zkutil::makeCreateRequest( - storage.zookeeper_path + "/log/log-", - log_entry.toString(), - zkutil::CreateMode::PersistentSequential)); - - /// Deletes the information that the block number is used for writing. - block_number_lock->getUnlockOps(ops); - - /** If you need a quorum - create a node in which the quorum is monitored. - * (If such a node already exists, then someone has managed to make another quorum record at the same time, but for it the quorum has not yet been reached. - * You can not do the next quorum record at this time.) - */ - if (quorum) - { - ReplicatedMergeTreeQuorumEntry quorum_entry; - quorum_entry.part_name = part_name; - quorum_entry.required_number_of_replicas = quorum; - quorum_entry.replicas.insert(storage.replica_name); - - /** At this point, this node will contain information that the current replica received a part. - * When other replicas will receive this part (in the usual way, processing the replication log), - * they will add themselves to the contents of this node. - * When it contains information about `quorum` number of replicas, this node is deleted, - * which indicates that the quorum has been reached. - */ - - ops.emplace_back( - zkutil::makeCreateRequest( - quorum_info.status_path, - quorum_entry.toString(), - zkutil::CreateMode::Persistent)); - - /// Make sure that during the insertion time, the replica was not reinitialized or disabled (when the server is finished). - ops.emplace_back( - zkutil::makeCheckRequest( - storage.replica_path + "/is_active", - quorum_info.is_active_node_version)); - - /// Unfortunately, just checking the above is not enough, because `is_active` node can be deleted and reappear with the same version. - /// But then the `host` value will change. We will check this. - /// It's great that these two nodes change in the same transaction (see MergeTreeRestartingThread). 
- ops.emplace_back( - zkutil::makeCheckRequest( - storage.replica_path + "/host", - quorum_info.host_node_version)); - } - - MergeTreeData::Transaction transaction(storage); /// If you can not add a part to ZK, we'll remove it back from the working set. - storage.renameTempPartAndAdd(part, nullptr, &transaction); - - Coordination::Responses responses; - Coordination::Error multi_code = zookeeper->tryMultiNoThrow(ops, responses); /// 1 RTT - - if (multi_code == Coordination::Error::ZOK) - { - transaction.commit(); - storage.merge_selecting_task->schedule(); - - /// Lock nodes have been already deleted, do not delete them in destructor - block_number_lock->assumeUnlocked(); - } - else if (multi_code == Coordination::Error::ZCONNECTIONLOSS - || multi_code == Coordination::Error::ZOPERATIONTIMEOUT) - { - /** If the connection is lost, and we do not know if the changes were applied, we can not delete the local part - * if the changes were applied, the inserted block appeared in `/blocks/`, and it can not be inserted again. - */ - transaction.commit(); - storage.enqueuePartForCheck(part->name, MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER); - - /// We do not know whether or not data has been inserted. - throw Exception("Unknown status, client must retry. Reason: " + String(Coordination::errorMessage(multi_code)), - ErrorCodes::UNKNOWN_STATUS_OF_INSERT); - } - else if (Coordination::isUserError(multi_code)) - { - String failed_op_path = zkutil::KeeperMultiException(multi_code, ops, responses).getPathForFirstFailedOp(); - - if (multi_code == Coordination::Error::ZNODEEXISTS && deduplicate_block && failed_op_path == block_id_path) + Int64 block_number = 0; + String existing_part_name; + if (block_number_lock) { - /// Block with the same id have just appeared in table (or other replica), rollback the insertion. - LOG_INFO(log, "Block with ID {} already exists; ignoring it (removing part {})", block_id, part->name); + is_already_existing_part = false; + block_number = block_number_lock->getNumber(); - part->is_duplicate = true; - transaction.rollback(); - last_block_is_duplicate = true; - ProfileEvents::increment(ProfileEvents::DuplicatedInsertedBlocks); - } - else if (multi_code == Coordination::Error::ZNODEEXISTS && failed_op_path == quorum_info.status_path) - { - transaction.rollback(); + /// Set part attributes according to part_number. Prepare an entry for log. - throw Exception("Another quorum insert has been already started", ErrorCodes::UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE); + part->info.min_block = block_number; + part->info.max_block = block_number; + part->info.level = 0; + + part->name = part->getNewName(part->info); + + /// Will add log entry about new part. + + StorageReplicatedMergeTree::LogEntry log_entry; + log_entry.type = StorageReplicatedMergeTree::LogEntry::GET_PART; + log_entry.create_time = time(nullptr); + log_entry.source_replica = storage.replica_name; + log_entry.new_part_name = part->name; + log_entry.quorum = quorum; + log_entry.block_id = block_id; + log_entry.new_part_type = part->getType(); + + ops.emplace_back(zkutil::makeCreateRequest( + storage.zookeeper_path + "/log/log-", + log_entry.toString(), + zkutil::CreateMode::PersistentSequential)); + + /// Deletes the information that the block number is used for writing. + block_number_lock->getUnlockOps(ops); + + /** If we need a quorum - create a node in which the quorum is monitored. 
+ * (If such a node already exists, then someone has managed to make another quorum record at the same time, + * but for it the quorum has not yet been reached. + * You can not do the next quorum record at this time.) + */ + if (quorum) + { + ReplicatedMergeTreeQuorumEntry quorum_entry; + quorum_entry.part_name = part->name; + quorum_entry.required_number_of_replicas = quorum; + quorum_entry.replicas.insert(storage.replica_name); + + /** At this point, this node will contain information that the current replica received a part. + * When other replicas will receive this part (in the usual way, processing the replication log), + * they will add themselves to the contents of this node. + * When it contains information about `quorum` number of replicas, this node is deleted, + * which indicates that the quorum has been reached. + */ + + ops.emplace_back( + zkutil::makeCreateRequest( + quorum_info.status_path, + quorum_entry.toString(), + zkutil::CreateMode::Persistent)); + + /// Make sure that during the insertion time, the replica was not reinitialized or disabled (when the server is finished). + ops.emplace_back( + zkutil::makeCheckRequest( + storage.replica_path + "/is_active", + quorum_info.is_active_node_version)); + + /// Unfortunately, just checking the above is not enough, because `is_active` + /// node can be deleted and reappear with the same version. + /// But then the `host` value will change. We will check this. + /// It's great that these two nodes change in the same transaction (see MergeTreeRestartingThread). + ops.emplace_back( + zkutil::makeCheckRequest( + storage.replica_path + "/host", + quorum_info.host_node_version)); + } } else { - /// NOTE: We could be here if the node with the quorum existed, but was quickly removed. - transaction.rollback(); - throw Exception("Unexpected logical error while adding block " + toString(block_number) + " with ID '" + block_id + "': " - + Coordination::errorMessage(multi_code) + ", path " + failed_op_path, - ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR); + is_already_existing_part = true; + + /// This block was already written to some replica. Get the part name for it. + /// Note: race condition with DROP PARTITION operation is possible. User will get "No node" exception and it is Ok. + existing_part_name = zookeeper->get(storage.zookeeper_path + "/blocks/" + block_id); + + /// If it exists on our replica, ignore it. + if (storage.getActiveContainingPart(existing_part_name)) + { + LOG_INFO(log, "Block with ID {} already exists locally as part {}; ignoring it.", block_id, existing_part_name); + part->is_duplicate = true; + last_block_is_duplicate = true; + ProfileEvents::increment(ProfileEvents::DuplicatedInsertedBlocks); + return; + } + + LOG_INFO(log, "Block with ID {} already exists on other replicas as part {}; will write it locally with that name.", + block_id, existing_part_name); + + /// If it does not exist, we will write a new part with existing name. + /// Note that it may also appear on filesystem right now in PreCommitted state due to concurrent inserts of the same data. + /// It will be checked when we will try to rename directory. + + part->name = existing_part_name; + part->info = MergeTreePartInfo::fromPartName(existing_part_name, storage.format_version); + /// Used only for exception messages. + block_number = part->info.min_block; + + + /// Do not check for duplicate on commit to ZK. 
+ block_id_path.clear(); } - } - else if (Coordination::isHardwareError(multi_code)) - { - transaction.rollback(); - throw Exception("Unrecoverable network error while adding block " + toString(block_number) + " with ID '" + block_id + "': " - + Coordination::errorMessage(multi_code), ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR); - } - else - { - transaction.rollback(); - throw Exception("Unexpected ZooKeeper error while adding block " + toString(block_number) + " with ID '" + block_id + "': " - + Coordination::errorMessage(multi_code), ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR); + + /// Information about the part. + storage.getCommitPartOps(ops, part, block_id_path); + + MergeTreeData::Transaction transaction(storage); /// If you can not add a part to ZK, we'll remove it back from the working set. + bool renamed = false; + try + { + renamed = storage.renameTempPartAndAdd(part, nullptr, &transaction); + } + catch (const Exception & e) + { + if (e.code() != ErrorCodes::DUPLICATE_DATA_PART + && e.code() != ErrorCodes::PART_IS_TEMPORARILY_LOCKED) + throw; + } + if (!renamed) + { + if (is_already_existing_part) + { + LOG_INFO(log, "Part {} is duplicate and it is already written by concurrent request or fetched; ignoring it.", part->name); + return; + } + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Part with name {} is already written by concurrent request." + " It should not happen for non-duplicate data parts because unique names are assigned for them. It's a bug", + part->name); + } + + Coordination::Responses responses; + Coordination::Error multi_code = zookeeper->tryMultiNoThrow(ops, responses); /// 1 RTT + + if (multi_code == Coordination::Error::ZOK) + { + transaction.commit(); + storage.merge_selecting_task->schedule(); + + /// Lock nodes have been already deleted, do not delete them in destructor + if (block_number_lock) + block_number_lock->assumeUnlocked(); + } + else if (multi_code == Coordination::Error::ZCONNECTIONLOSS + || multi_code == Coordination::Error::ZOPERATIONTIMEOUT) + { + /** If the connection is lost, and we do not know if the changes were applied, we can not delete the local part + * if the changes were applied, the inserted block appeared in `/blocks/`, and it can not be inserted again. + */ + transaction.commit(); + storage.enqueuePartForCheck(part->name, MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER); + + /// We do not know whether or not data has been inserted. + throw Exception("Unknown status, client must retry. Reason: " + String(Coordination::errorMessage(multi_code)), + ErrorCodes::UNKNOWN_STATUS_OF_INSERT); + } + else if (Coordination::isUserError(multi_code)) + { + String failed_op_path = zkutil::KeeperMultiException(multi_code, ops, responses).getPathForFirstFailedOp(); + + if (multi_code == Coordination::Error::ZNODEEXISTS && deduplicate_block && failed_op_path == block_id_path) + { + /// Block with the same id has just appeared in the table (or on another replica), rollback the insertion. + LOG_INFO(log, "Block with ID {} already exists (it has just appeared). Renaming part {} back to {}. Will retry write.", + block_id, part->name, temporary_part_relative_path); + + /// We will try to add this part again on the new iteration as it's just a new part. + /// So remove it from storage parts set immediately and transfer state to temporary.
+ transaction.rollbackPartsToTemporaryState(); + + part->is_temp = true; + part->renameTo(temporary_part_relative_path, false); + + /// If this part appeared on another replica then it's better to try to write it locally one more time. If it's our part + /// then it will be ignored on the next iteration. + ++loop_counter; + if (loop_counter == max_iterations) + { + part->is_duplicate = true; /// Part is duplicate, just remove it from local FS + throw Exception("Too many transaction retries - it may indicate an error", ErrorCodes::DUPLICATE_DATA_PART); + } + continue; + } + else if (multi_code == Coordination::Error::ZNODEEXISTS && failed_op_path == quorum_info.status_path) + { + transaction.rollback(); + throw Exception("Another quorum insert has been already started", ErrorCodes::UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE); + } + else + { + /// NOTE: We could be here if the node with the quorum existed, but was quickly removed. + transaction.rollback(); + throw Exception("Unexpected logical error while adding block " + toString(block_number) + " with ID '" + block_id + "': " + + Coordination::errorMessage(multi_code) + ", path " + failed_op_path, + ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR); + } + } + else if (Coordination::isHardwareError(multi_code)) + { + transaction.rollback(); + throw Exception("Unrecoverable network error while adding block " + toString(block_number) + " with ID '" + block_id + "': " + + Coordination::errorMessage(multi_code), ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR); + } + else + { + transaction.rollback(); + throw Exception("Unexpected ZooKeeper error while adding block " + toString(block_number) + " with ID '" + block_id + "': " + + Coordination::errorMessage(multi_code), ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR); + } + + break; } if (quorum) { + if (is_already_existing_part) + { + /// We get duplicate part without fetch + storage.updateQuorum(part->name); + } + /// We are waiting for quorum to be satisfied. LOG_TRACE(log, "Waiting for quorum"); @@ -397,14 +488,15 @@ void ReplicatedMergeTreeBlockOutputStream::commitPart( ReplicatedMergeTreeQuorumEntry quorum_entry(value); /// If the node has time to disappear, and then appear again for the next insert. - if (quorum_entry.part_name != part_name) + if (quorum_entry.part_name != part->name) break; if (!event->tryWait(quorum_timeout_ms)) throw Exception("Timeout while waiting for quorum", ErrorCodes::TIMEOUT_EXCEEDED); } - /// And what if it is possible that the current replica at this time has ceased to be active and the quorum is marked as failed and deleted? + /// And what if it is possible that the current replica at this time has ceased to be active + /// and the quorum is marked as failed and deleted?
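
// The loop introduced above boils down to a bounded retry: attempt to publish the part, and on a
// name/block conflict roll the part back to the temporary state and try again, giving up after
// max_iterations. A self-contained sketch of that shape (CommitResult and tryCommit are invented
// stand-ins, not the storage API):

#include <cstddef>
#include <stdexcept>

enum class CommitResult { Ok, NameAlreadyTaken };

// One attempt to publish the part; the real code renames the part and runs a ZooKeeper multi-op.
CommitResult tryCommit(size_t attempt)
{
    return attempt < 2 ? CommitResult::NameAlreadyTaken : CommitResult::Ok;  // pretend two conflicts happen
}

void commitWithRetries()
{
    constexpr size_t max_iterations = 10;
    for (size_t attempt = 0; ; ++attempt)
    {
        if (attempt == max_iterations)
            throw std::runtime_error("Too many transaction retries - it may indicate an error");

        if (tryCommit(attempt) == CommitResult::Ok)
            break;

        // Conflict: roll the part back to the temporary state and retry with a fresh block number.
    }
}

int main() { commitWithRetries(); }
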
String value; if (!zookeeper->tryGet(storage.replica_path + "/is_active", value, nullptr) || value != quorum_info.is_active_node_value) diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index d3af3942428..1188fd2edd3 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -233,6 +234,25 @@ If you use the Replicated version of engines, see https://clickhouse.tech/docs/e } +static void randomizePartTypeSettings(const std::unique_ptr & storage_settings) +{ + static constexpr auto MAX_THRESHOLD_FOR_ROWS = 100000; + static constexpr auto MAX_THRESHOLD_FOR_BYTES = 1024 * 1024 * 10; + + /// Create all parts in wide format with probability 1/3. + if (thread_local_rng() % 3 == 0) + { + storage_settings->min_rows_for_wide_part = 0; + storage_settings->min_bytes_for_wide_part = 0; + } + else + { + storage_settings->min_rows_for_wide_part = std::uniform_int_distribution{0, MAX_THRESHOLD_FOR_ROWS}(thread_local_rng); + storage_settings->min_bytes_for_wide_part = std::uniform_int_distribution{0, MAX_THRESHOLD_FOR_BYTES}(thread_local_rng); + } +} + + static StoragePtr create(const StorageFactory::Arguments & args) { /** [Replicated][|Summing|Collapsing|Aggregating|Replacing|Graphite]MergeTree (2 * 7 combinations) engines @@ -515,7 +535,11 @@ static StoragePtr create(const StorageFactory::Arguments & args) StorageInMemoryMetadata metadata; metadata.columns = args.columns; - std::unique_ptr storage_settings = std::make_unique(args.context.getMergeTreeSettings()); + std::unique_ptr storage_settings; + if (replicated) + storage_settings = std::make_unique(args.context.getReplicatedMergeTreeSettings()); + else + storage_settings = std::make_unique(args.context.getMergeTreeSettings()); if (is_extended_storage_def) { @@ -653,6 +677,20 @@ static StoragePtr create(const StorageFactory::Arguments & args) ++arg_num; } + /// Allow to randomize part type for tests to cover more cases. + /// But if settings were set explicitly restrict it. + if (storage_settings->randomize_part_type + && !storage_settings->min_rows_for_wide_part.changed + && !storage_settings->min_bytes_for_wide_part.changed) + { + randomizePartTypeSettings(storage_settings); + LOG_INFO(&Poco::Logger::get(args.table_id.getNameForLogs() + " (registerStorageMergeTree)"), + "Applied setting 'randomize_part_type'. " + "Setting 'min_rows_for_wide_part' changed to {}. " + "Setting 'min_bytes_for_wide_part' changed to {}.", + storage_settings->min_rows_for_wide_part, storage_settings->min_bytes_for_wide_part); + } + if (arg_num != arg_cnt) throw Exception("Wrong number of engine arguments.", ErrorCodes::BAD_ARGUMENTS); diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 1d2ba88d3d4..b06434b6317 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -57,6 +57,7 @@ #include #include #include +#include namespace @@ -99,6 +100,12 @@ ASTPtr rewriteSelectQuery(const ASTPtr & query, const std::string & database, co auto modified_query_ast = query->clone(); ASTSelectQuery & select_query = modified_query_ast->as(); + + // Get rid of the settings clause so we don't send them to remote. Thus newly non-important + // settings won't break any remote parser. It's also more reasonable since the query settings + // are written into the query context and will be sent by the query pipeline. 
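
// The same idea in a standalone form (the real code on the next line clears the SETTINGS expression on
// the AST; here a plain string stands in for the query and stripSettingsClause is an illustrative helper):

#include <cassert>
#include <string>

// Drop a trailing "SETTINGS ..." clause so the remote server never has to parse it;
// the settings still travel with the query context.
std::string stripSettingsClause(const std::string & query)
{
    const auto pos = query.rfind(" SETTINGS ");
    return pos == std::string::npos ? query : query.substr(0, pos);
}

int main()
{
    assert(stripSettingsClause("SELECT 1 FROM t SETTINGS max_threads = 4") == "SELECT 1 FROM t");
    assert(stripSettingsClause("SELECT 1 FROM t") == "SELECT 1 FROM t");
}
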
+ select_query.setExpression(ASTSelectQuery::Expression::SETTINGS, {}); + if (table_function_ptr) select_query.addTableFunction(table_function_ptr); else @@ -860,6 +867,7 @@ void StorageDistributed::flushClusterNodesAllData() void StorageDistributed::rename(const String & new_path_to_table_data, const StorageID & new_table_id) { + assert(relative_data_path != new_path_to_table_data); if (!relative_data_path.empty()) renameOnDisk(new_path_to_table_data); renameInMemory(new_table_id); @@ -870,10 +878,9 @@ void StorageDistributed::renameOnDisk(const String & new_path_to_table_data) { for (const DiskPtr & disk : data_volume->getDisks()) { - const String path(disk->getPath()); - auto new_path = path + new_path_to_table_data; - Poco::File(path + relative_data_path).renameTo(new_path); + disk->moveDirectory(relative_data_path, new_path_to_table_data); + auto new_path = disk->getPath() + new_path_to_table_data; LOG_DEBUG(log, "Updating path to {}", new_path); std::lock_guard lock(cluster_nodes_mutex); diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 9e60d5bad15..cc47047dc78 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -525,9 +525,12 @@ void StorageFile::rename(const String & new_path_to_table_data, const StorageID if (paths.size() != 1) throw Exception("Can't rename table " + getStorageID().getNameForLogs() + " in readonly mode", ErrorCodes::DATABASE_ACCESS_DENIED); + std::string path_new = getTablePath(base_path + new_path_to_table_data, format_name); + if (path_new == paths[0]) + return; + std::unique_lock lock(rwlock); - std::string path_new = getTablePath(base_path + new_path_to_table_data, format_name); Poco::File(Poco::Path(path_new).parent()).createDirectories(); Poco::File(paths[0]).renameTo(path_new); diff --git a/src/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp index 45ab3293723..e437bfb05f1 100644 --- a/src/Storages/StorageLog.cpp +++ b/src/Storages/StorageLog.cpp @@ -27,6 +27,8 @@ #include #include +#include + #define DBMS_STORAGE_LOG_DATA_FILE_EXTENSION ".bin" #define DBMS_STORAGE_LOG_MARKS_FILE_NAME "__marks.mrk" @@ -548,17 +550,20 @@ void StorageLog::loadMarks() void StorageLog::rename(const String & new_path_to_table_data, const StorageID & new_table_id) { - std::unique_lock lock(rwlock); + assert(table_path != new_path_to_table_data); + { + std::unique_lock lock(rwlock); - disk->moveDirectory(table_path, new_path_to_table_data); + disk->moveDirectory(table_path, new_path_to_table_data); - table_path = new_path_to_table_data; - file_checker.setPath(table_path + "sizes.json"); + table_path = new_path_to_table_data; + file_checker.setPath(table_path + "sizes.json"); - for (auto & file : files) - file.second.data_file_path = table_path + fileName(file.second.data_file_path); + for (auto & file : files) + file.second.data_file_path = table_path + fileName(file.second.data_file_path); - marks_file_path = table_path + DBMS_STORAGE_LOG_MARKS_FILE_NAME; + marks_file_path = table_path + DBMS_STORAGE_LOG_MARKS_FILE_NAME; + } renameInMemory(new_table_id); } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 608d983a21e..65c0c5ac313 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -113,6 +113,7 @@ namespace ErrorCodes extern const int ALL_REPLICAS_LOST; extern const int REPLICA_STATUS_CHANGED; extern const int CANNOT_ASSIGN_ALTER; + extern const int DIRECTORY_ALREADY_EXISTS; extern const int 
ILLEGAL_TYPE_OF_ARGUMENT; } @@ -697,7 +698,9 @@ void StorageReplicatedMergeTree::drop() if (has_metadata_in_zookeeper) { - auto zookeeper = tryGetZooKeeper(); + /// Table can be shut down, restarting thread is not active + /// and calling StorageReplicatedMergeTree::getZooKeeper() won't suffice. + auto zookeeper = global_context.getZooKeeper(); /// If probably there is metadata in ZooKeeper, we don't allow to drop the table. if (is_readonly || !zookeeper) @@ -897,10 +900,17 @@ ColumnsDescription new_columns, const ReplicatedMergeTreeTableMetadata::Diff & m if (metadata_diff.ttl_table_changed) { - ParserTTLExpressionList parser; - auto ttl_for_table_ast = parseQuery(parser, metadata_diff.new_ttl_table, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - new_metadata.table_ttl = TTLTableDescription::getTTLForTableFromAST( - ttl_for_table_ast, new_metadata.columns, global_context, new_metadata.primary_key); + if (!metadata_diff.new_ttl_table.empty()) + { + ParserTTLExpressionList parser; + auto ttl_for_table_ast = parseQuery(parser, metadata_diff.new_ttl_table, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + new_metadata.table_ttl = TTLTableDescription::getTTLForTableFromAST( + ttl_for_table_ast, new_metadata.columns, global_context, new_metadata.primary_key); + } + else /// TTL was removed + { + new_metadata.table_ttl = TTLTableDescription{}; + } } } @@ -3314,6 +3324,15 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora part->renameTo("detached/" + part_name, true); } } + catch (const Exception & e) + { + /// The same part is being written right now (but probably it's not committed yet). + /// We will check the need for fetch later. + if (e.code() == ErrorCodes::DIRECTORY_ALREADY_EXISTS) + return false; + + throw; + } catch (...) { if (!to_detached) @@ -3806,7 +3825,12 @@ void StorageReplicatedMergeTree::alter( future_metadata_in_zk.partition_key = serializeAST(*future_metadata.partition_key.expression_list_ast); if (ast_to_str(future_metadata.table_ttl.definition_ast) != ast_to_str(current_metadata->table_ttl.definition_ast)) - future_metadata_in_zk.ttl_table = serializeAST(*future_metadata.table_ttl.definition_ast); + { + if (future_metadata.table_ttl.definition_ast) + future_metadata_in_zk.ttl_table = serializeAST(*future_metadata.table_ttl.definition_ast); + else /// TTL was removed + future_metadata_in_zk.ttl_table = ""; + } String new_indices_str = future_metadata.secondary_indices.toString(); if (new_indices_str != current_metadata->secondary_indices.toString()) @@ -4773,9 +4797,11 @@ void StorageReplicatedMergeTree::fetchPartition( missing_parts.clear(); for (const String & part : parts_to_fetch) { + bool fetched = false; + try { - fetchPart(part, metadata_snapshot, best_replica_path, true, 0, zookeeper); + fetched = fetchPart(part, metadata_snapshot, best_replica_path, true, 0, zookeeper); } catch (const DB::Exception & e) { @@ -4784,8 +4810,10 @@ void StorageReplicatedMergeTree::fetchPartition( throw; LOG_INFO(log, e.displayText()); - missing_parts.push_back(part); } + + if (!fetched) + missing_parts.push_back(part); } ++try_no; diff --git a/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp index 3e7fb5b853e..c4344cf6f1f 100644 --- a/src/Storages/StorageStripeLog.cpp +++ b/src/Storages/StorageStripeLog.cpp @@ -35,6 +35,8 @@ #include #include +#include + namespace DB { @@ -282,12 +284,15 @@ StorageStripeLog::StorageStripeLog( void StorageStripeLog::rename(const String & new_path_to_table_data, const StorageID & new_table_id) { - std::unique_lock 
lock(rwlock); + assert(table_path != new_path_to_table_data); + { + std::unique_lock lock(rwlock); - disk->moveDirectory(table_path, new_path_to_table_data); + disk->moveDirectory(table_path, new_path_to_table_data); - table_path = new_path_to_table_data; - file_checker.setPath(table_path + "sizes.json"); + table_path = new_path_to_table_data; + file_checker.setPath(table_path + "sizes.json"); + } renameInMemory(new_table_id); } diff --git a/src/Storages/StorageTinyLog.cpp b/src/Storages/StorageTinyLog.cpp index e7fe7e2d5f9..0bdcab8abf4 100644 --- a/src/Storages/StorageTinyLog.cpp +++ b/src/Storages/StorageTinyLog.cpp @@ -3,6 +3,7 @@ #include #include +#include #include @@ -407,15 +408,18 @@ void StorageTinyLog::addFiles(const String & column_name, const IDataType & type void StorageTinyLog::rename(const String & new_path_to_table_data, const StorageID & new_table_id) { - std::unique_lock lock(rwlock); + assert(table_path != new_path_to_table_data); + { + std::unique_lock lock(rwlock); - disk->moveDirectory(table_path, new_path_to_table_data); + disk->moveDirectory(table_path, new_path_to_table_data); - table_path = new_path_to_table_data; - file_checker.setPath(table_path + "sizes.json"); + table_path = new_path_to_table_data; + file_checker.setPath(table_path + "sizes.json"); - for (auto & file : files) - file.second.data_file_path = table_path + fileName(file.second.data_file_path); + for (auto & file : files) + file.second.data_file_path = table_path + fileName(file.second.data_file_path); + } renameInMemory(new_table_id); } diff --git a/src/Storages/System/StorageSystemMergeTreeSettings.cpp b/src/Storages/System/StorageSystemMergeTreeSettings.cpp index 4de600ac036..19cbf76f252 100644 --- a/src/Storages/System/StorageSystemMergeTreeSettings.cpp +++ b/src/Storages/System/StorageSystemMergeTreeSettings.cpp @@ -7,7 +7,8 @@ namespace DB { -NamesAndTypesList SystemMergeTreeSettings::getNamesAndTypes() +template +NamesAndTypesList SystemMergeTreeSettings::getNamesAndTypes() { return { {"name", std::make_shared()}, @@ -18,9 +19,11 @@ NamesAndTypesList SystemMergeTreeSettings::getNamesAndTypes() }; } -void SystemMergeTreeSettings::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const +template +void SystemMergeTreeSettings::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - for (const auto & setting : context.getMergeTreeSettings().all()) + const auto & settings = replicated ? context.getReplicatedMergeTreeSettings().all() : context.getMergeTreeSettings().all(); + for (const auto & setting : settings) { res_columns[0]->insert(setting.getName()); res_columns[1]->insert(setting.getValueString()); @@ -30,4 +33,6 @@ void SystemMergeTreeSettings::fillData(MutableColumns & res_columns, const Conte } } +template class SystemMergeTreeSettings; +template class SystemMergeTreeSettings; } diff --git a/src/Storages/System/StorageSystemMergeTreeSettings.h b/src/Storages/System/StorageSystemMergeTreeSettings.h index ac4d9d27505..9f61fa6f780 100644 --- a/src/Storages/System/StorageSystemMergeTreeSettings.h +++ b/src/Storages/System/StorageSystemMergeTreeSettings.h @@ -11,18 +11,22 @@ namespace DB class Context; -/** implements system table "merge_tree_settings", which allows to get information about the current MergeTree settings. +/** implements system table "merge_tree_settings" and "replicated_merge_tree_settings", + * which allows to get information about the current MergeTree settings. 
*/ -class SystemMergeTreeSettings final : public ext::shared_ptr_helper, public IStorageSystemOneBlock +template +class SystemMergeTreeSettings final : public ext::shared_ptr_helper>, + public IStorageSystemOneBlock> { - friend struct ext::shared_ptr_helper; + friend struct ext::shared_ptr_helper>; + public: - std::string getName() const override { return "SystemMergeTreeSettings"; } + std::string getName() const override { return replicated ? "SystemReplicatedMergeTreeSettings" : "SystemMergeTreeSettings"; } static NamesAndTypesList getNamesAndTypes(); protected: - using IStorageSystemOneBlock::IStorageSystemOneBlock; + using IStorageSystemOneBlock>::IStorageSystemOneBlock; void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index 36e4e34361b..2b7ee363f05 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -82,7 +82,8 @@ void attachSystemTablesLocal(IDatabase & system_database) attach(system_database, "functions"); attach(system_database, "events"); attach(system_database, "settings"); - attach(system_database, "merge_tree_settings"); + attach>(system_database, "merge_tree_settings"); + attach>(system_database, "replicated_merge_tree_settings"); attach(system_database, "build_options"); attach(system_database, "formats"); attach(system_database, "table_functions"); diff --git a/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp b/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp index d9ddb8e9722..4e7bff0ef41 100644 --- a/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp +++ b/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp @@ -3,7 +3,7 @@ #include // I know that inclusion of .cpp is not good at all -#include +#include // NOLINT using namespace DB; static Block getBlockWithSize(size_t required_size_in_bytes, size_t size_of_row_in_bytes) diff --git a/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity_compact_parts.cpp b/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity_compact_parts.cpp index f87293dcd5d..09b24c7dad6 100644 --- a/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity_compact_parts.cpp +++ b/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity_compact_parts.cpp @@ -3,7 +3,7 @@ #include // I know that inclusion of .cpp is not good at all -#include +#include // NOLINT using namespace DB; diff --git a/src/TableFunctions/TableFunctionNull.cpp b/src/TableFunctions/TableFunctionNull.cpp new file mode 100644 index 00000000000..00283554041 --- /dev/null +++ b/src/TableFunctions/TableFunctionNull.cpp @@ -0,0 +1,43 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "registerTableFunctions.h" + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + +StoragePtr TableFunctionNull::executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const +{ + if (const auto * function = ast_function->as()) + { + auto arguments = function->arguments->children; + + if (arguments.size() != 1) + throw Exception("Table function '" + getName() + "' requires 'structure'.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + + auto structure = evaluateConstantExpressionOrIdentifierAsLiteral(arguments[0], context)->as()->value.safeGet(); + ColumnsDescription 
columns = parseColumnsListFromString(structure, context); + + auto res = StorageNull::create(StorageID(getDatabaseName(), table_name), columns, ConstraintsDescription()); + res->startup(); + return res; + } + throw Exception("Table function '" + getName() + "' requires 'structure'.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); +} + +void registerTableFunctionNull(TableFunctionFactory & factory) +{ + factory.registerFunction(); +} +} diff --git a/src/TableFunctions/TableFunctionNull.h b/src/TableFunctions/TableFunctionNull.h new file mode 100644 index 00000000000..48617352b25 --- /dev/null +++ b/src/TableFunctions/TableFunctionNull.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +/* null(structure) - creates a temporary null storage + * + * Used for testing purposes, for convenience writing tests and demos. + */ +class TableFunctionNull : public ITableFunction +{ +public: + static constexpr auto name = "null"; + std::string getName() const override { return name; } +private: + StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; + const char * getStorageTypeName() const override { return "Null"; } +}; + +} diff --git a/src/TableFunctions/registerTableFunctions.cpp b/src/TableFunctions/registerTableFunctions.cpp index 25a495a9185..09255c2bd74 100644 --- a/src/TableFunctions/registerTableFunctions.cpp +++ b/src/TableFunctions/registerTableFunctions.cpp @@ -11,6 +11,7 @@ void registerTableFunctions() registerTableFunctionMerge(factory); registerTableFunctionRemote(factory); registerTableFunctionNumbers(factory); + registerTableFunctionNull(factory); registerTableFunctionZeros(factory); registerTableFunctionFile(factory); registerTableFunctionURL(factory); diff --git a/src/TableFunctions/registerTableFunctions.h b/src/TableFunctions/registerTableFunctions.h index 8ff64a22fea..ab05187eeab 100644 --- a/src/TableFunctions/registerTableFunctions.h +++ b/src/TableFunctions/registerTableFunctions.h @@ -11,6 +11,7 @@ class TableFunctionFactory; void registerTableFunctionMerge(TableFunctionFactory & factory); void registerTableFunctionRemote(TableFunctionFactory & factory); void registerTableFunctionNumbers(TableFunctionFactory & factory); +void registerTableFunctionNull(TableFunctionFactory & factory); void registerTableFunctionZeros(TableFunctionFactory & factory); void registerTableFunctionFile(TableFunctionFactory & factory); void registerTableFunctionURL(TableFunctionFactory & factory); diff --git a/src/TableFunctions/ya.make b/src/TableFunctions/ya.make index c9c80003ffb..03432e2bbbc 100644 --- a/src/TableFunctions/ya.make +++ b/src/TableFunctions/ya.make @@ -19,6 +19,7 @@ SRCS( TableFunctionInput.cpp TableFunctionMerge.cpp TableFunctionMySQL.cpp + TableFunctionNull.cpp TableFunctionNumbers.cpp TableFunctionRemote.cpp TableFunctionURL.cpp diff --git a/tests/ci/ci_config.json b/tests/ci/ci_config.json index c69ef64b807..220d8d801ec 100644 --- a/tests/ci/ci_config.json +++ b/tests/ci/ci_config.json @@ -431,7 +431,7 @@ }, "Integration tests (release)": { "required_build_properties": { - "compiler": "clang-11", + "compiler": "gcc-10", "package_type": "deb", "build_type": "relwithdebuginfo", "sanitizer": "none", diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 6bfad37d8ad..a3bed189d55 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -506,15 +506,6 @@ def collect_build_flags(client): else: raise Exception("Cannot get inforamtion about build from server errorcode {}, 
stderr {}".format(clickhouse_proc.returncode, stderr)) - clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE) - (stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.merge_tree_settings WHERE name = 'min_bytes_for_wide_part'") - - if clickhouse_proc.returncode == 0: - if '10485760' in stdout: - result.append(BuildFlags.POLYMORPHIC_PARTS) - else: - raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr)) - return result diff --git a/tests/integration/test_adaptive_granularity/configs/wide_parts_only.xml b/tests/integration/test_adaptive_granularity/configs/wide_parts_only.xml new file mode 100644 index 00000000000..42e2173f718 --- /dev/null +++ b/tests/integration/test_adaptive_granularity/configs/wide_parts_only.xml @@ -0,0 +1,6 @@ + + + 0 + 0 + + diff --git a/tests/integration/test_adaptive_granularity/test.py b/tests/integration/test_adaptive_granularity/test.py index 0c5d7bcb63c..ec3169bb995 100644 --- a/tests/integration/test_adaptive_granularity/test.py +++ b/tests/integration/test_adaptive_granularity/test.py @@ -6,45 +6,23 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], - with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], - with_zookeeper=True) +node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True) +node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], - with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', - with_installed_binary=True) -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], - with_zookeeper=True) +node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', with_installed_binary=True) +node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True) -node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], - with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', - with_installed_binary=True) -node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], - with_zookeeper=True) +node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', with_installed_binary=True) +node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True) -node7 = cluster.add_instance('node7', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], - with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True, - with_installed_binary=True) -node8 = cluster.add_instance('node8', 
main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], - with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, - with_installed_binary=True) +node7 = cluster.add_instance('node7', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True, with_installed_binary=True) +node8 = cluster.add_instance('node8', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True) -node9 = cluster.add_instance('node9', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', - 'configs/merge_tree_settings.xml'], with_zookeeper=True, - image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, - with_installed_binary=True) -node10 = cluster.add_instance('node10', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', - 'configs/merge_tree_settings.xml'], with_zookeeper=True, - image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True, - with_installed_binary=True) +node9 = cluster.add_instance('node9', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/merge_tree_settings.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True) +node10 = cluster.add_instance('node10', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/merge_tree_settings.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True, with_installed_binary=True) -node11 = cluster.add_instance('node11', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], - with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, - with_installed_binary=True) -node12 = cluster.add_instance('node12', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], - with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, - with_installed_binary=True) +node11 = cluster.add_instance('node11', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True) +node12 = cluster.add_instance('node12', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True) def prepare_single_pair_with_setting(first_node, second_node, group): @@ -296,10 +274,14 @@ def test_mixed_granularity_single_node(start_dynamic_cluster, node): "INSERT INTO table_with_default_granularity VALUES (toDate('2018-09-01'), 1, 333), (toDate('2018-09-02'), 2, 444)") def callback(n): - n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", - "1") - n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", - "1") + new_config = """ + + 1 + 0 +""" + + n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", new_config) + n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", new_config) node.restart_with_latest_version(callback_onstop=callback) node.query("SYSTEM RELOAD CONFIG") @@ -342,10 +324,14 @@ def test_version_update_two_nodes(start_dynamic_cluster): assert node12.query("SELECT COUNT() FROM table_with_default_granularity") == '2\n' def callback(n): - 
n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", - "0") - n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", - "0") + new_config = """ + + 0 + 0 +""" + + n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", new_config) + n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", new_config) node12.restart_with_latest_version(callback_onstop=callback) diff --git a/tests/integration/test_backward_compatibility/configs/wide_parts_only.xml b/tests/integration/test_backward_compatibility/configs/wide_parts_only.xml new file mode 100644 index 00000000000..b240c0fcb2a --- /dev/null +++ b/tests/integration/test_backward_compatibility/configs/wide_parts_only.xml @@ -0,0 +1,5 @@ + + + 0 + + diff --git a/tests/integration/test_backward_compatibility/test.py b/tests/integration/test_backward_compatibility/test.py index bc6d534c50f..d6a6520ed34 100644 --- a/tests/integration/test_backward_compatibility/test.py +++ b/tests/integration/test_backward_compatibility/test.py @@ -3,9 +3,8 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='19.17.8.54', - stay_alive=True, with_installed_binary=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) +node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='19.17.8.54', stay_alive=True, with_installed_binary=True) +node2 = cluster.add_instance('node2', main_configs=['configs/wide_parts_only.xml'], with_zookeeper=True) @pytest.fixture(scope="module") @@ -25,7 +24,7 @@ def start_cluster(): cluster.shutdown() -def test_backward_compatability(start_cluster): +def test_backward_compatability1(start_cluster): node2.query("INSERT INTO t VALUES (today(), 1)") node1.query("SYSTEM SYNC REPLICA t", timeout=10) diff --git a/tests/integration/test_check_table/test.py b/tests/integration/test_check_table/test.py index e57235502d3..30751b8cbba 100644 --- a/tests/integration/test_check_table/test.py +++ b/tests/integration/test_check_table/test.py @@ -21,7 +21,8 @@ def started_cluster(): node1.query(''' CREATE TABLE non_replicated_mt(date Date, id UInt32, value Int32) - ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id; + ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id + SETTINGS min_bytes_for_wide_part=0; ''') yield cluster diff --git a/tests/integration/test_default_compression_codec/configs/wide_parts_only.xml b/tests/integration/test_default_compression_codec/configs/wide_parts_only.xml new file mode 100644 index 00000000000..42e2173f718 --- /dev/null +++ b/tests/integration/test_default_compression_codec/configs/wide_parts_only.xml @@ -0,0 +1,6 @@ + + + 0 + 0 + + diff --git a/tests/integration/test_default_compression_codec/test.py b/tests/integration/test_default_compression_codec/test.py index 3eef292018c..db7dc032dcc 100644 --- a/tests/integration/test_default_compression_codec/test.py +++ b/tests/integration/test_default_compression_codec/test.py @@ -6,12 +6,9 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/default_compression.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/default_compression.xml'], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=['configs/default_compression.xml'], - 
image='yandex/clickhouse-server', tag='20.3.16', stay_alive=True, - with_installed_binary=True) - +node1 = cluster.add_instance('node1', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True) +node2 = cluster.add_instance('node2', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True) +node3 = cluster.add_instance('node3', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], image='yandex/clickhouse-server', tag='20.3.16', stay_alive=True, with_installed_binary=True) @pytest.fixture(scope="module") def start_cluster(): diff --git a/tests/integration/test_distributed_ddl_parallel/__init__.py b/tests/integration/test_distributed_ddl_parallel/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_distributed_ddl_parallel/configs/ddl.xml b/tests/integration/test_distributed_ddl_parallel/configs/ddl.xml new file mode 100644 index 00000000000..b926f99c687 --- /dev/null +++ b/tests/integration/test_distributed_ddl_parallel/configs/ddl.xml @@ -0,0 +1,5 @@ + + + 2 + + diff --git a/tests/integration/test_distributed_ddl_parallel/configs/dict.xml b/tests/integration/test_distributed_ddl_parallel/configs/dict.xml new file mode 100644 index 00000000000..610d55841a0 --- /dev/null +++ b/tests/integration/test_distributed_ddl_parallel/configs/dict.xml @@ -0,0 +1,26 @@ + + + + slow_dict + + + sleep 7 + TabSeparated + + + + + + + + id + + + value + String + + + + 0 + + diff --git a/tests/integration/test_distributed_ddl_parallel/configs/remote_servers.xml b/tests/integration/test_distributed_ddl_parallel/configs/remote_servers.xml new file mode 100644 index 00000000000..8ffa9f024d7 --- /dev/null +++ b/tests/integration/test_distributed_ddl_parallel/configs/remote_servers.xml @@ -0,0 +1,18 @@ + + + + + + n1 + 9000 + + + + + n2 + 9000 + + + + + diff --git a/tests/integration/test_distributed_ddl_parallel/test.py b/tests/integration/test_distributed_ddl_parallel/test.py new file mode 100644 index 00000000000..96530b111cb --- /dev/null +++ b/tests/integration/test_distributed_ddl_parallel/test.py @@ -0,0 +1,89 @@ +# pylint: disable=unused-argument +# pylint: disable=redefined-outer-name +# pylint: disable=line-too-long + +from functools import wraps +import threading +import time +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +def add_instance(name): + main_configs=[ + 'configs/ddl.xml', + 'configs/remote_servers.xml', + ] + dictionaries=[ + 'configs/dict.xml', + ] + return cluster.add_instance(name, + main_configs=main_configs, + dictionaries=dictionaries, + with_zookeeper=True) + +initiator = add_instance('initiator') +n1 = add_instance('n1') +n2 = add_instance('n2') + +@pytest.fixture(scope='module', autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + +# verifies that the decorated function executes for longer than `sec` seconds +def longer_then(sec): + def wrapper(func): + @wraps(func) + def inner(*args, **kwargs): + ts = time.time() + result = func(*args, **kwargs) + te = time.time() + took = te-ts + assert took >= sec + return result + return inner + return wrapper + +# It takes 7 seconds to load slow_dict. +def thread_reload_dictionary(): + initiator.query('SYSTEM RELOAD DICTIONARY ON CLUSTER cluster slow_dict') + +# NOTE: uses inner function to exclude slow start_cluster() from timeout.
+ +def test_dict_load(): + @pytest.mark.timeout(10) + @longer_then(7) + def inner_test(): + initiator.query('SYSTEM RELOAD DICTIONARY slow_dict') + inner_test() + +def test_all_in_parallel(): + @pytest.mark.timeout(10) + @longer_then(7) + def inner_test(): + threads = [] + for _ in range(2): + threads.append(threading.Thread(target=thread_reload_dictionary)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + inner_test() + +def test_two_in_parallel_two_queued(): + @pytest.mark.timeout(19) + @longer_then(14) + def inner_test(): + threads = [] + for _ in range(4): + threads.append(threading.Thread(target=thread_reload_dictionary)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + inner_test() diff --git a/tests/integration/test_distributed_over_live_view/test.py b/tests/integration/test_distributed_over_live_view/test.py index d01a8febd92..9e62aaad982 100644 --- a/tests/integration/test_distributed_over_live_view/test.py +++ b/tests/integration/test_distributed_over_live_view/test.py @@ -55,6 +55,13 @@ def started_cluster(): finally: cluster.shutdown() +def poll_query(node, query, expected, timeout): + """Repeatedly execute query until either expected result is returned or timeout occurs. + """ + start_time = time.time() + while node.query(query) != expected and time.time() - start_time < timeout: + pass + assert node.query(query) == expected @pytest.mark.parametrize("node", NODES.values()[:1]) @pytest.mark.parametrize("source", ["lv_over_distributed_table"]) @@ -64,6 +71,8 @@ class TestLiveViewOverDistributedSuite: node0, node1 = NODES.values() select_query = "SELECT * FROM distributed_over_lv ORDER BY node, key FORMAT CSV" + select_query_dist_table = "SELECT * FROM distributed_table ORDER BY node, key FORMAT CSV" + select_count_query = "SELECT count() FROM distributed_over_lv" with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: @@ -87,7 +96,17 @@ class TestLiveViewOverDistributedSuite: client1.expect(prompt) client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3)") client2.expect(prompt) - time.sleep(2) + + poll_query(node0, select_count_query, "7\n", timeout=60) + print("\n--DEBUG1--") + print(select_query) + print(node0.query(select_query)) + print("---------") + print("\n--DEBUG2--") + print(select_query_dist_table) + print(node0.query(select_query_dist_table)) + print("---------") + client1.send(select_query) client1.expect('"node1",0,0') client1.expect('"node1",1,1') @@ -103,6 +122,7 @@ class TestLiveViewOverDistributedSuite: node0, node1 = NODES.values() select_query = "SELECT * FROM distributed_over_lv ORDER BY key, node FORMAT CSV" + select_count_query = "SELECT count() FROM distributed_over_lv" with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: @@ -126,7 +146,9 @@ class TestLiveViewOverDistributedSuite: client1.expect(prompt) client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3)") client2.expect(prompt) - time.sleep(2) + + poll_query(node0, select_count_query, "7\n", timeout=60) + client1.send(select_query) client1.expect('"node1",0,0') client1.expect('"node2",0,10') @@ -161,7 +183,9 @@ class TestLiveViewOverDistributedSuite: client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") client2.expect(prompt) - time.sleep(2) + + 
poll_query(node0, select_query, '"node1",3\n"node2",21\n', timeout=60) + client1.send(select_query) client1.expect('"node1",3') client1.expect('"node2",21') @@ -171,7 +195,9 @@ class TestLiveViewOverDistributedSuite: client1.expect(prompt) client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3)") client2.expect(prompt) - time.sleep(2) + + poll_query(node0, select_query, '"node1",12\n"node2",21\n', timeout=60) + client1.send(select_query) client1.expect('"node1",12') client1.expect('"node2",21') @@ -201,7 +227,9 @@ class TestLiveViewOverDistributedSuite: client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") client2.expect(prompt) - time.sleep(2) + + poll_query(node0, "SELECT count() FROM (%s)" % select_query.rsplit("FORMAT")[0], "3\n", timeout=60) + client1.send(select_query) client1.expect('0,10') client1.expect('1,12') @@ -210,7 +238,9 @@ class TestLiveViewOverDistributedSuite: client2.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 3, 3)") client2.expect(prompt) - time.sleep(2) + + poll_query(node0, "SELECT count() FROM (%s)" % select_query.rsplit("FORMAT")[0], "4\n", timeout=60) + client1.send(select_query) client1.expect('0,10') client1.expect('1,15') @@ -240,17 +270,9 @@ class TestLiveViewOverDistributedSuite: client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") client2.expect(prompt) - time.sleep(2) - - client1.send("SELECT sum(value) FROM distributed_over_lv") - client1.expect(r"24" + end_of_block) - client1.expect(prompt) + poll_query(node0, "SELECT sum(value) FROM distributed_over_lv", "24\n", timeout=60) client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3), ('node1', 4, 4)") client2.expect(prompt) - time.sleep(2) - - client1.send("SELECT sum(value) FROM distributed_over_lv") - client1.expect(r"31" + end_of_block) - client1.expect(prompt) + poll_query(node0, "SELECT sum(value) FROM distributed_over_lv", "31\n", timeout=60) diff --git a/tests/integration/test_filesystem_layout/test.py b/tests/integration/test_filesystem_layout/test.py index 93c8d3f7033..e2441d0d20d 100644 --- a/tests/integration/test_filesystem_layout/test.py +++ b/tests/integration/test_filesystem_layout/test.py @@ -19,7 +19,7 @@ def test_file_path_escaping(started_cluster): node.query('CREATE DATABASE IF NOT EXISTS test ENGINE = Ordinary') node.query(''' CREATE TABLE test.`T.a_b,l-e!` (`~Id` UInt32) - ENGINE = MergeTree() PARTITION BY `~Id` ORDER BY `~Id`; + ENGINE = MergeTree() PARTITION BY `~Id` ORDER BY `~Id` SETTINGS min_bytes_for_wide_part = 0; ''') node.query('''INSERT INTO test.`T.a_b,l-e!` VALUES (1);''') node.query('''ALTER TABLE test.`T.a_b,l-e!` FREEZE;''') diff --git a/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py b/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py index a0a3b0b1cb6..813e72fe7a7 100644 --- a/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py +++ b/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py @@ -99,6 +99,38 @@ def dml_with_materialize_mysql_database(clickhouse_node, mysql_node, service_nam mysql_node.query("DROP DATABASE test_database") +def materialize_mysql_database_with_datetime_and_decimal(clickhouse_node, mysql_node, service_name): + mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") + mysql_node.query("CREATE TABLE test_database.test_table_1 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;") + 
mysql_node.query("INSERT INTO test_database.test_table_1 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")") + mysql_node.query("INSERT INTO test_database.test_table_1 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)") + mysql_node.query("INSERT INTO test_database.test_table_1 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")") + mysql_node.query("INSERT INTO test_database.test_table_1 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)") + + clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(service_name)) + assert "test_database" in clickhouse_node.query("SHOW DATABASES") + check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\n") + check_query(clickhouse_node, "SELECT * FROM test_database.test_table_1 ORDER BY key FORMAT TSV", + "1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n" + "2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n" + "3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n" + "4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n") + + mysql_node.query("CREATE TABLE test_database.test_table_2 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;") + mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")") + mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)") + mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")") + mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)") + check_query(clickhouse_node, "SELECT * FROM test_database.test_table_2 ORDER BY key FORMAT TSV", + "1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n" + "2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n" + "3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n" + "4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." 
+ ('0' * 29) + "1\n") + clickhouse_node.query("DROP DATABASE test_database") + mysql_node.query("DROP DATABASE test_database") + + + def drop_table_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name): mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") mysql_node.query("CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;") diff --git a/tests/integration/test_materialize_mysql_database/test.py b/tests/integration/test_materialize_mysql_database/test.py index c7314fb50d9..81a69dd7c54 100644 --- a/tests/integration/test_materialize_mysql_database/test.py +++ b/tests/integration/test_materialize_mysql_database/test.py @@ -94,10 +94,13 @@ def started_mysql_8_0(): def test_materialize_database_dml_with_mysql_5_7(started_cluster, started_mysql_5_7): materialize_with_ddl.dml_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7, "mysql1") + materialize_with_ddl.materialize_mysql_database_with_datetime_and_decimal(clickhouse_node, started_mysql_5_7, "mysql1") def test_materialize_database_dml_with_mysql_8_0(started_cluster, started_mysql_8_0): materialize_with_ddl.dml_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0, "mysql8_0") + materialize_with_ddl.materialize_mysql_database_with_datetime_and_decimal(clickhouse_node, started_mysql_8_0, "mysql8_0") + def test_materialize_database_ddl_with_mysql_5_7(started_cluster, started_mysql_5_7): diff --git a/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml index d097675ca63..343f248c5fb 100644 --- a/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml +++ b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml @@ -25,4 +25,8 @@ + + + 0 + diff --git a/tests/integration/test_merge_tree_s3_with_cache/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3_with_cache/configs/config.d/storage_conf.xml index b32770095fc..f3b7f959ce9 100644 --- a/tests/integration/test_merge_tree_s3_with_cache/configs/config.d/storage_conf.xml +++ b/tests/integration/test_merge_tree_s3_with_cache/configs/config.d/storage_conf.xml @@ -18,4 +18,8 @@ + + + 0 + diff --git a/tests/integration/test_merge_tree_s3_with_cache/test.py b/tests/integration/test_merge_tree_s3_with_cache/test.py index 25c08777ae5..d5d6db2fb77 100644 --- a/tests/integration/test_merge_tree_s3_with_cache/test.py +++ b/tests/integration/test_merge_tree_s3_with_cache/test.py @@ -40,7 +40,8 @@ def get_query_stat(instance, hint): return result -def test_write_is_cached(cluster): +@pytest.mark.parametrize("min_rows_for_wide_part,read_requests", [(0, 2), (8192, 1)]) +def test_write_is_cached(cluster, min_rows_for_wide_part, read_requests): node = cluster.instances["node"] node.query( @@ -50,8 +51,8 @@ def test_write_is_cached(cluster): data String ) ENGINE=MergeTree() ORDER BY id - SETTINGS storage_policy='s3' - """ + SETTINGS storage_policy='s3', min_rows_for_wide_part={} + """.format(min_rows_for_wide_part) ) node.query("SYSTEM FLUSH LOGS") @@ -63,12 +64,12 @@ def test_write_is_cached(cluster): assert node.query(select_query) == "(0,'data'),(1,'data')" stat = get_query_stat(node, select_query) - assert stat["S3ReadRequestsCount"] == 2 # Only .bin files should be accessed from S3. + assert stat["S3ReadRequestsCount"] == read_requests # Only .bin files should be accessed from S3. 
node.query("DROP TABLE IF EXISTS s3_test NO DELAY") - -def test_read_after_cache_is_wiped(cluster): +@pytest.mark.parametrize("min_rows_for_wide_part,all_files,bin_files", [(0, 4, 2), (8192, 2, 1)]) +def test_read_after_cache_is_wiped(cluster, min_rows_for_wide_part, all_files, bin_files): node = cluster.instances["node"] node.query( @@ -78,8 +79,8 @@ def test_read_after_cache_is_wiped(cluster): data String ) ENGINE=MergeTree() ORDER BY id - SETTINGS storage_policy='s3' - """ + SETTINGS storage_policy='s3', min_rows_for_wide_part={} + """.format(min_rows_for_wide_part) ) node.query("SYSTEM FLUSH LOGS") @@ -93,12 +94,12 @@ def test_read_after_cache_is_wiped(cluster): select_query = "SELECT * FROM s3_test" node.query(select_query) stat = get_query_stat(node, select_query) - assert stat["S3ReadRequestsCount"] == 4 # .mrk and .bin files should be accessed from S3. + assert stat["S3ReadRequestsCount"] == all_files # .mrk and .bin files should be accessed from S3. # After cache is populated again, only .bin files should be accessed from S3. select_query = "SELECT * FROM s3_test order by id FORMAT Values" assert node.query(select_query) == "(0,'data'),(1,'data')" stat = get_query_stat(node, select_query) - assert stat["S3ReadRequestsCount"] == 2 + assert stat["S3ReadRequestsCount"] == bin_files node.query("DROP TABLE IF EXISTS s3_test NO DELAY") diff --git a/tests/integration/test_mutations_hardlinks/configs/wide_parts_only.xml b/tests/integration/test_mutations_hardlinks/configs/wide_parts_only.xml new file mode 100644 index 00000000000..42e2173f718 --- /dev/null +++ b/tests/integration/test_mutations_hardlinks/configs/wide_parts_only.xml @@ -0,0 +1,6 @@ + + + 0 + 0 + + diff --git a/tests/integration/test_mutations_hardlinks/test.py b/tests/integration/test_mutations_hardlinks/test.py index 103cf7c2e36..b1e538b123b 100644 --- a/tests/integration/test_mutations_hardlinks/test.py +++ b/tests/integration/test_mutations_hardlinks/test.py @@ -8,7 +8,7 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1') +node1 = cluster.add_instance('node1', main_configs=['configs/wide_parts_only.xml']) @pytest.fixture(scope="module") diff --git a/tests/integration/test_mutations_with_merge_tree/configs/users.xml b/tests/integration/test_mutations_with_merge_tree/configs/users.xml index e1dd7fb0638..47dea62971b 100644 --- a/tests/integration/test_mutations_with_merge_tree/configs/users.xml +++ b/tests/integration/test_mutations_with_merge_tree/configs/users.xml @@ -3,6 +3,8 @@ 500 + 1 + 1 diff --git a/tests/integration/test_mutations_with_merge_tree/test.py b/tests/integration/test_mutations_with_merge_tree/test.py index 019f8c2ea40..25bc0df8e7c 100644 --- a/tests/integration/test_mutations_with_merge_tree/test.py +++ b/tests/integration/test_mutations_with_merge_tree/test.py @@ -16,7 +16,7 @@ def started_cluster(): instance_test_mutations.query( '''CREATE TABLE test_mutations_with_ast_elements(date Date, a UInt64, b String) ENGINE = MergeTree(date, (a, date), 8192)''') instance_test_mutations.query( - '''INSERT INTO test_mutations_with_ast_elements SELECT '2019-07-29' AS date, 1, toString(number) FROM numbers(1)''') + '''INSERT INTO test_mutations_with_ast_elements SELECT '2019-07-29' AS date, 1, toString(number) FROM numbers(1) SETTINGS force_index_by_date = 0, force_primary_key = 0''') yield cluster finally: cluster.shutdown() @@ -38,14 +38,14 @@ def test_mutations_with_merge_background_task(started_cluster): 
instance_test_mutations.query('''DETACH TABLE test_mutations_with_ast_elements''') instance_test_mutations.query('''ATTACH TABLE test_mutations_with_ast_elements''') return int(instance.query( - "SELECT sum(is_done) FROM system.mutations WHERE table = 'test_mutations_with_ast_elements'").rstrip()) + "SELECT sum(is_done) FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0").rstrip()) if get_done_mutations(instance_test_mutations) == 100: all_done = True break print instance_test_mutations.query( - "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' FORMAT TSVWithNames") + "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames") assert all_done @@ -59,4 +59,4 @@ def test_mutations_with_truncate_table(started_cluster): instance_test_mutations.query("TRUNCATE TABLE test_mutations_with_ast_elements") assert instance_test_mutations.query( - "SELECT COUNT() FROM system.mutations WHERE table = 'test_mutations_with_ast_elements'").rstrip() == '0' + "SELECT COUNT() FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0").rstrip() == '0' diff --git a/tests/integration/test_partition/test.py b/tests/integration/test_partition/test.py index 58a37c405cb..5b27ff94ddb 100644 --- a/tests/integration/test_partition/test.py +++ b/tests/integration/test_partition/test.py @@ -176,7 +176,7 @@ def test_attach_check_all_parts(attach_check_all_parts_table): exec_bash('cp -pr {} {}'.format(path_to_detached + '0_3_3_0', path_to_detached + 'deleting_0_7_7_0')) error = instance.client.query_and_get_error("ALTER TABLE test.attach_partition ATTACH PARTITION 0") - assert 0 <= error.find('No columns in part 0_5_5_0') + assert 0 <= error.find('No columns in part 0_5_5_0') or 0 <= error.find('No columns.txt in part 0_5_5_0') parts = q("SElECT name FROM system.parts WHERE table='attach_partition' AND database='test' ORDER BY name") assert TSV(parts) == TSV('1_2_2_0\n1_4_4_0') diff --git a/tests/integration/test_polymorphic_parts/configs/compact_parts.xml b/tests/integration/test_polymorphic_parts/configs/compact_parts.xml index e14c3f0ceae..5b3afe65d92 100644 --- a/tests/integration/test_polymorphic_parts/configs/compact_parts.xml +++ b/tests/integration/test_polymorphic_parts/configs/compact_parts.xml @@ -1,5 +1,6 @@ 512 + 0 diff --git a/tests/integration/test_polymorphic_parts/test.py b/tests/integration/test_polymorphic_parts/test.py index 1729817cd53..dbbf5c0b4ff 100644 --- a/tests/integration/test_polymorphic_parts/test.py +++ b/tests/integration/test_polymorphic_parts/test.py @@ -44,10 +44,10 @@ def create_tables(name, nodes, node_settings, shard): ORDER BY id SETTINGS index_granularity = 64, index_granularity_bytes = {index_granularity_bytes}, min_rows_for_wide_part = {min_rows_for_wide_part}, min_rows_for_compact_part = {min_rows_for_compact_part}, + min_bytes_for_wide_part = 0, min_bytes_for_compact_part = 0, in_memory_parts_enable_wal = 1 '''.format(name=name, shard=shard, repl=i, **settings)) - def create_tables_old_format(name, nodes, shard): for i, node in enumerate(nodes): node.query( diff --git a/tests/integration/test_replicated_merge_tree_config/__init__.py b/tests/integration/test_replicated_merge_tree_config/__init__.py new file mode 100644 index
00000000000..e69de29bb2d diff --git a/tests/integration/test_replicated_merge_tree_config/configs/config.xml b/tests/integration/test_replicated_merge_tree_config/configs/config.xml new file mode 100644 index 00000000000..d760d05f1bc --- /dev/null +++ b/tests/integration/test_replicated_merge_tree_config/configs/config.xml @@ -0,0 +1,9 @@ + + + + 100 + + + 200 + + diff --git a/tests/integration/test_replicated_merge_tree_config/test.py b/tests/integration/test_replicated_merge_tree_config/test.py new file mode 100644 index 00000000000..2a7725960bf --- /dev/null +++ b/tests/integration/test_replicated_merge_tree_config/test.py @@ -0,0 +1,36 @@ +import pytest +from helpers.cluster import ClickHouseCluster +import logging + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance("node", main_configs=["configs/config.xml"], with_zookeeper=True) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def drop_table(start_cluster): + yield + for node in cluster.instances.values(): + node.query("DROP TABLE IF EXISTS test1") + node.query("DROP TABLE IF EXISTS test2") + + +def test_replicated_merge_tree_settings(start_cluster): + node.query("CREATE TABLE test1 (id Int64) ENGINE MergeTree ORDER BY id") + node.query( + "CREATE TABLE test2 (id Int64) ENGINE ReplicatedMergeTree('/clickhouse/test', 'test') ORDER BY id" + ) + + assert "index_granularity = 100" in node.query("SHOW CREATE test1") + assert "index_granularity = 200" in node.query("SHOW CREATE test2") diff --git a/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml index b32770095fc..f3b7f959ce9 100644 --- a/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml +++ b/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml @@ -18,4 +18,8 @@ + + + 0 + diff --git a/tests/integration/test_replicated_merge_tree_s3/test.py b/tests/integration/test_replicated_merge_tree_s3/test.py index de6f5e9f868..1414905759a 100644 --- a/tests/integration/test_replicated_merge_tree_s3/test.py +++ b/tests/integration/test_replicated_merge_tree_s3/test.py @@ -32,7 +32,8 @@ def cluster(): FILES_OVERHEAD = 1 FILES_OVERHEAD_PER_COLUMN = 2 # Data and mark files -FILES_OVERHEAD_PER_PART = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6 + 1 +FILES_OVERHEAD_PER_PART_WIDE = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6 + 1 +FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1 def random_string(length): @@ -46,7 +47,7 @@ def generate_values(date_str, count, sign=1): return ",".join(["('{}',{},'{}')".format(x, y, z) for x, y, z in data]) -def create_table(cluster): +def create_table(cluster, additional_settings=None): create_table_statement = """ CREATE TABLE s3_test ( dt Date, @@ -58,6 +59,9 @@ def create_table(cluster): ORDER BY (dt, id) SETTINGS storage_policy='s3' """ + if additional_settings: + create_table_statement += "," + create_table_statement += additional_settings for node in cluster.instances.values(): node.query(create_table_statement) @@ -74,9 +78,15 @@ def drop_table(cluster): for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')): minio.remove_object(cluster.minio_bucket, obj.object_name) - -def test_insert_select_replicated(cluster): - create_table(cluster) +@pytest.mark.parametrize( + 
"min_rows_for_wide_part,files_per_part", + [ + (0, FILES_OVERHEAD_PER_PART_WIDE), + (8192, FILES_OVERHEAD_PER_PART_COMPACT) + ] +) +def test_insert_select_replicated(cluster, min_rows_for_wide_part, files_per_part): + create_table(cluster, additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part)) all_values = "" for node_idx in range(1, 4): @@ -93,5 +103,4 @@ def test_insert_select_replicated(cluster): settings={"select_sequential_consistency": 1}) == all_values minio = cluster.minio_client - assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 3 * ( - FILES_OVERHEAD + FILES_OVERHEAD_PER_PART * 3) + assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 3 * (FILES_OVERHEAD + files_per_part * 3) diff --git a/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml b/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml index 47bf9f56cdd..e96bde89ca9 100644 --- a/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml @@ -83,6 +83,18 @@ + + + +
+ jbod1 +
+ + external + false + +
+
diff --git a/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py index ad822bc6545..377ee0e5d75 100644 --- a/tests/integration/test_ttl_move/test.py +++ b/tests/integration/test_ttl_move/test.py @@ -1102,3 +1102,48 @@ limitations under the License.""" finally: node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name)) + + +@pytest.mark.parametrize("name,dest_type,engine", [ + ("mt_test_disabled_ttl_move_on_insert_work", "DISK", "MergeTree()"), + ("mt_test_disabled_ttl_move_on_insert_work", "VOLUME", "MergeTree()"), + ("replicated_mt_test_disabled_ttl_move_on_insert_work", "DISK", "ReplicatedMergeTree('/clickhouse/replicated_test_disabled_ttl_move_on_insert_work', '1')"), + ("replicated_mt_test_disabled_ttl_move_on_insert_work", "VOLUME", "ReplicatedMergeTree('/clickhouse/replicated_test_disabled_ttl_move_on_insert_work', '1')"), +]) +def test_disabled_ttl_move_on_insert(started_cluster, name, dest_type, engine): + try: + node1.query(""" + CREATE TABLE {name} ( + s1 String, + d1 DateTime + ) ENGINE = {engine} + ORDER BY tuple() + TTL d1 TO {dest_type} 'external' + SETTINGS storage_policy='jbod_without_instant_ttl_move' + """.format(name=name, dest_type=dest_type, engine=engine)) + + node1.query("SYSTEM STOP MOVES {}".format(name)) + + data = [] # 10MB in total + for i in range(10): + data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format( + time.time() - 1))) # 1MB row + + node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + + used_disks = get_used_disks_for_table(node1, name) + assert set(used_disks) == {"jbod1"} + assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + + node1.query("SYSTEM START MOVES {}".format(name)) + time.sleep(3) + + used_disks = get_used_disks_for_table(node1, name) + assert set(used_disks) == {"external"} + assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + + finally: + try: + node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) + except: + pass diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index 6e988023951..878db2da11f 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -29,7 +29,7 @@ def drop_table(nodes, table_name): node.query("DROP TABLE IF EXISTS {} NO DELAY".format(table_name)) time.sleep(1) - +# Column TTL works only with wide parts, because it's very expensive to apply it for compact parts def test_ttl_columns(started_cluster): drop_table([node1, node2], "test_ttl") for node in [node1, node2]: @@ -37,7 +37,7 @@ def test_ttl_columns(started_cluster): ''' CREATE TABLE test_ttl(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}') - ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0; + ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0; '''.format(replica=node.name)) node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-10 00:00:00'), 1, 1, 3)") @@ -58,7 +58,8 @@ def test_merge_with_ttl_timeout(started_cluster): ''' CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') - ORDER BY id 
PARTITION BY toDayOfMonth(date); + ORDER BY id PARTITION BY toDayOfMonth(date) + SETTINGS min_bytes_for_wide_part=0; '''.format(replica=node.name, table=table)) node1.query("SYSTEM STOP TTL MERGES {table}".format(table=table)) @@ -204,7 +205,7 @@ def test_ttl_double_delete_rule_returns_error(started_cluster): CREATE TABLE test_ttl(date DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) - TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0; + TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0 '''.format(replica=node1.name)) assert False except client.QueryRuntimeException: @@ -254,6 +255,7 @@ limitations under the License.""" ) ENGINE = {engine} ORDER BY tuple() TTL d1 + INTERVAL 1 DAY DELETE + SETTINGS min_bytes_for_wide_part=0 """.format(name=name, engine=engine)) node1.query("""ALTER TABLE {name} MODIFY COLUMN s1 String TTL d1 + INTERVAL 1 SECOND""".format(name=name)) diff --git a/tests/performance/array_index_low_cardinality_strings.xml b/tests/performance/array_index_low_cardinality_strings.xml index bbfea083f0a..896a5923a9e 100644 --- a/tests/performance/array_index_low_cardinality_strings.xml +++ b/tests/performance/array_index_low_cardinality_strings.xml @@ -1,4 +1,4 @@ - + DROP TABLE IF EXISTS perf_lc_str CREATE TABLE perf_lc_str( str LowCardinality(String), diff --git a/tests/performance/collations.xml b/tests/performance/collations.xml index 17b2d36b7e3..40153a48d07 100644 --- a/tests/performance/collations.xml +++ b/tests/performance/collations.xml @@ -1,4 +1,4 @@ - + diff --git a/tests/performance/column_column_comparison.xml b/tests/performance/column_column_comparison.xml index 2b59a65a54b..dd77ba24043 100644 --- a/tests/performance/column_column_comparison.xml +++ b/tests/performance/column_column_comparison.xml @@ -1,4 +1,4 @@ - + comparison diff --git a/tests/performance/columns_hashing.xml b/tests/performance/columns_hashing.xml index ac3d4b1b33b..3ea2e013acc 100644 --- a/tests/performance/columns_hashing.xml +++ b/tests/performance/columns_hashing.xml @@ -1,15 +1,12 @@ - - - columns_hashing - - + - test.hits + hits_10m_single + hits_100m_single - - - - - + select sum(UserID + 1 in (select UserID from hits_10m_single)) from hits_10m_single + select sum((UserID + 1, RegionID) in (select UserID, RegionID from hits_10m_single)) from hits_10m_single + select sum(URL in (select URL from hits_10m_single where URL != '')) from hits_10m_single + select sum(MobilePhoneModel in (select MobilePhoneModel from hits_100m_single where MobilePhoneModel != '')) from hits_100m_single + select sum((MobilePhoneModel, UserID + 1) in (select MobilePhoneModel, UserID from hits_100m_single where MobilePhoneModel != '')) from hits_100m_single diff --git a/tests/performance/count.xml b/tests/performance/count.xml index b75fd4e4df5..4b8b00f48db 100644 --- a/tests/performance/count.xml +++ b/tests/performance/count.xml @@ -1,4 +1,4 @@ - + CREATE TABLE data(k UInt64, v UInt64) ENGINE = MergeTree ORDER BY k INSERT INTO data SELECT number, 1 from numbers(10000000) diff --git a/tests/performance/date_parsing.xml b/tests/performance/date_parsing.xml index ffe4ffb9799..15d267dbde5 100644 --- a/tests/performance/date_parsing.xml +++ b/tests/performance/date_parsing.xml @@ -1,4 +1,4 @@ - + hits_100m_single diff --git a/tests/performance/decimal_aggregates.xml b/tests/performance/decimal_aggregates.xml index 142d9388404..615c3201843 100644 --- 
a/tests/performance/decimal_aggregates.xml +++ b/tests/performance/decimal_aggregates.xml @@ -1,4 +1,4 @@ - + 35G diff --git a/tests/performance/empty_string_serialization.xml b/tests/performance/empty_string_serialization.xml index 303283f08c7..d82bcf998aa 100644 --- a/tests/performance/empty_string_serialization.xml +++ b/tests/performance/empty_string_serialization.xml @@ -1,4 +1,4 @@ - +