Merge branch 'master' into support_orc_reader_timezone

commit 99015189a4

.gitattributes (vendored)
@@ -2,3 +2,4 @@ contrib/* linguist-vendored
*.h linguist-language=C++
tests/queries/0_stateless/data_json/* binary
tests/queries/0_stateless/*.reference -crlf
+src/Core/SettingsChangesHistory.cpp merge=union
@@ -13,5 +13,4 @@ rules:
    level: warning
  comments:
    min-spaces-from-content: 1
-  document-start:
-    present: false
+  document-start: disable
@@ -34,7 +34,7 @@
* Add `_time` virtual column to file alike storages (s3/file/hdfs/url/azureBlobStorage). [#64947](https://github.com/ClickHouse/ClickHouse/pull/64947) ([Ilya Golshtein](https://github.com/ilejn)).
* Introduced new functions `base64URLEncode`, `base64URLDecode` and `tryBase64URLDecode`. [#64991](https://github.com/ClickHouse/ClickHouse/pull/64991) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
* Add new function `editDistanceUTF8`, which calculates the [edit distance](https://en.wikipedia.org/wiki/Edit_distance) between two UTF8 strings. [#65269](https://github.com/ClickHouse/ClickHouse/pull/65269) ([LiuNeng](https://github.com/liuneng1994)).
-* Add `http_response_headers` setting to support custom response headers in custom HTTP handlers. [#63562](https://github.com/ClickHouse/ClickHouse/pull/63562) ([Grigorii](https://github.com/GSokol)).
+* Add `http_response_headers` configuration to support custom response headers in custom HTTP handlers. [#63562](https://github.com/ClickHouse/ClickHouse/pull/63562) ([Grigorii](https://github.com/GSokol)).
* Added a new table function `loop` to support returning query results in an infinite loop. [#63452](https://github.com/ClickHouse/ClickHouse/pull/63452) ([Sariel](https://github.com/sarielwxm)). This is useful for testing.
* Introduced two additional columns in the `system.query_log`: `used_privileges` and `missing_privileges`. `used_privileges` is populated with the privileges that were checked during query execution, and `missing_privileges` contains required privileges that are missing. [#64597](https://github.com/ClickHouse/ClickHouse/pull/64597) ([Alexey Katsman](https://github.com/alexkats)).
* Added a setting `output_format_pretty_display_footer_column_names` which when enabled displays column names at the end of the table for long tables (50 rows by default), with the threshold value for minimum number of rows controlled by `output_format_pretty_display_footer_column_names_min_rows`. [#65144](https://github.com/ClickHouse/ClickHouse/pull/65144) ([Shaun Struwig](https://github.com/Blargian)).
@@ -319,7 +319,6 @@ endif()
# Disable floating-point expression contraction in order to get consistent floating point calculation results across platforms
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffp-contract=off")

# Our built-in unwinder only supports DWARF version up to 4.
set (DEBUG_INFO_FLAGS "-g")

# Disable omit frame pointer compiler optimization using -fno-omit-frame-pointer
@@ -333,15 +332,15 @@ endif()

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
-set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
+set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")

set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
-set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
+set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")

set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
-set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
+set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")

if (OS_DARWIN)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
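Why `-ffp-contract=off` matters for cross-platform consistency: with contraction enabled, a compiler may fuse `a * b + c` into a single fused multiply-add, which rounds once instead of twice, so results can differ between platforms. A minimal sketch (illustration only; whether the difference is visible depends on the compiler and flags):

    #include <cstdio>

    int main()
    {
        double a = 1.0 + 1.0 / (1 << 30);   // 1 + 2^-30, exactly representable
        double c = -(1.0 + 2.0 / (1 << 30)); // -(1 + 2^-29), exactly representable
        // Separately rounded: a * a rounds to 1 + 2^-29, so a * a + c == 0.
        // If the compiler contracts this into fma(a, a, c), the exact 2^-60
        // cross term survives and the result is ~8.7e-19 instead of 0.
        printf("%g\n", a * a + c);
    }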
@@ -1,32 +1,3 @@
// Based on https://github.com/amdn/itoa and combined with our optimizations
//
//=== itoa.cpp - Fast integer to ascii conversion --*- C++ -*-//
//
// The MIT License (MIT)
// Copyright (c) 2016 Arturo Martin-de-Nicolas
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//===----------------------------------------------------------------------===//

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>
#include <base/defines.h>
#include <base/extended_types.h>
@@ -34,99 +5,15 @@

namespace
{
template <typename T>
ALWAYS_INLINE inline constexpr T pow10(size_t x)
{
    return x ? 10 * pow10<T>(x - 1) : 1;
}

// Division by a power of 10 is implemented using a multiplicative inverse.
// This strength reduction is also done by optimizing compilers, but
// presently the fastest results are produced by using the values
// for the multiplication and the shift as given by the algorithm
// described by Agner Fog in "Optimizing Subroutines in Assembly Language"
//
// http://www.agner.org/optimize/optimizing_assembly.pdf
//
// "Integer division by a constant (all processors)
// A floating point number can be divided by a constant by multiplying
// with the reciprocal. If we want to do the same with integers, we have
// to scale the reciprocal by 2n and then shift the product to the right
// by n. There are various algorithms for finding a suitable value of n
// and compensating for rounding errors. The algorithm described below
// was invented by Terje Mathisen, Norway, and not published elsewhere."

/// Division by constant is performed by:
/// 1. Adding 1 if needed;
/// 2. Multiplying by another constant;
/// 3. Shifting right by another constant.
template <typename UInt, bool add_, UInt multiplier_, unsigned shift_>
struct Division
{
    static constexpr bool add{add_};
    static constexpr UInt multiplier{multiplier_};
    static constexpr unsigned shift{shift_};
};
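To make the `Division` parameters concrete: the `uint8_t` entry of the table below uses multiplier 205 and shift 11, i.e. `n / 10 == (n * 205) >> 11` for every 8-bit `n`. A standalone check (sketch for illustration only):

    #include <cassert>

    int main()
    {
        // multiply + shift replaces the division for the whole 8-bit range
        for (unsigned n = 0; n < 256; ++n)
            assert(n / 10 == (n * 205u) >> 11);
    }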

/// Select a type with appropriate number of bytes from the list of types.
/// First parameter is the number of bytes requested. Then goes a list of types with 1, 2, 4, ... number of bytes.
/// Example: SelectType<4, uint8_t, uint16_t, uint32_t, uint64_t> will select uint32_t.
template <size_t N, typename T, typename... Ts>
struct SelectType
{
    using Result = typename SelectType<N / 2, Ts...>::Result;
};

template <typename T, typename... Ts>
struct SelectType<1, T, Ts...>
{
    using Result = T;
};
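As a sketch, the example from the comment can be checked at compile time (`<type_traits>` is already included above):

    // SelectType halves N until it reaches 1, consuming one type per halving:
    static_assert(std::is_same_v<SelectType<4, uint8_t, uint16_t, uint32_t, uint64_t>::Result, uint32_t>);
    static_assert(std::is_same_v<SelectType<2, uint8_t, uint16_t, uint32_t, uint64_t>::Result, uint16_t>);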

/// Division by 10^N where N is the size of the type.
template <size_t N>
using DivisionBy10PowN = typename SelectType<
    N,
    Division<uint8_t, false, 205U, 11>, /// divide by 10
    Division<uint16_t, true, 41943U, 22>, /// divide by 100
    Division<uint32_t, false, 3518437209U, 45>, /// divide by 10000
    Division<uint64_t, false, 12379400392853802749ULL, 90> /// divide by 100000000
>::Result;

template <size_t N>
using UnsignedOfSize = typename SelectType<N, uint8_t, uint16_t, uint32_t, uint64_t, __uint128_t>::Result;

/// Holds the result of dividing an unsigned N-byte variable by 10^N, resulting in a quotient and a remainder.
template <size_t N>
struct QuotientAndRemainder
{
    UnsignedOfSize<N> quotient; // quotient with fewer than 2*N decimal digits
    UnsignedOfSize<N / 2> remainder; // remainder with at most N decimal digits
};

template <size_t N>
QuotientAndRemainder<N> inline split(UnsignedOfSize<N> value)
{
    constexpr DivisionBy10PowN<N> division;

    UnsignedOfSize<N> quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift;
    UnsignedOfSize<N / 2> remainder = static_cast<UnsignedOfSize<N / 2>>(value - quotient * pow10<UnsignedOfSize<N / 2>>(N));

    return {quotient, remainder};
}

-ALWAYS_INLINE inline char * outDigit(char * p, uint8_t value)
+ALWAYS_INLINE inline char * outOneDigit(char * p, uint8_t value)
 {
     *p = '0' + value;
-    ++p;
-    return p;
+    return p + 1;
 }
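A worked example of `split` (illustration only): for the 4-byte value 12345678, the `uint32_t` row of the table gives quotient 1234 and remainder 5678, i.e. division by 10^4 done with one multiplication and one shift:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t value = 12345678;
        // Magic pair (3518437209, 45) from the DivisionBy10PowN table above.
        uint32_t quotient = uint32_t((3518437209ULL * value) >> 45); // 1234
        uint32_t remainder = value - quotient * 10000;               // 5678
        assert(quotient == 1234 && remainder == 5678);
    }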

// Using a lookup table to convert binary numbers from 0 to 99
// into ascii characters as described by Andrei Alexandrescu in
// https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920/

const char digits[201] = "00010203040506070809"
                         "10111213141516171819"
                         "20212223242526272829"
@@ -137,7 +24,6 @@ const char digits[201] = "00010203040506070809"
                         "70717273747576777879"
                         "80818283848586878889"
                         "90919293949596979899";

ALWAYS_INLINE inline char * outTwoDigits(char * p, uint8_t value)
{
    memcpy(p, &digits[value * 2], 2);
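An aside on the table above: it stores all 100 two-digit pairs back to back (200 characters plus the terminating NUL, hence 201 bytes), so converting a value below 100 to characters is a single two-byte copy. A self-contained sketch of the idea:

    #include <cassert>
    #include <cstring>

    int main()
    {
        char digits[201];
        for (int i = 0; i < 100; ++i) // rebuild the same table programmatically
        {
            digits[i * 2] = char('0' + i / 10);
            digits[i * 2 + 1] = char('0' + i % 10);
        }
        digits[200] = '\0';

        char buf[3] = {};
        memcpy(buf, &digits[57 * 2], 2); // no division in the output path
        assert(strcmp(buf, "57") == 0);
    }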
@@ -145,153 +31,260 @@ ALWAYS_INLINE inline char * outTwoDigits(char * p, uint8_t value)
    return p;
}

namespace convert
namespace jeaiii
{
template <typename UInt, size_t N = sizeof(UInt)>
char * head(char * p, UInt u);
template <typename UInt, size_t N = sizeof(UInt)>
char * tail(char * p, UInt u);
/*
MIT License

//===----------------------------------------------------------===//
// head: find most significant digit, skip leading zeros
//===----------------------------------------------------------===//
Copyright (c) 2022 James Edward Anhalt III - https://github.com/jeaiii/itoa

// "x" contains quotient and remainder after division by 10^N
// quotient is less than 10^N
template <size_t N>
ALWAYS_INLINE inline char * head(char * p, QuotientAndRemainder<N> x)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
struct pair
{
    p = head(p, UnsignedOfSize<N / 2>(x.quotient));
    p = tail(p, x.remainder);
    return p;
}
    char dd[2];
    constexpr pair(char c) : dd{c, '\0'} { } /// NOLINT(google-explicit-constructor)
    constexpr pair(int n) : dd{"0123456789"[n / 10], "0123456789"[n % 10]} { } /// NOLINT(google-explicit-constructor)
};

// "u" is less than 10^2*N
template <typename UInt, size_t N>
ALWAYS_INLINE inline char * head(char * p, UInt u)
constexpr struct
{
    return u < pow10<UnsignedOfSize<N>>(N) ? head(p, UnsignedOfSize<N / 2>(u)) : head<N>(p, split<N>(u));
}
    pair dd[100]{
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, //
        10, 11, 12, 13, 14, 15, 16, 17, 18, 19, //
        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, //
        30, 31, 32, 33, 34, 35, 36, 37, 38, 39, //
        40, 41, 42, 43, 44, 45, 46, 47, 48, 49, //
        50, 51, 52, 53, 54, 55, 56, 57, 58, 59, //
        60, 61, 62, 63, 64, 65, 66, 67, 68, 69, //
        70, 71, 72, 73, 74, 75, 76, 77, 78, 79, //
        80, 81, 82, 83, 84, 85, 86, 87, 88, 89, //
        90, 91, 92, 93, 94, 95, 96, 97, 98, 99, //
    };
    pair fd[100]{
        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', //
        10, 11, 12, 13, 14, 15, 16, 17, 18, 19, //
        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, //
        30, 31, 32, 33, 34, 35, 36, 37, 38, 39, //
        40, 41, 42, 43, 44, 45, 46, 47, 48, 49, //
        50, 51, 52, 53, 54, 55, 56, 57, 58, 59, //
        60, 61, 62, 63, 64, 65, 66, 67, 68, 69, //
        70, 71, 72, 73, 74, 75, 76, 77, 78, 79, //
        80, 81, 82, 83, 84, 85, 86, 87, 88, 89, //
        90, 91, 92, 93, 94, 95, 96, 97, 98, 99, //
    };
} digits;
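The point of having both tables: `dd` entries are always two digits ("07", "42", ...), while the `fd` ("first digits") entries for 0-9 hold `{digit, '\0'}`. The caller can therefore always store two bytes and advance the output pointer by only one when the leading pair turned out to be a single digit. A minimal sketch of that trick:

    #include <cassert>
    #include <cstring>

    struct pair { char dd[2]; }; // same layout as above

    int main()
    {
        pair fd7{{'7', '\0'}};  // what fd[7] holds
        char buf[3] = {};
        memcpy(buf, fd7.dd, 2); // unconditionally write two bytes...
        assert(buf[0] == '7' && buf[1] == '\0'); // ...advance by one if single-digit
    }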

// recursion base case, selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * head<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
constexpr UInt64 mask24 = (UInt64(1) << 24) - 1;
constexpr UInt64 mask32 = (UInt64(1) << 32) - 1;
constexpr UInt64 mask57 = (UInt64(1) << 57) - 1;

template <bool, class, class F>
struct _cond
{
    return u < 10 ? outDigit(p, u) : outTwoDigits(p, u);
}

//===----------------------------------------------------------===//
// tail: produce all digits including leading zeros
//===----------------------------------------------------------===//

// recursive step, "u" is less than 10^2*N
template <typename UInt, size_t N>
ALWAYS_INLINE inline char * tail(char * p, UInt u)
    using type = F;
};
template <class T, class F>
struct _cond<true, T, F>
{
    QuotientAndRemainder<N> x = split<N>(u);
    p = tail(p, UnsignedOfSize<N / 2>(x.quotient));
    p = tail(p, x.remainder);
    return p;
}
    using type = T;
};
template <bool B, class T, class F>
using cond = typename _cond<B, T, F>::type;

// recursion base case, selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * tail<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
template <class T>
inline ALWAYS_INLINE char * to_text_from_integer(char * b, T i)
{
    return outTwoDigits(p, u);
}
    constexpr auto q = sizeof(T);
    using U = cond<q == 1, char8_t, cond<q <= sizeof(UInt16), UInt16, cond<q <= sizeof(UInt32), UInt32, UInt64>>>;

//===----------------------------------------------------------===//
// large values are >= 10^2*N
// where x contains quotient and remainder after division by 10^N
//===----------------------------------------------------------===//
template <size_t N>
ALWAYS_INLINE inline char * large(char * p, QuotientAndRemainder<N> x)
{
    QuotientAndRemainder<N> y = split<N>(x.quotient);
    p = head(p, UnsignedOfSize<N / 2>(y.quotient));
    p = tail(p, y.remainder);
    p = tail(p, x.remainder);
    return p;
}
    // convert bool to int before test with unary + to silence warning if T happens to be bool
    U const n = +i < 0 ? *b++ = '-', U(0) - U(i) : U(i);

//===----------------------------------------------------------===//
// handle values of "u" that might be >= 10^2*N
// where N is the size of "u" in bytes
//===----------------------------------------------------------===//
template <typename UInt, size_t N = sizeof(UInt)>
ALWAYS_INLINE inline char * uitoa(char * p, UInt u)
{
    if (u < pow10<UnsignedOfSize<N>>(N))
        return head(p, UnsignedOfSize<N / 2>(u));
    QuotientAndRemainder<N> x = split<N>(u);
    if (n < U(1e2))
    {
        /// This is changed from the original jeaiii implementation
        /// For small numbers the extra branch to call outOneDigit() is worth it as it saves some instructions
        /// and a memory access (no need to read digits.fd[n])
        /// This is not true for pure random numbers, but that's not the common use case of a database
        /// Original jeaiii code
        // *reinterpret_cast<pair *>(b) = digits.fd[n];
        // return n < 10 ? b + 1 : b + 2;
        return n < 10 ? outOneDigit(b, n) : outTwoDigits(b, n);
    }
    if (n < UInt32(1e6))
    {
        if (sizeof(U) == 1 || n < U(1e4))
        {
            auto f0 = UInt32(10 * (1 << 24) / 1e3 + 1) * n;
            *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 24];
            if constexpr (sizeof(U) == 1)
                b -= 1;
            else
                b -= n < U(1e3);
            auto f2 = (f0 & mask24) * 100;
            *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 24];
            return b + 4;
        }
        auto f0 = UInt64(10 * (1ull << 32ull) / 1e5 + 1) * n;
        *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 32];
        if constexpr (sizeof(U) == 2)
            b -= 1;
        else
            b -= n < U(1e5);
        auto f2 = (f0 & mask32) * 100;
        *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
        auto f4 = (f2 & mask32) * 100;
        *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
        return b + 6;
    }
    if (sizeof(U) == 4 || n < UInt64(1ull << 32ull))
    {
        if (n < U(1e8))
        {
            auto f0 = UInt64(10 * (1ull << 48ull) / 1e7 + 1) * n >> 16;
            *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 32];
            b -= n < U(1e7);
            auto f2 = (f0 & mask32) * 100;
            *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
            auto f4 = (f2 & mask32) * 100;
            *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
            auto f6 = (f4 & mask32) * 100;
            *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 32];
            return b + 8;
        }
        auto f0 = UInt64(10 * (1ull << 57ull) / 1e9 + 1) * n;
        *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 57];
        b -= n < UInt32(1e9);
        auto f2 = (f0 & mask57) * 100;
        *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 57];
        auto f4 = (f2 & mask57) * 100;
        *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 57];
        auto f6 = (f4 & mask57) * 100;
        *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 57];
        auto f8 = (f6 & mask57) * 100;
        *reinterpret_cast<pair *>(b + 8) = digits.dd[f8 >> 57];
        return b + 10;
    }

    return u < pow10<UnsignedOfSize<N>>(2 * N) ? head<N>(p, x) : large<N>(p, x);
}
    // if we get here U must be UInt64 but some compilers don't know that, so reassign n to a UInt64 to avoid warnings
    UInt32 z = n % UInt32(1e8);
    UInt64 u = n / UInt32(1e8);

// selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * uitoa<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
    if (u < 10)
        return outDigit(p, u);
    else if (u < 100)
        return outTwoDigits(p, u);
    if (u < UInt32(1e2))
    {
        // u can't be 1 digit (if u < 10 it would have been handled above as a 9 digit 32bit number)
        *reinterpret_cast<pair *>(b) = digits.dd[u];
        b += 2;
    }
    else if (u < UInt32(1e6))
    {
        if (u < UInt32(1e4))
        {
            auto f0 = UInt32(10 * (1 << 24) / 1e3 + 1) * u;
            *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 24];
            b -= u < UInt32(1e3);
            auto f2 = (f0 & mask24) * 100;
            *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 24];
            b += 4;
        }
        else
        {
            auto f0 = UInt64(10 * (1ull << 32ull) / 1e5 + 1) * u;
            *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 32];
            b -= u < UInt32(1e5);
            auto f2 = (f0 & mask32) * 100;
            *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
            auto f4 = (f2 & mask32) * 100;
            *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
            b += 6;
        }
    }
    else if (u < UInt32(1e8))
    {
        auto f0 = UInt64(10 * (1ull << 48ull) / 1e7 + 1) * u >> 16;
        *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 32];
        b -= u < UInt32(1e7);
        auto f2 = (f0 & mask32) * 100;
        *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
        auto f4 = (f2 & mask32) * 100;
        *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
        auto f6 = (f4 & mask32) * 100;
        *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 32];
        b += 8;
    }
    else if (u < UInt64(1ull << 32ull))
    {
        auto f0 = UInt64(10 * (1ull << 57ull) / 1e9 + 1) * u;
        *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 57];
        b -= u < UInt32(1e9);
        auto f2 = (f0 & mask57) * 100;
        *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 57];
        auto f4 = (f2 & mask57) * 100;
        *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 57];
        auto f6 = (f4 & mask57) * 100;
        *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 57];
        auto f8 = (f6 & mask57) * 100;
        *reinterpret_cast<pair *>(b + 8) = digits.dd[f8 >> 57];
        b += 10;
    }
    else
    {
        p = outDigit(p, u / 100);
        p = outTwoDigits(p, u % 100);
        return p;
        UInt32 y = u % UInt32(1e8);
        u /= UInt32(1e8);

        // u is 2, 3, or 4 digits (if u < 10 it would have been handled above)
        if (u < UInt32(1e2))
        {
            *reinterpret_cast<pair *>(b) = digits.dd[u];
            b += 2;
        }
        else
        {
            auto f0 = UInt32(10 * (1 << 24) / 1e3 + 1) * u;
            *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 24];
            b -= u < UInt32(1e3);
            auto f2 = (f0 & mask24) * 100;
            *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 24];
            b += 4;
        }
        // do 8 digits
        auto f0 = (UInt64((1ull << 48ull) / 1e6 + 1) * y >> 16) + 1;
        *reinterpret_cast<pair *>(b) = digits.dd[f0 >> 32];
        auto f2 = (f0 & mask32) * 100;
        *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
        auto f4 = (f2 & mask32) * 100;
        *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
        auto f6 = (f4 & mask32) * 100;
        *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 32];
        b += 8;
    }
}

//===----------------------------------------------------------===//
// handle unsigned and signed integral operands
//===----------------------------------------------------------===//

// itoa: handle unsigned integral operands (selected by SFINAE)
template <typename U>
requires(!std::is_signed_v<U> && std::is_integral_v<U>)
ALWAYS_INLINE inline char * itoa(U u, char * p)
{
    return convert::uitoa(p, u);
}

// itoa: handle signed integral operands (selected by SFINAE)
template <typename I, size_t N = sizeof(I)>
requires(std::is_signed_v<I> && std::is_integral_v<I>)
ALWAYS_INLINE inline char * itoa(I i, char * p)
{
    // Need "mask" to be filled with a copy of the sign bit.
    // If "i" is a negative value, then the result of "operator >>"
    // is implementation-defined, though usually it is an arithmetic
    // right shift that replicates the sign bit.
    // Use a conditional expression to be portable,
    // a good optimizing compiler generates an arithmetic right shift
    // and avoids the conditional branch.
    UnsignedOfSize<N> mask = i < 0 ? ~UnsignedOfSize<N>(0) : 0;
    // Now get the absolute value of "i" and cast to unsigned type UnsignedOfSize<N>.
    // Cannot use std::abs() because the result is undefined
    // in 2's complement systems for the most-negative value.
    // Want to avoid conditional branch for performance reasons since
    // CPU branch prediction will be ineffective when negative values
    // occur randomly.
    // Let "u" be "i" cast to unsigned type UnsignedOfSize<N>.
    // Subtract "u" from 2*u if "i" is positive or 0 if "i" is negative.
    // This yields the absolute value with the desired type without
    // using a conditional branch and without invoking undefined or
    // implementation-defined behavior:
    UnsignedOfSize<N> u = ((2 * UnsignedOfSize<N>(i)) & ~mask) - UnsignedOfSize<N>(i);
    // Unconditionally store a minus sign when producing digits
    // in a forward direction and increment the pointer only if
    // the value is in fact negative.
    // This avoids a conditional branch and is safe because we will
    // always produce at least one digit and it will overwrite the
    // minus sign when the value is not negative.
    *p = '-';
    p += (mask & 1);
    p = convert::uitoa(p, u);
    return p;
    // do 8 digits
    auto f0 = (UInt64((1ull << 48ull) / 1e6 + 1) * z >> 16) + 1;
    *reinterpret_cast<pair *>(b) = digits.dd[f0 >> 32];
    auto f2 = (f0 & mask32) * 100;
    *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
    auto f4 = (f2 & mask32) * 100;
    *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
    auto f6 = (f4 & mask32) * 100;
    *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 32];
    return b + 8;
}
}

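The constants of the form `10 * (1 << 24) / 1e3 + 1` in the code above implement a fixed-point reciprocal: after the multiplication the top bits of `f0` hold the two leading digits, and each subsequent `(f & mask) * 100` step shifts the next two digits into the top bits. A standalone check for the 4-digit case (sketch for illustration only):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t n = 5678;
        constexpr uint64_t mask24 = (uint64_t(1) << 24) - 1;
        uint32_t f0 = uint32_t(10 * (1 << 24) / 1e3 + 1) * n;
        assert((f0 >> 24) == 56);          // leading two digits
        uint64_t f2 = (f0 & mask24) * 100;
        assert((f2 >> 24) == 78);          // next two digits
    }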
@@ -303,7 +296,7 @@ ALWAYS_INLINE inline char * writeUIntText(UInt128 _x, char * p)
{
    /// If the highest 64bit item is empty, we can print just the lowest item as u64
    if (_x.items[UInt128::_impl::little(1)] == 0)
-        return convert::itoa(_x.items[UInt128::_impl::little(0)], p);
+        return jeaiii::to_text_from_integer(p, _x.items[UInt128::_impl::little(0)]);

    /// Doing operations using __int128 is faster and we already rely on this feature
    using T = unsigned __int128;
@@ -334,7 +327,7 @@ ALWAYS_INLINE inline char * writeUIntText(UInt128 _x, char * p)
        current_block += max_multiple_of_hundred_blocks;
    }

-    char * highest_part_print = convert::itoa(uint64_t(x), p);
+    char * highest_part_print = jeaiii::to_text_from_integer(p, uint64_t(x));
    for (int i = 0; i < current_block; i++)
    {
        outTwoDigits(highest_part_print, two_values[current_block - 1 - i]);
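The surrounding function prints a 128-bit value by peeling fixed-size decimal blocks and printing the highest remaining part with the 64-bit routine. A simplified sketch of the same chunking idea (using plain division instead of the multiplicative-inverse blocks the real code uses):

    #include <cstdio>

    int main()
    {
        unsigned __int128 x = (unsigned __int128)1234567890123456789ULL * 1000 + 123;
        unsigned char blocks[20]; // 128 bits is at most 39 digits, i.e. < 20 blocks
        int count = 0;
        while (x >= 100)
        {
            blocks[count++] = (unsigned char)(x % 100); // remember two digits
            x /= 100;
        }
        printf("%u", unsigned(x)); // highest one or two digits, no padding
        while (count--)
            printf("%02u", unsigned(blocks[count])); // inner blocks need zero padding
        printf("\n"); // prints 1234567890123456789123
    }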
@@ -450,12 +443,12 @@ ALWAYS_INLINE inline char * writeSIntText(T x, char * pos)

char * itoa(UInt8 i, char * p)
{
-    return convert::itoa(uint8_t(i), p);
+    return jeaiii::to_text_from_integer(p, uint8_t(i));
}

char * itoa(Int8 i, char * p)
{
-    return convert::itoa(int8_t(i), p);
+    return jeaiii::to_text_from_integer(p, int8_t(i));
}

char * itoa(UInt128 i, char * p)
@@ -481,7 +474,7 @@ char * itoa(Int256 i, char * p)
#define DEFAULT_ITOA(T) \
char * itoa(T i, char * p) \
{ \
-    return convert::itoa(i, p); \
+    return jeaiii::to_text_from_integer(p, i); \
}

#define FOR_MISSING_INTEGER_TYPES(M) \
@@ -235,8 +235,6 @@ namespace Net
        /// Note that simply closing a socket is not sufficient
        /// to be able to re-use it again.

-        Poco::Timespan getMaxTimeout();
-
    private:
        SecureSocketImpl(const SecureSocketImpl &);
        SecureSocketImpl & operator=(const SecureSocketImpl &);
@@ -250,6 +248,9 @@ namespace Net
        Session::Ptr _pSession;

        friend class SecureStreamSocketImpl;
+
+        Poco::Timespan getMaxTimeoutOrLimit();
+        /// Returns max(send, receive) if non-zero, otherwise the maximum timeout
    };

@@ -199,7 +199,7 @@ void SecureSocketImpl::connectSSL(bool performHandshake)
    if (performHandshake && _pSocket->getBlocking())
    {
        int ret;
-        Poco::Timespan remaining_time = getMaxTimeout();
+        Poco::Timespan remaining_time = getMaxTimeoutOrLimit();
        do
        {
            RemainingTimeCounter counter(remaining_time);
@@ -302,7 +302,7 @@ int SecureSocketImpl::sendBytes(const void* buffer, int length, int flags)
        return rc;
    }

-    Poco::Timespan remaining_time = getMaxTimeout();
+    Poco::Timespan remaining_time = getMaxTimeoutOrLimit();
    do
    {
        RemainingTimeCounter counter(remaining_time);
@@ -338,7 +338,7 @@ int SecureSocketImpl::receiveBytes(void* buffer, int length, int flags)
        return rc;
    }

-    Poco::Timespan remaining_time = getMaxTimeout();
+    Poco::Timespan remaining_time = getMaxTimeoutOrLimit();
    do
    {
        /// SSL record may consist of several TCP packets,
@@ -372,7 +372,7 @@ int SecureSocketImpl::completeHandshake()
    poco_check_ptr (_pSSL);

    int rc;
-    Poco::Timespan remaining_time = getMaxTimeout();
+    Poco::Timespan remaining_time = getMaxTimeoutOrLimit();
    do
    {
        RemainingTimeCounter counter(remaining_time);
@@ -453,18 +453,29 @@ X509* SecureSocketImpl::peerCertificate() const
    return 0;
}

-Poco::Timespan SecureSocketImpl::getMaxTimeout()
+Poco::Timespan SecureSocketImpl::getMaxTimeoutOrLimit()
{
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    Poco::Timespan remaining_time = _pSocket->getReceiveTimeout();
    Poco::Timespan send_timeout = _pSocket->getSendTimeout();
    if (remaining_time < send_timeout)
        remaining_time = send_timeout;
+    /// zero SO_SNDTIMEO/SO_RCVTIMEO works as no timeout, let's replicate this
+    ///
+    /// NOTE: we cannot use INT64_MAX (std::numeric_limits<Poco::Timespan::TimeDiff>::max()),
+    /// since it will be later passed to poll() which accepts an int timeout, and
+    /// even though poll() accepts milliseconds and Timespan() accepts
+    /// microseconds, let's use a smaller maximum value just to avoid some possible
+    /// issues, this should be enough anyway (it is ~24 days).
+    if (remaining_time == 0)
+        remaining_time = Poco::Timespan(std::numeric_limits<int>::max());
    return remaining_time;
}
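The logic above, restated in ordinary types (a sketch; Poco's `Timespan` is replaced with plain microsecond counts for illustration): take the larger of the two socket timeouts, and substitute a large but finite limit when both are zero, since zero means "no timeout":

    #include <algorithm>
    #include <climits>

    long long maxTimeoutOrLimitUs(long long receive_us, long long send_us)
    {
        long long t = std::max(receive_us, send_us);
        // 0 means no timeout; emulate it with a big finite value that still
        // fits in the int eventually handed to poll().
        return t == 0 ? (long long)INT_MAX : t;
    }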
bool SecureSocketImpl::mustRetry(int rc, Poco::Timespan& remaining_time)
{
+    if (remaining_time == 0)
+        return false;
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    if (rc <= 0)
    {
@@ -475,9 +486,7 @@ bool SecureSocketImpl::mustRetry(int rc, Poco::Timespan& remaining_time)
            case SSL_ERROR_WANT_READ:
                if (_pSocket->getBlocking())
                {
-                    /// Level-triggered mode of epoll_wait is used, so if SSL_read doesn't read all available data from the socket,
-                    /// epoll_wait returns true without waiting for new data even if remaining_time == 0
-                    if (_pSocket->pollImpl(remaining_time, Poco::Net::Socket::SELECT_READ) && remaining_time != 0)
+                    if (_pSocket->pollImpl(remaining_time, Poco::Net::Socket::SELECT_READ))
                        return true;
                    else
                        throw Poco::TimeoutException();
@@ -486,13 +495,15 @@ bool SecureSocketImpl::mustRetry(int rc, Poco::Timespan& remaining_time)
            case SSL_ERROR_WANT_WRITE:
                if (_pSocket->getBlocking())
                {
-                    /// The same as for SSL_ERROR_WANT_READ
-                    if (_pSocket->pollImpl(remaining_time, Poco::Net::Socket::SELECT_WRITE) && remaining_time != 0)
+                    if (_pSocket->pollImpl(remaining_time, Poco::Net::Socket::SELECT_WRITE))
                        return true;
                    else
                        throw Poco::TimeoutException();
                }
                break;
+            /// NOTE: POCO_EINTR is the same as SSL_ERROR_WANT_READ (at least in
+            /// OpenSSL), so this is likely dead code, but let's leave it for
+            /// compatibility with other implementations
            case SSL_ERROR_SYSCALL:
                return socketError == POCO_EAGAIN || socketError == POCO_EINTR;
            default:
@@ -253,7 +253,7 @@ function run_tests()
    try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"

    set +e
-    clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
+    timeout -s TERM --preserve-status 120m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
        --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
        | ts '%Y-%m-%d %H:%M:%S' \
        | tee -a test_output/test_result.txt
docs/changelogs/v24.6.1.4423-stable.md (new file, 735 lines)
@@ -0,0 +1,735 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.1.4423-stable (dcced7c8478) FIXME as compared to v24.4.1.2088-stable (6d4b31322d1)

#### Backward Incompatible Change
* Enable asynchronous load of databases and tables by default. See the `async_load_databases` in config.xml. While this change is fully compatible, it can introduce a difference in behavior. When `async_load_databases` is false, as in the previous versions, the server will not accept connections until all tables are loaded. When `async_load_databases` is true, as in the new version, the server can accept connections before all the tables are loaded. If a query is made to a table that is not yet loaded, it will wait for the table's loading, which can take considerable time. It can change the behavior of the server if it is part of a large distributed system under a load balancer. In the first case, the load balancer can get a connection refusal and quickly failover to another server. In the second case, the load balancer can connect to a server that is still loading the tables, and the query will have a higher latency. Moreover, if many queries accumulate in the waiting state, it can lead to a "thundering herd" problem when they start processing simultaneously. This can make a difference only for highly loaded distributed backends. You can set the value of `async_load_databases` to false to avoid this problem. [#57695](https://github.com/ClickHouse/ClickHouse/pull/57695) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Some invalid queries will fail earlier during parsing. Note: disabled the support for inline KQL expressions (the experimental Kusto language) when they are put into a `kql` table function without a string literal, e.g. `kql(garbage | trash)` instead of `kql('garbage | trash')` or `kql($$garbage | trash$$)`. This feature was introduced unintentionally and should not exist. [#61500](https://github.com/ClickHouse/ClickHouse/pull/61500) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Renamed "inverted indexes" to "full-text indexes" which is a less technical / more user-friendly name. This also changes internal table metadata and breaks tables with existing (experimental) inverted indexes. Please make to drop such indexes before upgrade and re-create them after upgrade. [#62884](https://github.com/ClickHouse/ClickHouse/pull/62884) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Usage of functions `neighbor`, `runningAccumulate`, `runningDifferenceStartingWithFirstValue`, `runningDifference` is deprecated (because it is error-prone). Proper window functions should be used instead. To enable them back, set `allow_deprecated_functions=1`. [#63132](https://github.com/ClickHouse/ClickHouse/pull/63132) ([Nikita Taranov](https://github.com/nickitat)).
* Queries from `system.columns` will work faster if there is a large number of columns, but many databases or tables are not granted for `SHOW TABLES`. Note that in previous versions, if you grant `SHOW COLUMNS` to individual columns without granting `SHOW TABLES` to the corresponding tables, the `system.columns` table will show these columns, but in a new version, it will skip the table entirely. Remove trace log messages "Access granted" and "Access denied" that slowed down queries. [#63439](https://github.com/ClickHouse/ClickHouse/pull/63439) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Rework parallel processing in `Ordered` mode of storage `S3Queue`. This PR is backward incompatible for Ordered mode if you used settings `s3queue_processing_threads_num` or `s3queue_total_shards_num`. Setting `s3queue_total_shards_num` is deleted, previously it was allowed to use only under `s3queue_allow_experimental_sharded_mode`, which is now deprecated. A new setting is added - `s3queue_buckets`. [#64349](https://github.com/ClickHouse/ClickHouse/pull/64349) ([Kseniia Sumarokova](https://github.com/kssenii)).
* New functions `snowflakeIDToDateTime`, `snowflakeIDToDateTime64`, `dateTimeToSnowflakeID`, and `dateTime64ToSnowflakeID` were added. Unlike the existing functions `snowflakeToDateTime`, `snowflakeToDateTime64`, `dateTimeToSnowflake`, and `dateTime64ToSnowflake`, the new functions are compatible with function `generateSnowflakeID`, i.e. they accept the snowflake IDs generated by `generateSnowflakeID` and produce snowflake IDs of the same type as `generateSnowflakeID` (i.e. `UInt64`). Furthermore, the new functions default to the UNIX epoch (aka. 1970-01-01), just like `generateSnowflakeID`. If necessary, a different epoch, e.g. Twitter's/X's epoch 2010-11-04 aka. 1288834974657 msec since UNIX epoch, can be passed. The old conversion functions are deprecated and will be removed after a transition period: to use them regardless, enable setting `allow_deprecated_snowflake_conversion_functions`. [#64948](https://github.com/ClickHouse/ClickHouse/pull/64948) ([Robert Schulze](https://github.com/rschu1ze)).

#### New Feature
* Provide support for AzureBlobStorage function in ClickHouse server to use Azure Workload identity to authenticate against Azure blob storage. If `use_workload_identity` parameter is set in config, [workload identity](https://github.com/Azure/azure-sdk-for-cpp/tree/main/sdk/identity/azure-identity#authenticate-azure-hosted-applications) is used for authentication. [#57881](https://github.com/ClickHouse/ClickHouse/pull/57881) ([Vinay Suryadevara](https://github.com/vinay92-ch)).
* Introduce bulk loading to StorageEmbeddedRocksDB by creating and ingesting SST files instead of relying on the rocksdb built-in memtable. This helps to increase import speed, especially for long-running insert queries to StorageEmbeddedRocksDB tables. Also, introduce `StorageEmbeddedRocksDB` table settings. [#59163](https://github.com/ClickHouse/ClickHouse/pull/59163) ([Duc Canh Le](https://github.com/canhld94)).
* Introduce statistics of type "number of distinct values". [#59357](https://github.com/ClickHouse/ClickHouse/pull/59357) ([Han Fei](https://github.com/hanfei1991)).
* User can now parse CRLF with TSV format using a setting `input_format_tsv_crlf_end_of_line`. Closes [#56257](https://github.com/ClickHouse/ClickHouse/issues/56257). [#59747](https://github.com/ClickHouse/ClickHouse/pull/59747) ([Shaun Struwig](https://github.com/Blargian)).
* Add Hilbert Curve encode and decode functions. [#60156](https://github.com/ClickHouse/ClickHouse/pull/60156) ([Artem Mustafin](https://github.com/Artemmm91)).
* Adds the Form Format to read/write a single record in the application/x-www-form-urlencoded format. [#60199](https://github.com/ClickHouse/ClickHouse/pull/60199) ([Shaun Struwig](https://github.com/Blargian)).
* Added possibility to compress in CROSS JOIN. [#60459](https://github.com/ClickHouse/ClickHouse/pull/60459) ([p1rattttt](https://github.com/p1rattttt)).
* New setting `input_format_force_null_for_omitted_fields` that forces NULL values for omitted fields. [#60887](https://github.com/ClickHouse/ClickHouse/pull/60887) ([Constantine Peresypkin](https://github.com/pkit)).
* Support joins with inequality conditions which involve columns from both the left and right tables, e.g. `t1.y < t2.y`. To enable, `SET allow_experimental_join_condition = 1`. [#60920](https://github.com/ClickHouse/ClickHouse/pull/60920) ([lgbo](https://github.com/lgbo-ustc)).
* Earlier our s3 storage and s3 table function didn't support selecting from archive files. I created a solution that allows to iterate over files inside archives in S3. [#62259](https://github.com/ClickHouse/ClickHouse/pull/62259) ([Daniil Ivanik](https://github.com/divanik)).
* Support for conditional function `clamp`. [#62377](https://github.com/ClickHouse/ClickHouse/pull/62377) ([skyoct](https://github.com/skyoct)).
* Add npy output format. [#62430](https://github.com/ClickHouse/ClickHouse/pull/62430) ([豪肥肥](https://github.com/HowePa)).
* Added support for reading LINESTRING geometry in WKT format using function `readWKTLineString`. [#62519](https://github.com/ClickHouse/ClickHouse/pull/62519) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Added SQL functions `generateUUIDv7`, `generateUUIDv7ThreadMonotonic`, `generateUUIDv7NonMonotonic` (with different monotonicity/performance trade-offs) to generate version 7 UUIDs aka. timestamp-based UUIDs with random component. Also added a new function `UUIDToNum` to extract bytes from a UUID and a new function `UUIDv7ToDateTime` to extract timestamp component from a UUID version 7. [#62852](https://github.com/ClickHouse/ClickHouse/pull/62852) ([Alexey Petrunyaka](https://github.com/pet74alex)).
* Implement Dynamic data type that allows to store values of any type inside it without knowing all of them in advance. Dynamic type is available under a setting `allow_experimental_dynamic_type`. [#63058](https://github.com/ClickHouse/ClickHouse/pull/63058) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow to attach parts from a different disk. [#63087](https://github.com/ClickHouse/ClickHouse/pull/63087) ([Unalian](https://github.com/Unalian)).
* Allow proxy to be bypassed for hosts specified in `no_proxy` env variable and ClickHouse proxy configuration. [#63314](https://github.com/ClickHouse/ClickHouse/pull/63314) ([Arthur Passos](https://github.com/arthurpassos)).
* Introduce bulk loading to StorageEmbeddedRocksDB by creating and ingesting SST files instead of relying on the rocksdb built-in memtable. This helps to increase import speed, especially for long-running insert queries to StorageEmbeddedRocksDB tables. Also, introduce StorageEmbeddedRocksDB table settings. [#63324](https://github.com/ClickHouse/ClickHouse/pull/63324) ([Duc Canh Le](https://github.com/canhld94)).
* Raw as a synonym for TSVRaw. [#63394](https://github.com/ClickHouse/ClickHouse/pull/63394) ([Unalian](https://github.com/Unalian)).
* Added the possibility to do a cross join in a temporary file if its size exceeds limits. [#63432](https://github.com/ClickHouse/ClickHouse/pull/63432) ([p1rattttt](https://github.com/p1rattttt)).
* Added a new table function `loop` to support returning query results in an infinite loop. [#63452](https://github.com/ClickHouse/ClickHouse/pull/63452) ([Sariel](https://github.com/sarielwxm)).
* Added new SQL functions `generateSnowflakeID` for generating Twitter-style Snowflake IDs. [#63577](https://github.com/ClickHouse/ClickHouse/pull/63577) ([Danila Puzov](https://github.com/kazalika)).
* Add the ability to reshuffle rows during insert to optimize for size without violating the order set by `PRIMARY KEY`. It's controlled by the setting `optimize_row_order` (off by default). [#63578](https://github.com/ClickHouse/ClickHouse/pull/63578) ([Igor Markelov](https://github.com/ElderlyPassionFruit)).
* On Linux and MacOS, if the program has STDOUT redirected to a file with a compression extension, use the corresponding compression method instead of nothing (making it behave similarly to `INTO OUTFILE` ). [#63662](https://github.com/ClickHouse/ClickHouse/pull/63662) ([v01dXYZ](https://github.com/v01dXYZ)).
* Added `merge_workload` and `mutation_workload` settings to regulate how resources are utilized and shared between merges, mutations and other workloads. [#64061](https://github.com/ClickHouse/ClickHouse/pull/64061) ([Sergei Trifonov](https://github.com/serxa)).
* Change warning on high number of attached tables to differentiate tables, views and dictionaries. [#64180](https://github.com/ClickHouse/ClickHouse/pull/64180) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
* Add support for comparing IPv4 and IPv6 types using the `=` operator. [#64292](https://github.com/ClickHouse/ClickHouse/pull/64292) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
* Allow to store named collections in zookeeper. [#64574](https://github.com/ClickHouse/ClickHouse/pull/64574) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support decimal arguments in binary math functions (`pow`, `atan2`, `max2`, `min2`, `hypot`). [#64582](https://github.com/ClickHouse/ClickHouse/pull/64582) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
* Add support for index analysis over `hilbertEncode`. [#64662](https://github.com/ClickHouse/ClickHouse/pull/64662) ([Artem Mustafin](https://github.com/Artemmm91)).
* Added SQL functions `parseReadableSize` (along with `OrNull` and `OrZero` variants). [#64742](https://github.com/ClickHouse/ClickHouse/pull/64742) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
* Add server settings `max_table_num_to_throw` and `max_database_num_to_throw` to limit the number of databases or tables on `CREATE` queries. [#64781](https://github.com/ClickHouse/ClickHouse/pull/64781) ([Xu Jia](https://github.com/XuJia0210)).
* Add _time virtual column to file alike storages (s3/file/hdfs/url/azureBlobStorage). [#64947](https://github.com/ClickHouse/ClickHouse/pull/64947) ([Ilya Golshtein](https://github.com/ilejn)).
* Introduced new functions `base64URLEncode`, `base64URLDecode` and `tryBase64URLDecode`. [#64991](https://github.com/ClickHouse/ClickHouse/pull/64991) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
* Add new function `editDistanceUTF8`, which calculates the [edit distance](https://en.wikipedia.org/wiki/Edit_distance) between two UTF8 strings. [#65269](https://github.com/ClickHouse/ClickHouse/pull/65269) ([LiuNeng](https://github.com/liuneng1994)).

#### Performance Improvement
* Skip merging of newly created projection blocks during `INSERT`-s. [#59405](https://github.com/ClickHouse/ClickHouse/pull/59405) ([Nikita Taranov](https://github.com/nickitat)).
* Add a native parquet reader, which can read parquet binary to ClickHouse Columns directly. It's controlled by the setting `input_format_parquet_use_native_reader` (disabled by default). [#60361](https://github.com/ClickHouse/ClickHouse/pull/60361) ([ZhiHong Zhang](https://github.com/copperybean)).
* Reduce the number of virtual function calls in ColumnNullable::size(). [#60556](https://github.com/ClickHouse/ClickHouse/pull/60556) ([HappenLee](https://github.com/HappenLee)).
* Process string functions XXXUTF8 'asciily' if input strings are all ASCII chars. Inspired by https://github.com/apache/doris/pull/29799. Overall speedup of 1.07x~1.62x. Note that peak memory usage has decreased in some cases. [#61632](https://github.com/ClickHouse/ClickHouse/pull/61632) ([李扬](https://github.com/taiyang-li)).
* Improved performance of selection (`{}`) globs in StorageS3. [#62120](https://github.com/ClickHouse/ClickHouse/pull/62120) ([Andrey Zvonov](https://github.com/zvonand)).
* HostResolver could hold each IP address several times. If a remote host has several IPs and, for some reason (firewall rules, for example), access is allowed on some IPs and forbidden on others, then only the first record of the forbidden IPs was marked as failed, and on each try those IPs had a chance to be chosen (and fail again). Even with that fixed, the DNS cache is dropped every 120 seconds, and the IPs can be chosen again. [#62652](https://github.com/ClickHouse/ClickHouse/pull/62652) ([Anton Ivashkin](https://github.com/ianton-ru)).
* Speedup `splitByRegexp` when the regular expression argument is a single character. [#62696](https://github.com/ClickHouse/ClickHouse/pull/62696) ([Robert Schulze](https://github.com/rschu1ze)).
* Speed up FixedHashTable by keeping track of the min and max keys used. This allows to reduce the number of cells that need to be verified. [#62746](https://github.com/ClickHouse/ClickHouse/pull/62746) ([Jiebin Sun](https://github.com/jiebinn)).
* Add a new configuration `prefer_merge_sort_block_bytes` to control the memory usage and speed up sorting 2 times when merging when there are many columns. [#62904](https://github.com/ClickHouse/ClickHouse/pull/62904) ([LiuNeng](https://github.com/liuneng1994)).
* `clickhouse-local` will start faster. In previous versions, it was not deleting temporary directories by mistake. Now it will. This closes [#62941](https://github.com/ClickHouse/ClickHouse/issues/62941). [#63074](https://github.com/ClickHouse/ClickHouse/pull/63074) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Micro-optimizations for the new analyzer. [#63429](https://github.com/ClickHouse/ClickHouse/pull/63429) ([Raúl Marín](https://github.com/Algunenano)).
* Index analysis will work if `DateTime` is compared to `DateTime64`. This closes [#63441](https://github.com/ClickHouse/ClickHouse/issues/63441). [#63443](https://github.com/ClickHouse/ClickHouse/pull/63443) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Index analysis will work if `DateTime` is compared to `DateTime64`. This closes [#63441](https://github.com/ClickHouse/ClickHouse/issues/63441). [#63532](https://github.com/ClickHouse/ClickHouse/pull/63532) ([Raúl Marín](https://github.com/Algunenano)).
* Optimize the resolution of in(LowCardinality, ConstantSet). [#64060](https://github.com/ClickHouse/ClickHouse/pull/64060) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Speed up indices of type `set` a little (around 1.5 times) by removing garbage. [#64098](https://github.com/ClickHouse/ClickHouse/pull/64098) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Use a thread pool to initialize and destroy hash tables inside `ConcurrentHashJoin`. [#64241](https://github.com/ClickHouse/ClickHouse/pull/64241) ([Nikita Taranov](https://github.com/nickitat)).
* Optimized vertical merges in tables with sparse columns. [#64311](https://github.com/ClickHouse/ClickHouse/pull/64311) ([Anton Popov](https://github.com/CurtizJ)).
* Enabled prefetches of data from remote filesystem during vertical merges. It improves latency of vertical merges in tables with data stored on remote filesystem. [#64314](https://github.com/ClickHouse/ClickHouse/pull/64314) ([Anton Popov](https://github.com/CurtizJ)).
* Reduce redundant calls to `isDefault()` of `ColumnSparse::filter` to improve performance. [#64426](https://github.com/ClickHouse/ClickHouse/pull/64426) ([Jiebin Sun](https://github.com/jiebinn)).
* Speedup `find_super_nodes` and `find_big_family` keeper-client commands by making multiple asynchronous getChildren requests. [#64628](https://github.com/ClickHouse/ClickHouse/pull/64628) ([Alexander Gololobov](https://github.com/davenger)).
* Improve function least/greatest for nullable numeric type arguments. [#64668](https://github.com/ClickHouse/ClickHouse/pull/64668) ([KevinyhZou](https://github.com/KevinyhZou)).
* Allow merging two consequent `FilterSteps` of a query plan. This improves filter-push-down optimization if the filter condition can be pushed down from the parent step. [#64760](https://github.com/ClickHouse/ClickHouse/pull/64760) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove bad optimization in vertical final implementation and re-enable vertical final algorithm by default. [#64783](https://github.com/ClickHouse/ClickHouse/pull/64783) ([Duc Canh Le](https://github.com/canhld94)).
* Remove ALIAS nodes from the filter expression. This slightly improves performance for queries with `PREWHERE` (with new analyzer). [#64793](https://github.com/ClickHouse/ClickHouse/pull/64793) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix performance regression in cross join introduced in [#60459](https://github.com/ClickHouse/ClickHouse/issues/60459) (24.5). [#65243](https://github.com/ClickHouse/ClickHouse/pull/65243) ([Nikita Taranov](https://github.com/nickitat)).

#### Improvement
* Support empty tuples. [#55061](https://github.com/ClickHouse/ClickHouse/pull/55061) ([Amos Bird](https://github.com/amosbird)).
* Hot reload storage policy for distributed tables when adding a new disk. [#58285](https://github.com/ClickHouse/ClickHouse/pull/58285) ([Duc Canh Le](https://github.com/canhld94)).
* Maps can now have `Float32`, `Float64`, `Array(T)`, `Map(K,V)` and `Tuple(T1, T2, ...)` as keys. Closes [#54537](https://github.com/ClickHouse/ClickHouse/issues/54537). [#59318](https://github.com/ClickHouse/ClickHouse/pull/59318) ([李扬](https://github.com/taiyang-li)).
* Avoid possible deadlock during MergeTree index analysis when scheduling threads in a saturated service. [#59427](https://github.com/ClickHouse/ClickHouse/pull/59427) ([Sean Haynes](https://github.com/seandhaynes)).
* Multiline strings with border preservation and column width change. [#59940](https://github.com/ClickHouse/ClickHouse/pull/59940) ([Volodyachan](https://github.com/Volodyachan)).
* Make rabbitmq nack broken messages. Closes [#45350](https://github.com/ClickHouse/ClickHouse/issues/45350). [#60312](https://github.com/ClickHouse/ClickHouse/pull/60312) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support partial trivial count optimization when the query filter is able to select exact ranges from merge tree tables. [#60463](https://github.com/ClickHouse/ClickHouse/pull/60463) ([Amos Bird](https://github.com/amosbird)).
* Fix a crash in asynchronous stack unwinding (such as when using the sampling query profiler) while interpreting debug info. This closes [#60460](https://github.com/ClickHouse/ClickHouse/issues/60460). [#60468](https://github.com/ClickHouse/ClickHouse/pull/60468) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Reduce max memory usage of multithreaded `INSERT`s by collecting chunks of multiple threads in a single transform. [#61047](https://github.com/ClickHouse/ClickHouse/pull/61047) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Distinct error messages for the S3 'no key' error in the disk and storage cases. [#61108](https://github.com/ClickHouse/ClickHouse/pull/61108) ([Sema Checherinda](https://github.com/CheSema)).
* Less contention in the filesystem cache (part 4). Allow keeping the filesystem cache below its limit by doing additional eviction in the background (controlled by `keep_free_space_size(elements)_ratio`). This releases pressure from space reservation for queries (in the `tryReserve` method). It is also done in a mostly lock-free way and should not block normal cache usage. [#61250](https://github.com/ClickHouse/ClickHouse/pull/61250) ([Kseniia Sumarokova](https://github.com/kssenii)).
* The progress bar will work for trivial queries with LIMIT from `system.zeros`, `system.zeros_mt` (it already works for `system.numbers` and `system.numbers_mt`), and the `generateRandom` table function. As a bonus, if the total number of records is greater than the `max_rows_to_read` limit, it will throw an exception earlier. This closes [#58183](https://github.com/ClickHouse/ClickHouse/issues/58183). [#61823](https://github.com/ClickHouse/ClickHouse/pull/61823) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* YAML Merge Key support. [#62685](https://github.com/ClickHouse/ClickHouse/pull/62685) ([Azat Khuzhin](https://github.com/azat)).
* Enhance error message when non-deterministic function is used with Replicated source. [#62896](https://github.com/ClickHouse/ClickHouse/pull/62896) ([Grégoire Pineau](https://github.com/lyrixx)).
* Fix interserver secret for Distributed over Distributed from `remote`. [#63013](https://github.com/ClickHouse/ClickHouse/pull/63013) ([Azat Khuzhin](https://github.com/azat)).
* Allow using `clickhouse-local` and its shortcuts `clickhouse` and `ch` with a query or queries file as a positional argument. Examples: `ch "SELECT 1"`, `ch --param_test Hello "SELECT {test:String}"`, `ch query.sql`. This closes [#62361](https://github.com/ClickHouse/ClickHouse/issues/62361). [#63081](https://github.com/ClickHouse/ClickHouse/pull/63081) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support configuration substitutions from YAML files. [#63106](https://github.com/ClickHouse/ClickHouse/pull/63106) ([Eduard Karacharov](https://github.com/korowa)).
* Reduce the memory usage when using Azure object storage by using fixed memory allocation, avoiding the allocation of an extra buffer. [#63160](https://github.com/ClickHouse/ClickHouse/pull/63160) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Add TTL information to the `system.parts_columns` table. [#63200](https://github.com/ClickHouse/ClickHouse/pull/63200) ([litlig](https://github.com/litlig)).
* Keep previous data in terminal after picking from skim suggestions. [#63261](https://github.com/ClickHouse/ClickHouse/pull/63261) ([FlameFactory](https://github.com/FlameFactory)).
* Field widths are now calculated correctly, ignoring ANSI escape sequences. [#63270](https://github.com/ClickHouse/ClickHouse/pull/63270) ([Shaun Struwig](https://github.com/Blargian)).
* Enable plain_rewritable metadata for local and Azure (azure_blob_storage) object storages. [#63365](https://github.com/ClickHouse/ClickHouse/pull/63365) ([Julia Kartseva](https://github.com/jkartseva)).
* Support English-style Unicode quotes, e.g. “Hello”, ‘world’. This is questionable in general but helpful when you type your query in a word processor, such as Google Docs. This closes [#58634](https://github.com/ClickHouse/ClickHouse/issues/58634). [#63381](https://github.com/ClickHouse/ClickHouse/pull/63381) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
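A hedged sketch (assuming curly single quotes act as string-literal quotes):

```sql
SELECT ‘Hello, world’ AS greeting;
```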
* Allowed to create MaterializedMySQL database without connection to MySQL. [#63397](https://github.com/ClickHouse/ClickHouse/pull/63397) ([Kirill](https://github.com/kirillgarbar)).
* Remove copying data when writing to filesystem cache. [#63401](https://github.com/ClickHouse/ClickHouse/pull/63401) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Replace the error code `NUMBER_OF_ARGUMENTS_DOESNT_MATCH` with more accurate error codes where appropriate. [#63406](https://github.com/ClickHouse/ClickHouse/pull/63406) ([Yohann Jardin](https://github.com/yohannj)).
* Several minor corner case fixes to proxy support & tunneling. [#63427](https://github.com/ClickHouse/ClickHouse/pull/63427) ([Arthur Passos](https://github.com/arthurpassos)).
* `os_user` and `client_hostname` are now correctly set up for queries for command line suggestions in clickhouse-client. This closes [#63430](https://github.com/ClickHouse/ClickHouse/issues/63430). [#63433](https://github.com/ClickHouse/ClickHouse/pull/63433) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed tabulation in line numbering and corrected length handling when moving a line whose value contains a tab; added tests. [#63493](https://github.com/ClickHouse/ClickHouse/pull/63493) ([Volodyachan](https://github.com/Volodyachan)).
* Add the `aggregate_function_group_array_has_limit_size` setting to support discarding data in some scenarios. [#63516](https://github.com/ClickHouse/ClickHouse/pull/63516) ([zhongyuankai](https://github.com/zhongyuankai)).
* Automatically mark a replica of Replicated database as lost and start recovery if some DDL task fails more than `max_retries_before_automatic_recovery` (100 by default) times in a row with the same error. Also, fixed a bug that could cause skipping DDL entries when an exception is thrown during an early stage of entry execution. [#63549](https://github.com/ClickHouse/ClickHouse/pull/63549) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add `http_response_headers` configuration to support custom response headers in custom HTTP handlers. [#63562](https://github.com/ClickHouse/ClickHouse/pull/63562) ([Grigorii](https://github.com/GSokol)).
* Automatically correct `max_block_size=0` to default value. [#63587](https://github.com/ClickHouse/ClickHouse/pull/63587) ([Antonio Andelic](https://github.com/antonio2368)).
* Account failed files in `s3queue_tracked_file_ttl_sec` and `s3queue_tracked_files_limit` for `StorageS3Queue`. [#63638](https://github.com/ClickHouse/ClickHouse/pull/63638) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a `build_id` ALIAS column to `trace_log` to facilitate auto renaming upon detecting binary changes. This is to address [#52086](https://github.com/ClickHouse/ClickHouse/issues/52086). [#63656](https://github.com/ClickHouse/ClickHouse/pull/63656) ([Zimu Li](https://github.com/woodlzm)).
* Enable truncate operation for object storage disks. [#63693](https://github.com/ClickHouse/ClickHouse/pull/63693) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Improve io_uring resubmits visibility. Rename profile event `IOUringSQEsResubmits` -> `IOUringSQEsResubmitsAsync` and add a new one `IOUringSQEsResubmitsSync`. [#63699](https://github.com/ClickHouse/ClickHouse/pull/63699) ([Tomer Shafir](https://github.com/tomershafir)).
* Introduce assertions to verify all functions are called with columns of the right size. [#63723](https://github.com/ClickHouse/ClickHouse/pull/63723) ([Raúl Marín](https://github.com/Algunenano)).
* The loading of the keywords list is now dependent on the server revision and will be disabled for the old versions of ClickHouse server. CC @azat. [#63786](https://github.com/ClickHouse/ClickHouse/pull/63786) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* `SHOW CREATE TABLE` executed on system tables now shows a helpful comment, unique to each table, explaining why that table is needed. [#63788](https://github.com/ClickHouse/ClickHouse/pull/63788) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
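For example (the exact comment text varies per table):

```sql
-- The output now ends with a COMMENT clause describing the table's purpose.
SHOW CREATE TABLE system.query_log;
```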
* Allow trailing commas in the columns list in the INSERT query. For example, `INSERT INTO test (a, b, c, ) VALUES ...`. [#63803](https://github.com/ClickHouse/ClickHouse/pull/63803) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better exception messages for the `Regexp` format. [#63804](https://github.com/ClickHouse/ClickHouse/pull/63804) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow trailing commas in the `Values` format. For example, this query is allowed: `INSERT INTO test (a, b, c) VALUES (4, 5, 6,);`. [#63810](https://github.com/ClickHouse/ClickHouse/pull/63810) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* ClickHouse disks now read the server setting to obtain the actual metadata format version. [#63831](https://github.com/ClickHouse/ClickHouse/pull/63831) ([Sema Checherinda](https://github.com/CheSema)).
* Disable pretty format restrictions (`output_format_pretty_max_rows`/`output_format_pretty_max_value_width`) when stdout is not TTY. [#63942](https://github.com/ClickHouse/ClickHouse/pull/63942) ([Azat Khuzhin](https://github.com/azat)).
* Exception handling now works when ClickHouse is used inside AWS Lambda. Author: [Alexey Coolnev](https://github.com/acoolnev). [#64014](https://github.com/ClickHouse/ClickHouse/pull/64014) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Throw `CANNOT_DECOMPRESS` instead of `CORRUPTED_DATA` on invalid compressed data passed via HTTP. [#64036](https://github.com/ClickHouse/ClickHouse/pull/64036) ([vdimir](https://github.com/vdimir)).
* A tip for a single large number in Pretty formats now works for Nullable and LowCardinality. This closes [#61993](https://github.com/ClickHouse/ClickHouse/issues/61993). [#64084](https://github.com/ClickHouse/ClickHouse/pull/64084) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backups with Azure Blob Storage now use multicopy. [#64116](https://github.com/ClickHouse/ClickHouse/pull/64116) ([alesapin](https://github.com/alesapin)).
* Added a new setting, `metadata_keep_free_space_bytes` to keep free space on the metadata storage disk. [#64128](https://github.com/ClickHouse/ClickHouse/pull/64128) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Add metrics, logs, and thread names around parts filtering with indices. [#64130](https://github.com/ClickHouse/ClickHouse/pull/64130) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow to use native copy for azure even with different containers. [#64154](https://github.com/ClickHouse/ClickHouse/pull/64154) ([alesapin](https://github.com/alesapin)).
* Add metrics to track the number of directories created and removed by the plain_rewritable metadata storage, and the number of entries in the local-to-remote in-memory map. [#64175](https://github.com/ClickHouse/ClickHouse/pull/64175) ([Julia Kartseva](https://github.com/jkartseva)).
* Finally enable native copy for azure. [#64182](https://github.com/ClickHouse/ClickHouse/pull/64182) ([alesapin](https://github.com/alesapin)).
* Ignore `allow_suspicious_primary_key` on `ATTACH` and verify on `ALTER`. [#64202](https://github.com/ClickHouse/ClickHouse/pull/64202) ([Azat Khuzhin](https://github.com/azat)).
* The query cache now considers identical queries with different settings as different. This increases robustness in cases where different settings (e.g. `limit` or `additional_table_filters`) would affect the query result. [#64205](https://github.com/ClickHouse/ClickHouse/pull/64205) ([Robert Schulze](https://github.com/rschu1ze)).
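As an assumed illustration (the settings shown are real, the example itself is not from the PR), these two statements now occupy separate query cache entries because their settings differ:

```sql
SELECT count() FROM numbers(1000) SETTINGS use_query_cache = 1, limit = 0;
SELECT count() FROM numbers(1000) SETTINGS use_query_cache = 1, limit = 5;
```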
* Better exception message when deleting a table with a projection, so users can understand the error and the steps to take. [#64212](https://github.com/ClickHouse/ClickHouse/pull/64212) ([jsc0218](https://github.com/jsc0218)).
* Support the non-standard error code `QpsLimitExceeded` in object storage as a retryable error. [#64225](https://github.com/ClickHouse/ClickHouse/pull/64225) ([Sema Checherinda](https://github.com/CheSema)).
* Forbid converting a MergeTree table to replicated if the zookeeper path for this table already exists. [#64244](https://github.com/ClickHouse/ClickHouse/pull/64244) ([Kirill](https://github.com/kirillgarbar)).
* If "replica group" is configured for a `Replicated` database, automatically create a cluster that includes replicas from all groups. [#64312](https://github.com/ClickHouse/ClickHouse/pull/64312) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Added settings to disable materialization of skip indexes and statistics on inserts (`materialize_skip_indexes_on_insert` and `materialize_statistics_on_insert`). [#64391](https://github.com/ClickHouse/ClickHouse/pull/64391) ([Anton Popov](https://github.com/CurtizJ)).
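A minimal sketch of the new switches (the table names are hypothetical):

```sql
SET materialize_skip_indexes_on_insert = 0;  -- let merges build skip indexes instead
SET materialize_statistics_on_insert = 0;
INSERT INTO dst_table SELECT * FROM src_table;
```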
* Use the allocated memory size to calculate the row group size and reduce the peak memory of the parquet writer in single-threaded mode. [#64424](https://github.com/ClickHouse/ClickHouse/pull/64424) ([LiuNeng](https://github.com/liuneng1994)).
* Added a new configuration `input_format_parquet_prefer_block_bytes` to control the average output block bytes, and modified the default value of `input_format_parquet_max_block_size` to 65409. [#64427](https://github.com/ClickHouse/ClickHouse/pull/64427) ([LiuNeng](https://github.com/liuneng1994)).
* Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
* Settings from the user config no longer affect merges and mutations for MergeTree on top of object storage. [#64456](https://github.com/ClickHouse/ClickHouse/pull/64456) ([alesapin](https://github.com/alesapin)).
* Setting `replace_long_file_name_to_hash` is enabled by default for `MergeTree` tables. [#64457](https://github.com/ClickHouse/ClickHouse/pull/64457) ([Anton Popov](https://github.com/CurtizJ)).
* Improve the sparse column iterator to reduce calls to `size()`. [#64497](https://github.com/ClickHouse/ClickHouse/pull/64497) ([Jiebin Sun](https://github.com/jiebinn)).
* Update the condition for using native copy with Azure Blob Storage. [#64518](https://github.com/ClickHouse/ClickHouse/pull/64518) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Support the non-standard error code `TotalQpsLimitExceeded` in object storage as a retryable error. [#64520](https://github.com/ClickHouse/ClickHouse/pull/64520) ([Sema Checherinda](https://github.com/CheSema)).
* Optimized memory usage of vertical merges for tables with high number of skip indexes. [#64580](https://github.com/ClickHouse/ClickHouse/pull/64580) ([Anton Popov](https://github.com/CurtizJ)).
* Introduced two additional columns in the `system.query_log`: `used_privileges` and `missing_privileges`. `used_privileges` is populated with the privileges that were checked during query execution, and `missing_privileges` contains required privileges that are missing. [#64597](https://github.com/ClickHouse/ClickHouse/pull/64597) ([Alexey Katsman](https://github.com/alexkats)).
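An illustrative query against the new columns (the error-code filter is an assumption of this sketch):

```sql
SELECT query, used_privileges, missing_privileges
FROM system.query_log
WHERE exception_code = 497  -- ACCESS_DENIED
ORDER BY event_time DESC
LIMIT 5;
```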
* Add settings `parallel_replicas_custom_key_range_lower` and `parallel_replicas_custom_key_range_upper` to control how parallel replicas with dynamic shards parallelizes queries when using a range filter. [#64604](https://github.com/ClickHouse/ClickHouse/pull/64604) ([josh-hildred](https://github.com/josh-hildred)).
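A hedged sketch of the new settings (the table and key expression are hypothetical, and the companion setting `parallel_replicas_custom_key` is assumed here):

```sql
SELECT count()
FROM events_distributed
SETTINGS
    parallel_replicas_custom_key = 'user_id % 1000',
    parallel_replicas_custom_key_range_lower = 0,
    parallel_replicas_custom_key_range_upper = 500;
```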
* Updated Advanced Dashboard for both open-source and ClickHouse Cloud versions to include a chart for 'Maximum concurrent network connections'. [#64610](https://github.com/ClickHouse/ClickHouse/pull/64610) ([Thom O'Connor](https://github.com/thomoco)).
* The second argument (scale) of functions `round()`, `roundBankers()`, `floor()`, `ceil()` and `trunc()` can now be non-const. [#64798](https://github.com/ClickHouse/ClickHouse/pull/64798) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
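For example, the scale can now vary per row:

```sql
SELECT number AS scale, round(pi(), number) AS rounded
FROM numbers(5);
```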
* Improve the progress report for `zeros_mt` and `generateRandom`. [#64804](https://github.com/ClickHouse/ClickHouse/pull/64804) ([Raúl Marín](https://github.com/Algunenano)).
* Add an asynchronous metric `jemalloc.profile.active` to show whether sampling is currently active. This is an activation mechanism in addition to `prof.active`; both must be active for the calling thread to sample. [#64842](https://github.com/ClickHouse/ClickHouse/pull/64842) ([Unalian](https://github.com/Unalian)).
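A quick way to check it (assuming the metric is exposed under this name in `system.asynchronous_metrics`):

```sql
SELECT metric, value
FROM system.asynchronous_metrics
WHERE metric = 'jemalloc.profile.active';
```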
* Support statistics with ReplicatedMergeTree. [#64934](https://github.com/ClickHouse/ClickHouse/pull/64934) ([Han Fei](https://github.com/hanfei1991)).
* Don't mark `allow_experimental_join_condition` as IMPORTANT. This may have prevented distributed queries in a mixed-version cluster from being executed successfully. [#65008](https://github.com/ClickHouse/ClickHouse/pull/65008) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Backported in [#65716](https://github.com/ClickHouse/ClickHouse/issues/65716): `StorageS3Queue` related fixes and improvements. Deduce a default value of `s3queue_processing_threads_num` from the number of physical CPU cores on the server (instead of the previous default value of 1). Set the default value of `s3queue_loading_retries` to 10. Fix a possible vague "Uncaught exception" in the exception column of `system.s3queue`. Do not increment the retry count on a `MEMORY_LIMIT_EXCEEDED` exception. Move the files commit to a stage after the insertion into the table has fully finished, to avoid files being committed before they are inserted. Add settings `s3queue_max_processed_files_before_commit`, `s3queue_max_processed_rows_before_commit`, `s3queue_max_processed_bytes_before_commit`, and `s3queue_max_processing_time_sec_before_commit` to better control commit and flush time. [#65046](https://github.com/ClickHouse/ClickHouse/pull/65046) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added server asynchronous metrics `DiskGetObjectThrottler*` and `DiskPutObjectThrottler*` reflecting the requests-per-second rate limit defined by the `s3_max_get_rps` and `s3_max_put_rps` disk settings, and the currently available number of requests that could be sent without hitting the throttling limit on the disk. Metrics are defined for every disk that has a configured limit. [#65050](https://github.com/ClickHouse/ClickHouse/pull/65050) ([Sergei Trifonov](https://github.com/serxa)).
* Added a setting `output_format_pretty_display_footer_column_names` which when enabled displays column names at the end of the table for long tables (50 rows by default), with the threshold value for minimum number of rows controlled by `output_format_pretty_display_footer_column_names_min_rows`. [#65144](https://github.com/ClickHouse/ClickHouse/pull/65144) ([Shaun Struwig](https://github.com/Blargian)).
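A sketch of both settings together (forcing the footer for a 60-row result while keeping the default 50-row threshold):

```sql
SELECT * FROM numbers(60)
SETTINGS
    output_format_pretty_display_footer_column_names = 1,
    output_format_pretty_display_footer_column_names_min_rows = 50;
```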
* Restored the previous behaviour of how ClickHouse interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes the new behaviour available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Initialize global trace collector for Poco::ThreadPool (needed for keeper, etc). [#65239](https://github.com/ClickHouse/ClickHouse/pull/65239) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add validation when creating a user with bcrypt_hash. [#65242](https://github.com/ClickHouse/ClickHouse/pull/65242) ([Raúl Marín](https://github.com/Algunenano)).
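A hedged sketch of the validation (the hash below is a deliberate placeholder, not a real bcrypt hash):

```sql
CREATE USER demo_user IDENTIFIED WITH bcrypt_hash BY '<placeholder>';
-- Expected: an exception about an invalid bcrypt hash, instead of a
-- user that can never authenticate.
```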
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Fix a permission error where a user in a specific situation can escalate their privileges on the default database without necessary grants. [#64769](https://github.com/ClickHouse/ClickHouse/pull/64769) ([pufit](https://github.com/pufit)).
* Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Forbid `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)).
* Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#65846](https://github.com/ClickHouse/ClickHouse/issues/65846): Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously such cyclic dependencies could lead to a deadlock during server startup. Closes [#65355](https://github.com/ClickHouse/ClickHouse/issues/65355). Also fix some bugs in dependencies creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#65714](https://github.com/ClickHouse/ClickHouse/issues/65714): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix making backup when multiple shards are used. This PR fixes [#56566](https://github.com/ClickHouse/ClickHouse/issues/56566). [#57684](https://github.com/ClickHouse/ClickHouse/pull/57684) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix passing projections/indexes from CREATE query into inner table of MV. [#59183](https://github.com/ClickHouse/ClickHouse/pull/59183) ([Azat Khuzhin](https://github.com/azat)).
* Fix incorrect merging of `boundRatio` aggregate function states. [#60532](https://github.com/ClickHouse/ClickHouse/pull/60532) ([Tao Wang](https://github.com/wangtZJU)).
* Fix crash when using some functions with low-cardinality columns. [#61966](https://github.com/ClickHouse/ClickHouse/pull/61966) ([Michael Kolupaev](https://github.com/al13n321)).
* Fixed the `set` skip index not working with `IN` and `indexHint()`. [#62083](https://github.com/ClickHouse/ClickHouse/pull/62083) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix queries with `FINAL` giving wrong results when the table does not use adaptive granularity. [#62432](https://github.com/ClickHouse/ClickHouse/pull/62432) ([Duc Canh Le](https://github.com/canhld94)).
* Improve the detection of cgroups v2 memory controller in unusual locations. This fixes a warning that the cgroup memory observer was disabled because no cgroups v1 or v2 current memory file could be found. [#62903](https://github.com/ClickHouse/ClickHouse/pull/62903) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix subsequent use of external tables in client. [#62964](https://github.com/ClickHouse/ClickHouse/pull/62964) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash with untuple and unresolved lambda. [#63131](https://github.com/ClickHouse/ClickHouse/pull/63131) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a bug that could lead to the server accepting connections before it is fully loaded. [#63181](https://github.com/ClickHouse/ClickHouse/pull/63181) ([alesapin](https://github.com/alesapin)).
* Fix intersecting parts when restarting after a drop range. [#63202](https://github.com/ClickHouse/ClickHouse/pull/63202) ([Han Fei](https://github.com/hanfei1991)).
* Fix a misbehavior when SQL security defaults don't load for old tables during server startup. [#63209](https://github.com/ClickHouse/ClickHouse/pull/63209) ([pufit](https://github.com/pufit)).
* Fix JOIN filter push-down for filled JOIN. Closes [#63228](https://github.com/ClickHouse/ClickHouse/issues/63228). [#63234](https://github.com/ClickHouse/ClickHouse/pull/63234) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix infinite loop while listing objects in Azure blob storage. [#63257](https://github.com/ClickHouse/ClickHouse/pull/63257) ([Julia Kartseva](https://github.com/jkartseva)).
* CROSS join can now be executed with any value of the `join_algorithm` setting, closes [#62431](https://github.com/ClickHouse/ClickHouse/issues/62431). [#63273](https://github.com/ClickHouse/ClickHouse/pull/63273) ([vdimir](https://github.com/vdimir)).
* Fixed a potential crash caused by a `no space left` error when temporary data in the cache is used. [#63346](https://github.com/ClickHouse/ClickHouse/pull/63346) ([vdimir](https://github.com/vdimir)).
* Fix bug which could potentially lead to rare LOGICAL_ERROR during SELECT query with message: `Unexpected return type from materialize. Expected type_XXX. Got type_YYY.` Introduced in [#59379](https://github.com/ClickHouse/ClickHouse/issues/59379). [#63353](https://github.com/ClickHouse/ClickHouse/pull/63353) ([alesapin](https://github.com/alesapin)).
* Fix `X-ClickHouse-Timezone` header returning wrong timezone when using `session_timezone` as query level setting. [#63377](https://github.com/ClickHouse/ClickHouse/pull/63377) ([Andrey Zvonov](https://github.com/zvonand)).
* Fix debug assert when using grouping WITH ROLLUP and LowCardinality types. [#63398](https://github.com/ClickHouse/ClickHouse/pull/63398) ([Raúl Marín](https://github.com/Algunenano)).
* Fix logical errors in queries with `GROUPING SETS` and `WHERE` and `group_by_use_nulls = true`, close [#60538](https://github.com/ClickHouse/ClickHouse/issues/60538). [#63405](https://github.com/ClickHouse/ClickHouse/pull/63405) ([vdimir](https://github.com/vdimir)).
* Fix backup of projection part in case projection was removed from table metadata, but part still has projection. [#63426](https://github.com/ClickHouse/ClickHouse/pull/63426) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix 'Every derived table must have its own alias' error for MYSQL dictionary source, close [#63341](https://github.com/ClickHouse/ClickHouse/issues/63341). [#63481](https://github.com/ClickHouse/ClickHouse/pull/63481) ([vdimir](https://github.com/vdimir)).
* Insert QueryFinish on AsyncInsertFlush with no data. [#63483](https://github.com/ClickHouse/ClickHouse/pull/63483) ([Raúl Marín](https://github.com/Algunenano)).
* Fix `system.query_log.used_dictionaries` logging. [#63487](https://github.com/ClickHouse/ClickHouse/pull/63487) ([Eduard Karacharov](https://github.com/korowa)).
* Support executing function during assignment of parameterized view value. [#63502](https://github.com/ClickHouse/ClickHouse/pull/63502) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Avoid a segfault in `MergeTreePrefetchedReadPool` while fetching projection parts. [#63513](https://github.com/ClickHouse/ClickHouse/pull/63513) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix rabbitmq heap-use-after-free found by clang-18, which can happen if an error is thrown from RabbitMQ during initialization of exchange and queues. [#63515](https://github.com/ClickHouse/ClickHouse/pull/63515) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix crash on exit with sentry enabled (due to openssl destroyed before sentry). [#63548](https://github.com/ClickHouse/ClickHouse/pull/63548) ([Azat Khuzhin](https://github.com/azat)).
* Fixed parquet memory tracking. [#63584](https://github.com/ClickHouse/ClickHouse/pull/63584) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix support for Array and Map with Keyed hashing functions and materialized keys. [#63628](https://github.com/ClickHouse/ClickHouse/pull/63628) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fixed Parquet filter pushdown not working with Analyzer. [#63642](https://github.com/ClickHouse/ClickHouse/pull/63642) ([Michael Kolupaev](https://github.com/al13n321)).
* It is forbidden to convert MergeTree to replicated if the zookeeper path for this table already exists. [#63670](https://github.com/ClickHouse/ClickHouse/pull/63670) ([Kirill](https://github.com/kirillgarbar)).
* Read only the necessary columns from VIEW (new analyzer). Closes [#62594](https://github.com/ClickHouse/ClickHouse/issues/62594). [#63688](https://github.com/ClickHouse/ClickHouse/pull/63688) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix rare case with missing data in the result of distributed query. [#63691](https://github.com/ClickHouse/ClickHouse/pull/63691) ([vdimir](https://github.com/vdimir)).
* Fix [#63539](https://github.com/ClickHouse/ClickHouse/issues/63539). Forbid WINDOW redefinition in new analyzer. [#63694](https://github.com/ClickHouse/ClickHouse/pull/63694) ([Dmitry Novik](https://github.com/novikd)).
* Fix `flatten_nested` being broken with Replicated database. [#63695](https://github.com/ClickHouse/ClickHouse/pull/63695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `SIZES_OF_COLUMNS_DOESNT_MATCH` error for queries with `arrayJoin` function in `WHERE`. Fixes [#63653](https://github.com/ClickHouse/ClickHouse/issues/63653). [#63722](https://github.com/ClickHouse/ClickHouse/pull/63722) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `Not found column` and `CAST AS Map from array requires nested tuple of 2 elements` exceptions for distributed queries which use `Map(Nothing, Nothing)` type. Fixes [#63637](https://github.com/ClickHouse/ClickHouse/issues/63637). [#63753](https://github.com/ClickHouse/ClickHouse/pull/63753) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible `ILLEGAL_COLUMN` error in `partial_merge` join, close [#37928](https://github.com/ClickHouse/ClickHouse/issues/37928). [#63755](https://github.com/ClickHouse/ClickHouse/pull/63755) ([vdimir](https://github.com/vdimir)).
* `query_plan_remove_redundant_distinct` could break queries with window functions (when `allow_experimental_analyzer` is on). Fixes [#62820](https://github.com/ClickHouse/ClickHouse/issues/62820). [#63776](https://github.com/ClickHouse/ClickHouse/pull/63776) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix possible crash with SYSTEM UNLOAD PRIMARY KEY. [#63778](https://github.com/ClickHouse/ClickHouse/pull/63778) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a query with a duplicating cyclic alias. Fixes [#63320](https://github.com/ClickHouse/ClickHouse/issues/63320). [#63791](https://github.com/ClickHouse/ClickHouse/pull/63791) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed performance degradation of parsing data formats in INSERT query. This closes [#62918](https://github.com/ClickHouse/ClickHouse/issues/62918). This partially reverts [#42284](https://github.com/ClickHouse/ClickHouse/issues/42284), which breaks the original design and introduces more problems. [#63801](https://github.com/ClickHouse/ClickHouse/pull/63801) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add 'endpoint_subpath' S3 URI setting to allow plain_rewritable disks to share the same endpoint. [#63806](https://github.com/ClickHouse/ClickHouse/pull/63806) ([Julia Kartseva](https://github.com/jkartseva)).
* Fix queries using a parallel read buffer (e.g. with `max_download_threads` > 0) getting stuck when threads cannot be allocated. [#63814](https://github.com/ClickHouse/ClickHouse/pull/63814) ([Antonio Andelic](https://github.com/antonio2368)).
* Allow JOIN filter push down to both streams if only single equivalent column is used in query. Closes [#63799](https://github.com/ClickHouse/ClickHouse/issues/63799). [#63819](https://github.com/ClickHouse/ClickHouse/pull/63819) ([Maksim Kita](https://github.com/kitaisreal)).
* Remove the data from all disks after DROP with the Lazy database engine. Without this change, orphaned data would remain on the disks. [#63848](https://github.com/ClickHouse/ClickHouse/pull/63848) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix incorrect select query result when parallel replicas were used to read from a Materialized View. [#63861](https://github.com/ClickHouse/ClickHouse/pull/63861) ([Nikita Taranov](https://github.com/nickitat)).
* Fixes in the `find_super_nodes` and `find_big_family` commands of keeper-client: do not fail on ZNONODE errors; find super nodes inside super nodes; properly calculate the subtree node count. [#63862](https://github.com/ClickHouse/ClickHouse/pull/63862) ([Alexander Gololobov](https://github.com/davenger)).
* Fix an error `Database name is empty` for remote queries with lambdas over a cluster with a modified default database. Fixes [#63471](https://github.com/ClickHouse/ClickHouse/issues/63471). [#63864](https://github.com/ClickHouse/ClickHouse/pull/63864) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a SIGSEGV caused by the CPU/real-time (`query_profiler_real_time_period_ns`/`query_profiler_cpu_time_period_ns`) profiler (an issue since 2022 that led to periodic server crashes, especially when the Distributed engine was used). [#63865](https://github.com/ClickHouse/ClickHouse/pull/63865) ([Azat Khuzhin](https://github.com/azat)).
* Fixed `EXPLAIN CURRENT TRANSACTION` query. [#63926](https://github.com/ClickHouse/ClickHouse/pull/63926) ([Anton Popov](https://github.com/CurtizJ)).
* Fix analyzer: make the `IN` function with arbitrarily deep sub-selects in a materialized view use the insertion block. [#63930](https://github.com/ClickHouse/ClickHouse/pull/63930) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Allow `ALTER TABLE .. MODIFY|RESET SETTING` and `ALTER TABLE .. MODIFY COMMENT` for plain_rewritable disk. [#63933](https://github.com/ClickHouse/ClickHouse/pull/63933) ([Julia Kartseva](https://github.com/jkartseva)).
* Fix Recursive CTE with distributed queries. Closes [#63790](https://github.com/ClickHouse/ClickHouse/issues/63790). [#63939](https://github.com/ClickHouse/ClickHouse/pull/63939) ([Maksim Kita](https://github.com/kitaisreal)).
* Fixed reading of columns of type `Tuple(Map(LowCardinality(String), String), ...)`. [#63956](https://github.com/ClickHouse/ClickHouse/pull/63956) ([Anton Popov](https://github.com/CurtizJ)).
* Fix resolve of unqualified COLUMNS matcher. Preserve the input columns order and forbid usage of unknown identifiers. [#63962](https://github.com/ClickHouse/ClickHouse/pull/63962) ([Dmitry Novik](https://github.com/novikd)).
* Fix the `Not found column` error for queries with `skip_unused_shards = 1`, `LIMIT BY`, and the new analyzer. Fixes [#63943](https://github.com/ClickHouse/ClickHouse/issues/63943). [#63983](https://github.com/ClickHouse/ClickHouse/pull/63983) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Kusto Query Language (a low-quality third-party dialect): resolve a client abort when using the KQL table function in interactive mode. [#63992](https://github.com/ClickHouse/ClickHouse/pull/63992) ([Yong Wang](https://github.com/kashwy)).
* Fix a `Cyclic aliases` error for cyclic aliases of different types (expression and function). [#63993](https://github.com/ClickHouse/ClickHouse/pull/63993) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Deserialize untrusted binary inputs in a safer way. [#64024](https://github.com/ClickHouse/ClickHouse/pull/64024) ([Robert Schulze](https://github.com/rschu1ze)).
* Do not throw `Storage doesn't support FINAL` error for remote queries over non-MergeTree tables with `final = true` and new analyzer. Fixes [#63960](https://github.com/ClickHouse/ClickHouse/issues/63960). [#64037](https://github.com/ClickHouse/ClickHouse/pull/64037) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add missing settings to recoverLostReplica. [#64040](https://github.com/ClickHouse/ClickHouse/pull/64040) ([Raúl Marín](https://github.com/Algunenano)).
* Fix unwind on SIGSEGV on aarch64 (due to small stack for signal). [#64058](https://github.com/ClickHouse/ClickHouse/pull/64058) ([Azat Khuzhin](https://github.com/azat)).
* Use a properly redefined context with the correct definer for each individual view in the query pipeline. [#64079](https://github.com/ClickHouse/ClickHouse/pull/64079) ([pufit](https://github.com/pufit)).
* Fix analyzer: the "Not found column" error when using `INTERPOLATE`. [#64096](https://github.com/ClickHouse/ClickHouse/pull/64096) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix Azure backup writing multipart blocks of 1 MB (the read buffer size) instead of `max_upload_part_size`. [#64117](https://github.com/ClickHouse/ClickHouse/pull/64117) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix creating backups to S3 buckets with different credentials from the disk containing the file. [#64153](https://github.com/ClickHouse/ClickHouse/pull/64153) ([Antonio Andelic](https://github.com/antonio2368)).
* Prevent LOGICAL_ERROR on CREATE TABLE as MaterializedView. [#64174](https://github.com/ClickHouse/ClickHouse/pull/64174) ([Raúl Marín](https://github.com/Algunenano)).
* The query cache now considers two identical queries against different databases as different. The previous behavior could be used to bypass missing privileges to read from a table. [#64199](https://github.com/ClickHouse/ClickHouse/pull/64199) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Ignore `text_log` config when using Keeper. [#64218](https://github.com/ClickHouse/ClickHouse/pull/64218) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix `duplicate alias` error for distributed queries with `ARRAY JOIN`. [#64226](https://github.com/ClickHouse/ClickHouse/pull/64226) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix unexpected accurateCast from string to integer. [#64255](https://github.com/ClickHouse/ClickHouse/pull/64255) ([wudidapaopao](https://github.com/wudidapaopao)).
* Fixed CNF simplification, in case any OR group contains mutually exclusive atoms. [#64256](https://github.com/ClickHouse/ClickHouse/pull/64256) ([Eduard Karacharov](https://github.com/korowa)).
* Fix Query Tree size validation. [#64377](https://github.com/ClickHouse/ClickHouse/pull/64377) ([Dmitry Novik](https://github.com/novikd)).
* Fix `Logical error: Bad cast` for `Buffer` table with `PREWHERE`. [#64388](https://github.com/ClickHouse/ClickHouse/pull/64388) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Prevent recursive logging in `blob_storage_log` when it's stored on object storage. [#64393](https://github.com/ClickHouse/ClickHouse/pull/64393) ([vdimir](https://github.com/vdimir)).
* Fixed `CREATE TABLE AS` queries for tables with default expressions. [#64455](https://github.com/ClickHouse/ClickHouse/pull/64455) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed `optimize_read_in_order` behaviour for ORDER BY ... NULLS FIRST / LAST on tables with nullable keys. [#64483](https://github.com/ClickHouse/ClickHouse/pull/64483) ([Eduard Karacharov](https://github.com/korowa)).
* Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix an error `Cannot find column` in distributed queries with constant CTE in the `GROUP BY` key. [#64519](https://github.com/ClickHouse/ClickHouse/pull/64519) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed ORC statistics calculation, when writing, for unsigned types on all platforms and Int8 on ARM. [#64563](https://github.com/ClickHouse/ClickHouse/pull/64563) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
* Fix the output of function `formatDateTimeInJodaSyntax` when a formatter generates an uneven number of characters and the last character is `0`. For example, `SELECT formatDateTimeInJodaSyntax(toDate('2012-05-29'), 'D')` now correctly returns `150` instead of previously `15`. [#64614](https://github.com/ClickHouse/ClickHouse/pull/64614) ([LiuNeng](https://github.com/liuneng1994)).
* Do not rewrite aggregation if `-If` combinator is already used. [#64638](https://github.com/ClickHouse/ClickHouse/pull/64638) ([Dmitry Novik](https://github.com/novikd)).
* Fix type inference for float (in case of small buffer, i.e. `--max_read_buffer_size 1`). [#64641](https://github.com/ClickHouse/ClickHouse/pull/64641) ([Azat Khuzhin](https://github.com/azat)).
* Fix bug which could lead to non-working TTLs with expressions. [#64694](https://github.com/ClickHouse/ClickHouse/pull/64694) ([alesapin](https://github.com/alesapin)).
* Fix removing the `WHERE` and `PREWHERE` expressions, which are always true (for the new analyzer). [#64695](https://github.com/ClickHouse/ClickHouse/pull/64695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed excessive part elimination by token-based text indexes (`ngrambf` , `full_text`) when filtering by result of `startsWith`, `endsWith`, `match`, `multiSearchAny`. [#64720](https://github.com/ClickHouse/ClickHouse/pull/64720) ([Eduard Karacharov](https://github.com/korowa)).
* Fixes incorrect behaviour of ANSI CSI escaping in the `UTF8::computeWidth` function. [#64756](https://github.com/ClickHouse/ClickHouse/pull/64756) ([Shaun Struwig](https://github.com/Blargian)).
* Fix a case of incorrect removal of `ORDER BY` / `LIMIT BY` across subqueries. [#64766](https://github.com/ClickHouse/ClickHouse/pull/64766) ([Raúl Marín](https://github.com/Algunenano)).
* Fix (experimental) unequal join with subqueries for sets which are in the mixed join conditions. [#64775](https://github.com/ClickHouse/ClickHouse/pull/64775) ([lgbo](https://github.com/lgbo-ustc)).
* Fix crash in a local cache over `plain_rewritable` disk. [#64778](https://github.com/ClickHouse/ClickHouse/pull/64778) ([Julia Kartseva](https://github.com/jkartseva)).
* Keeper fix: return correct value for `zk_latest_snapshot_size` in `mntr` command. [#64784](https://github.com/ClickHouse/ClickHouse/pull/64784) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix `Cannot find column` in distributed query with `ARRAY JOIN` by `Nested` column. Fixes [#64755](https://github.com/ClickHouse/ClickHouse/issues/64755). [#64801](https://github.com/ClickHouse/ClickHouse/pull/64801) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix memory leak in slru cache policy. [#64803](https://github.com/ClickHouse/ClickHouse/pull/64803) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed possible incorrect memory tracking in several kinds of queries: queries that read any data from S3, queries via http protocol, asynchronous inserts. [#64844](https://github.com/ClickHouse/ClickHouse/pull/64844) ([Anton Popov](https://github.com/CurtizJ)).
* Fix the `Block structure mismatch` error for queries reading with `PREWHERE` from the materialized view when the materialized view has columns of different types than the source table. Fixes [#64611](https://github.com/ClickHouse/ClickHouse/issues/64611). [#64855](https://github.com/ClickHouse/ClickHouse/pull/64855) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix rare crash when table has TTL with subquery + database replicated + parallel replicas + analyzer. It's really rare, but please don't use TTLs with subqueries. [#64858](https://github.com/ClickHouse/ClickHouse/pull/64858) ([alesapin](https://github.com/alesapin)).
* Fix duplicated `Delete` events in `blob_storage_log` when deleting a large batch. [#64924](https://github.com/ClickHouse/ClickHouse/pull/64924) ([vdimir](https://github.com/vdimir)).
* Backported in [#65544](https://github.com/ClickHouse/ClickHouse/issues/65544): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
* Fixed `Session moved to another server` error from [Zoo]Keeper that might happen after server startup when the config has includes from [Zoo]Keeper. [#64986](https://github.com/ClickHouse/ClickHouse/pull/64986) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#65582](https://github.com/ClickHouse/ClickHouse/issues/65582): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix `ALTER MODIFY COMMENT` query that was broken for parameterized VIEWs in https://github.com/ClickHouse/ClickHouse/pull/54211. [#65031](https://github.com/ClickHouse/ClickHouse/pull/65031) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix `host_id` in DatabaseReplicated when `cluster_secure_connection` parameter is enabled. Previously all the connections within the cluster created by DatabaseReplicated were not secure, even if the parameter was enabled. [#65054](https://github.com/ClickHouse/ClickHouse/pull/65054) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Avoid writing to finalized buffer in File-like storages. [#65063](https://github.com/ClickHouse/ClickHouse/pull/65063) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible infinite query duration in case of cyclic aliases. Fixes [#64849](https://github.com/ClickHouse/ClickHouse/issues/64849). [#65081](https://github.com/ClickHouse/ClickHouse/pull/65081) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix the `Unknown expression identifier` error for remote queries with `INTERPOLATE (alias)` (new analyzer). Fixes [#64636](https://github.com/ClickHouse/ClickHouse/issues/64636). [#65090](https://github.com/ClickHouse/ClickHouse/pull/65090) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix pushing arithmetic operations out of aggregation. In the new analyzer, the optimization was previously applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)).
* Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)).
* Respond with 5xx instead of 200 OK in case of receive timeout while reading (parts of) the request body from the client socket. [#65118](https://github.com/ClickHouse/ClickHouse/pull/65118) ([Julian Maicher](https://github.com/jmaicher)).
* Backported in [#65734](https://github.com/ClickHouse/ClickHouse/issues/65734): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
* Fix possible crash for hedged requests. [#65206](https://github.com/ClickHouse/ClickHouse/pull/65206) ([Azat Khuzhin](https://github.com/azat)).
* Fix a bug in Hashed and Hashed_Array dictionary short-circuit evaluation, which could read an uninitialized number, leading to various errors. [#65256](https://github.com/ClickHouse/ClickHouse/pull/65256) ([jsc0218](https://github.com/jsc0218)).
* Ensure that the type of the constant (the `IN` operator's second argument) is always visible during the `IN` operator's type conversion. Otherwise, losing type information may cause some conversions to fail, such as the conversion from DateTime to Date. Fixes [#64487](https://github.com/ClickHouse/ClickHouse/issues/64487). [#65315](https://github.com/ClickHouse/ClickHouse/pull/65315) ([pn](https://github.com/chloro-pn)).
* Backported in [#65665](https://github.com/ClickHouse/ClickHouse/issues/65665): Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65606](https://github.com/ClickHouse/ClickHouse/issues/65606): Fix getting exception `Index out of bound for blob metadata` in case all files from list batch were filtered out. [#65523](https://github.com/ClickHouse/ClickHouse/pull/65523) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#65790](https://github.com/ClickHouse/ClickHouse/issues/65790): Fixed a bug in MergeJoin: a column in sparse serialization might be treated as a column of its nested type even though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#65814](https://github.com/ClickHouse/ClickHouse/issues/65814): Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#65830](https://github.com/ClickHouse/ClickHouse/issues/65830): Fix a bug in short-circuit logic when the old analyzer and `dictGetOrDefault` are used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
#### Build/Testing/Packaging Improvement
* ClickHouse is built with clang-18. A lot of new checks from clang-tidy-18 have been enabled. [#60469](https://github.com/ClickHouse/ClickHouse/pull/60469) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make `network` service be required when using the rc init script to start the ClickHouse server daemon. [#60650](https://github.com/ClickHouse/ClickHouse/pull/60650) ([Chun-Sheng, Li](https://github.com/peter279k)).
* Re-enable broken s390x build in CI. [#63135](https://github.com/ClickHouse/ClickHouse/pull/63135) ([Harry Lee](https://github.com/HarryLeeIBM)).
* The Dockerfile is reviewed by the docker official library in https://github.com/docker-library/official-images/pull/15846. [#63400](https://github.com/ClickHouse/ClickHouse/pull/63400) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Information about every symbol in every translation unit will be collected in the CI database for every build in the CI. This closes [#63494](https://github.com/ClickHouse/ClickHouse/issues/63494). [#63495](https://github.com/ClickHouse/ClickHouse/pull/63495) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Experimentally support loongarch64 as a new platform for ClickHouse. [#63733](https://github.com/ClickHouse/ClickHouse/pull/63733) ([qiangxuhui](https://github.com/qiangxuhui)).
* Update Apache Datasketches library. It resolves [#63858](https://github.com/ClickHouse/ClickHouse/issues/63858). [#63923](https://github.com/ClickHouse/ClickHouse/pull/63923) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Enable GRPC support for aarch64 linux while cross-compiling binary. [#64072](https://github.com/ClickHouse/ClickHouse/pull/64072) ([alesapin](https://github.com/alesapin)).
* Fix typo in test_hdfsCluster_unset_skip_unavailable_shards. The test writes data to unskip_unavailable_shards, but uses skip_unavailable_shards from the previous test. [#64243](https://github.com/ClickHouse/ClickHouse/pull/64243) ([Mikhail Artemenko](https://github.com/Michicosun)).
* Reduce the size of some slow tests. [#64387](https://github.com/ClickHouse/ClickHouse/pull/64387) ([Raúl Marín](https://github.com/Algunenano)).
* Reduce the size of some slow tests. [#64452](https://github.com/ClickHouse/ClickHouse/pull/64452) ([Raúl Marín](https://github.com/Algunenano)).
* Fix test_lost_part_other_replica. [#64512](https://github.com/ClickHouse/ClickHouse/pull/64512) ([Raúl Marín](https://github.com/Algunenano)).
* Add tests for experimental unequal joins and randomize new settings in clickhouse-test. [#64535](https://github.com/ClickHouse/ClickHouse/pull/64535) ([Nikita Fomichev](https://github.com/fm4v)).
* Upgrade tests: Update config and work with release candidates. [#64542](https://github.com/ClickHouse/ClickHouse/pull/64542) ([Raúl Marín](https://github.com/Algunenano)).
* Add support for LLVM XRay. [#64592](https://github.com/ClickHouse/ClickHouse/pull/64592) ([Tomer Shafir](https://github.com/tomershafir)).
* Speed up 02995_forget_partition. [#64761](https://github.com/ClickHouse/ClickHouse/pull/64761) ([Raúl Marín](https://github.com/Algunenano)).
* Fix 02790_async_queries_in_query_log. [#64764](https://github.com/ClickHouse/ClickHouse/pull/64764) ([Raúl Marín](https://github.com/Algunenano)).
* Support LLVM XRay on Linux amd64 only. [#64837](https://github.com/ClickHouse/ClickHouse/pull/64837) ([Tomer Shafir](https://github.com/tomershafir)).
* Get rid of custom code in `tests/ci/download_release_packages.py` and `tests/ci/get_previous_release_tag.py` to avoid issues after the https://github.com/ClickHouse/ClickHouse/pull/64759 is merged. [#64848](https://github.com/ClickHouse/ClickHouse/pull/64848) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Decrease the size of the `unit-test` image several times. [#65102](https://github.com/ClickHouse/ClickHouse/pull/65102) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### NO CL CATEGORY
* Backported in [#65568](https://github.com/ClickHouse/ClickHouse/issues/65568):. [#65498](https://github.com/ClickHouse/ClickHouse/pull/65498) ([Sergei Trifonov](https://github.com/serxa)).
* Backported in [#65693](https://github.com/ClickHouse/ClickHouse/issues/65693):. [#65686](https://github.com/ClickHouse/ClickHouse/pull/65686) ([Raúl Marín](https://github.com/Algunenano)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Do not remove server constants from GROUP BY key for secondary query."'. [#63297](https://github.com/ClickHouse/ClickHouse/pull/63297) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Introduce bulk loading to StorageEmbeddedRocksDB"'. [#63316](https://github.com/ClickHouse/ClickHouse/pull/63316) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Revert "Do not remove server constants from GROUP BY key for secondary query.""'. [#63415](https://github.com/ClickHouse/ClickHouse/pull/63415) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* NO CL ENTRY: 'Revert "Fix index analysis for `DateTime64`"'. [#63525](https://github.com/ClickHouse/ClickHouse/pull/63525) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Update gui.md - Add ch-ui to open-source available tools."'. [#64064](https://github.com/ClickHouse/ClickHouse/pull/64064) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Prevent conversion to Replicated if zookeeper path already exists"'. [#64214](https://github.com/ClickHouse/ClickHouse/pull/64214) ([Sergei Trifonov](https://github.com/serxa)).
* NO CL ENTRY: 'Revert "Refactoring of Server.h: Isolate server management from other logic"'. [#64425](https://github.com/ClickHouse/ClickHouse/pull/64425) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Remove some unnecessary `UNREACHABLE`s"'. [#64430](https://github.com/ClickHouse/ClickHouse/pull/64430) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "CI: fix build_report selection in case of job reuse"'. [#64516](https://github.com/ClickHouse/ClickHouse/pull/64516) ([Max K.](https://github.com/maxknv)).
* NO CL ENTRY: 'Revert "Revert "CI: fix build_report selection in case of job reuse""'. [#64531](https://github.com/ClickHouse/ClickHouse/pull/64531) ([Max K.](https://github.com/maxknv)).
* NO CL ENTRY: 'Revert "Add `fromReadableSize` function"'. [#64616](https://github.com/ClickHouse/ClickHouse/pull/64616) ([Robert Schulze](https://github.com/rschu1ze)).
* NO CL ENTRY: 'Update CHANGELOG.md'. [#64816](https://github.com/ClickHouse/ClickHouse/pull/64816) ([Paweł Kudzia](https://github.com/pakud)).
* NO CL ENTRY: 'Revert "Reduce lock contention for MergeTree tables (by renaming parts without holding lock)"'. [#64899](https://github.com/ClickHouse/ClickHouse/pull/64899) ([alesapin](https://github.com/alesapin)).
* NO CL ENTRY: 'Revert "Add dynamic untracked memory limits for more precise memory tracking"'. [#64969](https://github.com/ClickHouse/ClickHouse/pull/64969) ([Sergei Trifonov](https://github.com/serxa)).
* NO CL ENTRY: 'Revert "Fix duplicating Delete events in blob_storage_log"'. [#65049](https://github.com/ClickHouse/ClickHouse/pull/65049) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Revert "Fix duplicating Delete events in blob_storage_log""'. [#65053](https://github.com/ClickHouse/ClickHouse/pull/65053) ([vdimir](https://github.com/vdimir)).
* NO CL ENTRY: 'Revert "S3: reduce retires time for queries, increase retries count for backups"'. [#65148](https://github.com/ClickHouse/ClickHouse/pull/65148) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Small fix for 02340_parts_refcnt_mergetree"'. [#65149](https://github.com/ClickHouse/ClickHouse/pull/65149) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Change default s3_throw_on_zero_files_match to true, document that presigned S3 URLs are not supported"'. [#65250](https://github.com/ClickHouse/ClickHouse/pull/65250) ([Max K.](https://github.com/maxknv)).
* NO CL ENTRY: 'Revert "Fix AWS ECS"'. [#65361](https://github.com/ClickHouse/ClickHouse/pull/65361) ([Alexander Tokmakov](https://github.com/tavplubix)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Try abort on current thread join. [#42544](https://github.com/ClickHouse/ClickHouse/pull/42544) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* This change was reverted. [#51008](https://github.com/ClickHouse/ClickHouse/pull/51008) ([Michael Kolupaev](https://github.com/al13n321)).
* Analyzer fuzzer 2. [#57098](https://github.com/ClickHouse/ClickHouse/pull/57098) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Analyzer fuzzer 4. [#57101](https://github.com/ClickHouse/ClickHouse/pull/57101) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Check python code with flake8. [#58349](https://github.com/ClickHouse/ClickHouse/pull/58349) ([Azat Khuzhin](https://github.com/azat)).
* Unite s3/hdfs/azure storage implementations into a single class working with IObjectStorage. Same for *Cluster, data lakes and Queue storages. [#59767](https://github.com/ClickHouse/ClickHouse/pull/59767) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove http_max_chunk_size setting (too internal). [#60852](https://github.com/ClickHouse/ClickHouse/pull/60852) ([Azat Khuzhin](https://github.com/azat)).
* Fix race in refreshable materialized views causing SELECT to fail sometimes. [#60883](https://github.com/ClickHouse/ClickHouse/pull/60883) ([Michael Kolupaev](https://github.com/al13n321)).
* Refactor KeyCondition and key analysis to improve PartitionPruner and trivial count optimization. This is separated from [#60463](https://github.com/ClickHouse/ClickHouse/issues/60463) . [#61459](https://github.com/ClickHouse/ClickHouse/pull/61459) ([Amos Bird](https://github.com/amosbird)).
* Implement cumulative A Sync status. [#61464](https://github.com/ClickHouse/ClickHouse/pull/61464) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Parallel replicas: table check failover. [#61935](https://github.com/ClickHouse/ClickHouse/pull/61935) ([Igor Nikonov](https://github.com/devcrafter)).
* This change was reverted. [#61973](https://github.com/ClickHouse/ClickHouse/pull/61973) ([Azat Khuzhin](https://github.com/azat)).
* Avoid crashing on column type mismatch in a few dozen places. [#62087](https://github.com/ClickHouse/ClickHouse/pull/62087) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix optimize_if_chain_to_multiif const NULL handling. [#62104](https://github.com/ClickHouse/ClickHouse/pull/62104) ([Michael Kolupaev](https://github.com/al13n321)).
* Use intrusive lists for `ResourceRequest` instead of deque. [#62165](https://github.com/ClickHouse/ClickHouse/pull/62165) ([Sergei Trifonov](https://github.com/serxa)).
* Analyzer: Fix validateAggregates for tables with different aliases. [#62346](https://github.com/ClickHouse/ClickHouse/pull/62346) ([vdimir](https://github.com/vdimir)).
* Improve code and tests of `DROP` of multiple tables. [#62359](https://github.com/ClickHouse/ClickHouse/pull/62359) ([zhongyuankai](https://github.com/zhongyuankai)).
* Fix exception message during writing to partitioned s3/hdfs/azure path with globs. [#62423](https://github.com/ClickHouse/ClickHouse/pull/62423) ([Kruglov Pavel](https://github.com/Avogar)).
* Support UBSan on Clang-19 (master). [#62466](https://github.com/ClickHouse/ClickHouse/pull/62466) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Replay ZK logs using keeper-bench. [#62481](https://github.com/ClickHouse/ClickHouse/pull/62481) ([Antonio Andelic](https://github.com/antonio2368)).
* Save the stacktrace of thread waiting on failing AsyncLoader job. [#62719](https://github.com/ClickHouse/ClickHouse/pull/62719) ([Sergei Trifonov](https://github.com/serxa)).
* group_by_use_nulls strikes back. [#62922](https://github.com/ClickHouse/ClickHouse/pull/62922) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Analyzer: prefer column name to alias from array join. [#62995](https://github.com/ClickHouse/ClickHouse/pull/62995) ([vdimir](https://github.com/vdimir)).
* CI: try separate the workflows file for GitHub's Merge Queue. [#63123](https://github.com/ClickHouse/ClickHouse/pull/63123) ([Max K.](https://github.com/maxknv)).
* Try to fix coverage tests. [#63130](https://github.com/ClickHouse/ClickHouse/pull/63130) ([Raúl Marín](https://github.com/Algunenano)).
* Fix azure backup flaky test. [#63158](https://github.com/ClickHouse/ClickHouse/pull/63158) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Merging [#60920](https://github.com/ClickHouse/ClickHouse/issues/60920). [#63159](https://github.com/ClickHouse/ClickHouse/pull/63159) ([vdimir](https://github.com/vdimir)).
* QueryAnalysisPass improve QUALIFY validation. [#63162](https://github.com/ClickHouse/ClickHouse/pull/63162) ([Maksim Kita](https://github.com/kitaisreal)).
* Add numpy tests for different endianness. [#63189](https://github.com/ClickHouse/ClickHouse/pull/63189) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Clean the `_work` directory between runner's launches. Fallback to auto-update actions runner if it fails to start. Make the `init-network.sh` sourceable and executable. [#63195](https://github.com/ClickHouse/ClickHouse/pull/63195) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add ability to run Azure tests in PR with label. [#63196](https://github.com/ClickHouse/ClickHouse/pull/63196) ([alesapin](https://github.com/alesapin)).
* Fix possible endless loop while reading from azure. [#63197](https://github.com/ClickHouse/ClickHouse/pull/63197) ([Anton Popov](https://github.com/CurtizJ)).
* Add information about materialized view security bug fix into the changelog. [#63204](https://github.com/ClickHouse/ClickHouse/pull/63204) ([pufit](https://github.com/pufit)).
* Disable one test from 02994_sanity_check_settings. [#63208](https://github.com/ClickHouse/ClickHouse/pull/63208) ([Raúl Marín](https://github.com/Algunenano)).
* Enable custom parquet encoder by default, attempt 2. [#63210](https://github.com/ClickHouse/ClickHouse/pull/63210) ([Michael Kolupaev](https://github.com/al13n321)).
* Update version after release. [#63215](https://github.com/ClickHouse/ClickHouse/pull/63215) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v24.4.1.2088-stable. [#63217](https://github.com/ClickHouse/ClickHouse/pull/63217) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v24.3.3.102-lts. [#63226](https://github.com/ClickHouse/ClickHouse/pull/63226) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v24.2.3.70-stable. [#63227](https://github.com/ClickHouse/ClickHouse/pull/63227) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Return back [#61551](https://github.com/ClickHouse/ClickHouse/issues/61551) (More optimal loading of marks). [#63233](https://github.com/ClickHouse/ClickHouse/pull/63233) ([Anton Popov](https://github.com/CurtizJ)).
* Hide CI options under a spoiler. [#63237](https://github.com/ClickHouse/ClickHouse/pull/63237) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Add azure run with msan. [#63238](https://github.com/ClickHouse/ClickHouse/pull/63238) ([alesapin](https://github.com/alesapin)).
* The syntax for this command is now the following: `TRUNCATE ALL TABLES FROM [IF EXISTS] <database_name>` (see the example below). [#63241](https://github.com/ClickHouse/ClickHouse/pull/63241) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
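  A minimal illustration of the new syntax (the database name `staging` is hypothetical):

  ```sql
  -- Truncate every table in the database; IF EXISTS suppresses the error
  -- when the database is absent.
  TRUNCATE ALL TABLES FROM IF EXISTS staging;
  ```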
* Minor follow-up to a renaming PR. [#63260](https://github.com/ClickHouse/ClickHouse/pull/63260) ([Robert Schulze](https://github.com/rschu1ze)).
* Follow-up for [#62613](https://github.com/ClickHouse/ClickHouse/issues/62613). Adding back checks similar to these: https://github.com/ClickHouse/ClickHouse/pull/62613/files#diff-70859078da57ecdfc66d26f732c0d7718d269e82bdc80e62b39f5ffeab36c05bL99 https://github.com/ClickHouse/ClickHouse/pull/62613/files#diff-70859078da57ecdfc66d26f732c0d7718d269e82bdc80e62b39f5ffeab36c05bL144-L149. [#63274](https://github.com/ClickHouse/ClickHouse/pull/63274) ([Alexander Gololobov](https://github.com/davenger)).
* This setting was added in 24.5, not 24.4. [#63278](https://github.com/ClickHouse/ClickHouse/pull/63278) ([Raúl Marín](https://github.com/Algunenano)).
* Improve cloud backport script. [#63282](https://github.com/ClickHouse/ClickHouse/pull/63282) ([Raúl Marín](https://github.com/Algunenano)).
* Update version_date.tsv and changelogs after v23.8.14.6-lts. [#63285](https://github.com/ClickHouse/ClickHouse/pull/63285) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix azure flaky test. [#63286](https://github.com/ClickHouse/ClickHouse/pull/63286) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix deadlock in `CacheDictionaryUpdateQueue` in case of exception in constructor. [#63287](https://github.com/ClickHouse/ClickHouse/pull/63287) ([Nikita Taranov](https://github.com/nickitat)).
* DiskApp: fix 'list --recursive /' and crash on invalid arguments. [#63296](https://github.com/ClickHouse/ClickHouse/pull/63296) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix terminate because of unhandled exception in `MergeTreeDeduplicationLog::shutdown`. [#63298](https://github.com/ClickHouse/ClickHouse/pull/63298) ([Nikita Taranov](https://github.com/nickitat)).
* Move s3_plain_rewritable unit test to shell. [#63317](https://github.com/ClickHouse/ClickHouse/pull/63317) ([Julia Kartseva](https://github.com/jkartseva)).
* Add tests for [#63264](https://github.com/ClickHouse/ClickHouse/issues/63264). [#63321](https://github.com/ClickHouse/ClickHouse/pull/63321) ([Raúl Marín](https://github.com/Algunenano)).
* Try fix segfault in `MergeTreeReadPoolBase::createTask`. [#63323](https://github.com/ClickHouse/ClickHouse/pull/63323) ([Antonio Andelic](https://github.com/antonio2368)).
* Reduce time-to-insert profiling data in case of logs cluster issues. [#63325](https://github.com/ClickHouse/ClickHouse/pull/63325) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update README.md. [#63326](https://github.com/ClickHouse/ClickHouse/pull/63326) ([Tyler Hannan](https://github.com/tylerhannan)).
* This should fix failures with error like `Permission denied ["/var/lib/clickhouse/disks/s3/store/364/3643ff83-0996-4a4a-a90b-a96e66a10c74"]` when table dir was chmod-ed by DatabaseCatalog. [#63330](https://github.com/ClickHouse/ClickHouse/pull/63330) ([Alexander Gololobov](https://github.com/davenger)).
* Use `/commit/` to have the URLs in [reports](https://play.clickhouse.com/play?user=play#c2VsZWN0IGRpc3RpbmN0IGNvbW1pdF91cmwgZnJvbSBjaGVja3Mgd2hlcmUgY2hlY2tfc3RhcnRfdGltZSA+PSBub3coKSAtIGludGVydmFsIDEgbW9udGggYW5kIHB1bGxfcmVxdWVzdF9udW1iZXI9NjA1MzI=) like https://github.com/ClickHouse/ClickHouse/commit/44f8bc5308b53797bec8cccc3bd29fab8a00235d and not like https://github.com/ClickHouse/ClickHouse/commits/44f8bc5308b53797bec8cccc3bd29fab8a00235d. [#63331](https://github.com/ClickHouse/ClickHouse/pull/63331) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add test for [#56287](https://github.com/ClickHouse/ClickHouse/issues/56287). [#63340](https://github.com/ClickHouse/ClickHouse/pull/63340) ([Raúl Marín](https://github.com/Algunenano)).
* Update README.md. [#63350](https://github.com/ClickHouse/ClickHouse/pull/63350) ([Tyler Hannan](https://github.com/tylerhannan)).
* Add test for [#48049](https://github.com/ClickHouse/ClickHouse/issues/48049). [#63351](https://github.com/ClickHouse/ClickHouse/pull/63351) ([Raúl Marín](https://github.com/Algunenano)).
* Add option `query_id_prefix` to `clickhouse-benchmark`. [#63352](https://github.com/ClickHouse/ClickHouse/pull/63352) ([Anton Popov](https://github.com/CurtizJ)).
* The new azurite version misbehaves (at least with Ubuntu 22.04.4 LTS): `azurite --version` fails in `/usr/local/lib/node_modules/azurite/dist/src/common/persistence/MemoryExtentStore.js:53` at `return this._chunks.get(categoryName)?.chunks.get(id);`. [#63354](https://github.com/ClickHouse/ClickHouse/pull/63354) ([alesapin](https://github.com/alesapin)).
* Randomize setting `enable_block_offset_column` in stress tests. [#63355](https://github.com/ClickHouse/ClickHouse/pull/63355) ([Anton Popov](https://github.com/CurtizJ)).
* Fix AST parsing of invalid type names. [#63357](https://github.com/ClickHouse/ClickHouse/pull/63357) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix some 00002_log_and_exception_messages_formatting flakiness. [#63358](https://github.com/ClickHouse/ClickHouse/pull/63358) ([Michael Kolupaev](https://github.com/al13n321)).
* Add tags for the test 03000_traverse_shadow_system_data_paths.sql to make it stable. [#63366](https://github.com/ClickHouse/ClickHouse/pull/63366) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Add a test for [#55655](https://github.com/ClickHouse/ClickHouse/issues/55655). [#63380](https://github.com/ClickHouse/ClickHouse/pull/63380) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix data race in `reportBrokenPart`. [#63396](https://github.com/ClickHouse/ClickHouse/pull/63396) ([Antonio Andelic](https://github.com/antonio2368)).
* Workaround for `oklch()` inside canvas bug for firefox. [#63404](https://github.com/ClickHouse/ClickHouse/pull/63404) ([Sergei Trifonov](https://github.com/serxa)).
* Add test for issue [#47862](https://github.com/ClickHouse/ClickHouse/issues/47862). [#63424](https://github.com/ClickHouse/ClickHouse/pull/63424) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix parsing of `CREATE INDEX` query. [#63425](https://github.com/ClickHouse/ClickHouse/pull/63425) ([Anton Popov](https://github.com/CurtizJ)).
* We are using Shared Catalog in the CI Logs cluster. [#63442](https://github.com/ClickHouse/ClickHouse/pull/63442) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix collection of coverage data in the CI Logs cluster. [#63453](https://github.com/ClickHouse/ClickHouse/pull/63453) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test for rocksdb bulk sink. [#63457](https://github.com/ClickHouse/ClickHouse/pull/63457) ([Duc Canh Le](https://github.com/canhld94)).
* Extra constraints for stress and fuzzer tests. [#63470](https://github.com/ClickHouse/ClickHouse/pull/63470) ([Raúl Marín](https://github.com/Algunenano)).
* io_uring: refactor get reader from context. [#63475](https://github.com/ClickHouse/ClickHouse/pull/63475) ([Tomer Shafir](https://github.com/tomershafir)).
* Analyzer setting max_streams_to_max_threads_ratio overflow fix. [#63478](https://github.com/ClickHouse/ClickHouse/pull/63478) ([Maksim Kita](https://github.com/kitaisreal)).
* Provides a setting `output_format_pretty_preserve_border_for_multiline_string` which allows rendering multiline strings better in Pretty formats. The default value for this setting is true. [#63479](https://github.com/ClickHouse/ClickHouse/pull/63479) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
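  A minimal sketch of toggling the setting per query (the literal string is illustrative):

  ```sql
  -- Render a multiline string in a Pretty format with the border preserved.
  SELECT 'first line\nsecond line' AS s
  SETTINGS output_format_pretty_preserve_border_for_multiline_string = 1
  FORMAT Pretty;
  ```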
* Fix logical error when reloading config with a custom-created web disk, broken after [#56367](https://github.com/ClickHouse/ClickHouse/issues/56367). [#63484](https://github.com/ClickHouse/ClickHouse/pull/63484) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add test for [#49307](https://github.com/ClickHouse/ClickHouse/issues/49307). [#63486](https://github.com/ClickHouse/ClickHouse/pull/63486) ([Anton Popov](https://github.com/CurtizJ)).
* Remove leftovers of GCC support in cmake rules. [#63488](https://github.com/ClickHouse/ClickHouse/pull/63488) ([Azat Khuzhin](https://github.com/azat)).
* Fix ProfileEventTimeIncrement code. [#63489](https://github.com/ClickHouse/ClickHouse/pull/63489) ([Azat Khuzhin](https://github.com/azat)).
* MergeTreePrefetchedReadPool: Print parent name when logging projection parts. [#63522](https://github.com/ClickHouse/ClickHouse/pull/63522) ([Raúl Marín](https://github.com/Algunenano)).
* Correctly stop `asyncCopy` tasks in all cases. [#63523](https://github.com/ClickHouse/ClickHouse/pull/63523) ([Antonio Andelic](https://github.com/antonio2368)).
* Almost everything should work on AArch64 (Part of [#58061](https://github.com/ClickHouse/ClickHouse/issues/58061)). [#63527](https://github.com/ClickHouse/ClickHouse/pull/63527) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update randomization of `old_parts_lifetime`. [#63530](https://github.com/ClickHouse/ClickHouse/pull/63530) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update 02240_system_filesystem_cache_table.sh. [#63531](https://github.com/ClickHouse/ClickHouse/pull/63531) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix data race in `DistributedSink`. [#63538](https://github.com/ClickHouse/ClickHouse/pull/63538) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix azure tests run on master. [#63540](https://github.com/ClickHouse/ClickHouse/pull/63540) ([alesapin](https://github.com/alesapin)).
* The commit 2b8254f987a65d5c21d74fe67b4ee9757970466e was not synced into the cloud because it was falsely marked as a success by `upstream_pr.head.sha`. Here we'll try our best to find a proper commit, and won't do anything if we can't. [#63543](https://github.com/ClickHouse/ClickHouse/pull/63543) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add `no-s3-storage` tag to local_plain_rewritable ut. [#63546](https://github.com/ClickHouse/ClickHouse/pull/63546) ([Julia Kartseva](https://github.com/jkartseva)).
* Add `jwcrypto` to integration tests runner. [#63551](https://github.com/ClickHouse/ClickHouse/pull/63551) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Go back to upstream lz4. [#63574](https://github.com/ClickHouse/ClickHouse/pull/63574) ([Raúl Marín](https://github.com/Algunenano)).
* Fix logical error in ColumnTuple::tryInsert(). [#63583](https://github.com/ClickHouse/ClickHouse/pull/63583) ([Michael Kolupaev](https://github.com/al13n321)).
* Harmonize `sumMap` error messages on ILLEGAL_TYPE_OF_ARGUMENT. [#63619](https://github.com/ClickHouse/ClickHouse/pull/63619) ([Yohann Jardin](https://github.com/yohannj)).
* Refactor data part writer to remove dependencies on MergeTreeData and DataPart. [#63620](https://github.com/ClickHouse/ClickHouse/pull/63620) ([Alexander Gololobov](https://github.com/davenger)).
* Update README.md. [#63631](https://github.com/ClickHouse/ClickHouse/pull/63631) ([Tyler Hannan](https://github.com/tylerhannan)).
* Ignore the global profiler if system.trace_log is not enabled, and really disable it for the keeper standalone build. [#63632](https://github.com/ClickHouse/ClickHouse/pull/63632) ([Azat Khuzhin](https://github.com/azat)).
* Fixes for 00002_log_and_exception_messages_formatting. [#63634](https://github.com/ClickHouse/ClickHouse/pull/63634) ([Azat Khuzhin](https://github.com/azat)).
* Fix 02362_part_log_merge_algorithm flaky test. [#63635](https://github.com/ClickHouse/ClickHouse/pull/63635) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Fix tests flakiness due to long SYSTEM FLUSH LOGS (explicitly specify old_parts_lifetime). [#63639](https://github.com/ClickHouse/ClickHouse/pull/63639) ([Azat Khuzhin](https://github.com/azat)).
* Update clickhouse-test help section. [#63663](https://github.com/ClickHouse/ClickHouse/pull/63663) ([Ali](https://github.com/xogoodnow)).
* Fix bad test `02950_part_log_bytes_uncompressed`. [#63672](https://github.com/ClickHouse/ClickHouse/pull/63672) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove leftovers of `optimize_monotonous_functions_in_order_by`. [#63674](https://github.com/ClickHouse/ClickHouse/pull/63674) ([Nikita Taranov](https://github.com/nickitat)).
* tests: attempt to fix 02340_parts_refcnt_mergetree flakiness. [#63684](https://github.com/ClickHouse/ClickHouse/pull/63684) ([Azat Khuzhin](https://github.com/azat)).
* Parallel replicas: simple cleanup. [#63685](https://github.com/ClickHouse/ClickHouse/pull/63685) ([Igor Nikonov](https://github.com/devcrafter)).
* Cancel S3 reads properly when parallel reads are used. [#63687](https://github.com/ClickHouse/ClickHouse/pull/63687) ([Antonio Andelic](https://github.com/antonio2368)).
* Explain insertion order of the Map datatype; see the example below. [#63690](https://github.com/ClickHouse/ClickHouse/pull/63690) ([Mark Needham](https://github.com/mneedham)).
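  A small example of the documented behavior (assuming keys are kept in insertion order rather than sorted):

  ```sql
  -- The result preserves insertion order of the keys: {'b':2,'a':1}.
  SELECT map('b', 2, 'a', 1) AS m;
  ```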
* selectRangesToRead() simple cleanup. [#63692](https://github.com/ClickHouse/ClickHouse/pull/63692) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix fuzzed analyzer_join_with_constant query. [#63702](https://github.com/ClickHouse/ClickHouse/pull/63702) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add missing explicit instantiations of ColumnUnique. [#63718](https://github.com/ClickHouse/ClickHouse/pull/63718) ([Raúl Marín](https://github.com/Algunenano)).
* Better asserts in ColumnString.h. [#63719](https://github.com/ClickHouse/ClickHouse/pull/63719) ([Raúl Marín](https://github.com/Algunenano)).
* Try to fix flaky s3 tests test_seekable_formats and test_seekable_formats_url. [#63720](https://github.com/ClickHouse/ClickHouse/pull/63720) ([Kruglov Pavel](https://github.com/Avogar)).
* Don't randomize some settings in 02941_variant_type_* tests to avoid timeouts. [#63721](https://github.com/ClickHouse/ClickHouse/pull/63721) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix flaky 03145_non_loaded_projection_backup.sh. [#63728](https://github.com/ClickHouse/ClickHouse/pull/63728) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Userspace page cache: don't collect stats if cache is unused. [#63730](https://github.com/ClickHouse/ClickHouse/pull/63730) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix insignificant UBSAN error in QueryAnalyzer::replaceNodesWithPositionalArguments(). [#63734](https://github.com/ClickHouse/ClickHouse/pull/63734) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix a bug in resolving matcher inside lambda inside ARRAY JOIN. [#63744](https://github.com/ClickHouse/ClickHouse/pull/63744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Self-explanatory. [#63754](https://github.com/ClickHouse/ClickHouse/pull/63754) ([Arthur Passos](https://github.com/arthurpassos)).
* Do not hide disk name. [#63756](https://github.com/ClickHouse/ClickHouse/pull/63756) ([Kseniia Sumarokova](https://github.com/kssenii)).
* CI: remove Cancel and Debug workflows as redundant. [#63757](https://github.com/ClickHouse/ClickHouse/pull/63757) ([Max K.](https://github.com/maxknv)).
* Security Policy: Add notification process. [#63773](https://github.com/ClickHouse/ClickHouse/pull/63773) ([Leticia Webb](https://github.com/leticiawebb)).
* Fix typo. [#63774](https://github.com/ClickHouse/ClickHouse/pull/63774) ([Anton Popov](https://github.com/CurtizJ)).
* Fix fuzzer when only explicit faults are used. [#63775](https://github.com/ClickHouse/ClickHouse/pull/63775) ([Raúl Marín](https://github.com/Algunenano)).
* Settings typo. [#63782](https://github.com/ClickHouse/ClickHouse/pull/63782) ([Rory Crispin](https://github.com/RoryCrispin)).
* Ref. [#63479](https://github.com/ClickHouse/ClickHouse/issues/63479). [#63783](https://github.com/ClickHouse/ClickHouse/pull/63783) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix test_odbc_interaction for aarch64 [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63787](https://github.com/ClickHouse/ClickHouse/pull/63787) ([alesapin](https://github.com/alesapin)).
* Fix test `test_catboost_evaluate` for aarch64. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63789](https://github.com/ClickHouse/ClickHouse/pull/63789) ([alesapin](https://github.com/alesapin)).
* Rewrite plan for parallel replicas in Planner. [#63796](https://github.com/ClickHouse/ClickHouse/pull/63796) ([Igor Nikonov](https://github.com/devcrafter)).
* Follow-up for the `binary_symbols` table in CI. [#63802](https://github.com/ClickHouse/ClickHouse/pull/63802) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support INSERT with VALUES in the ANTLR syntax file. [#63811](https://github.com/ClickHouse/ClickHouse/pull/63811) ([GG Bond](https://github.com/zzyReal666)).
* Fix race in `ReplicatedMergeTreeLogEntryData`. [#63816](https://github.com/ClickHouse/ClickHouse/pull/63816) ([Antonio Andelic](https://github.com/antonio2368)).
* Allow allocation during job destructor in `ThreadPool`. [#63829](https://github.com/ClickHouse/ClickHouse/pull/63829) ([Antonio Andelic](https://github.com/antonio2368)).
* Remove HDFS from disks config for one integration test for arm. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63832](https://github.com/ClickHouse/ClickHouse/pull/63832) ([alesapin](https://github.com/alesapin)).
* io_uring: add basic io_uring clickhouse perf test. [#63835](https://github.com/ClickHouse/ClickHouse/pull/63835) ([Tomer Shafir](https://github.com/tomershafir)).
* Bump version for old image in test_short_strings_aggregation to make it work on arm. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63836](https://github.com/ClickHouse/ClickHouse/pull/63836) ([alesapin](https://github.com/alesapin)).
* Fix typo. [#63838](https://github.com/ClickHouse/ClickHouse/pull/63838) ([Alexander Gololobov](https://github.com/davenger)).
* Disable test `test_non_default_compression/test.py::test_preconfigured_deflateqpl_codec` on arm. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63839](https://github.com/ClickHouse/ClickHouse/pull/63839) ([alesapin](https://github.com/alesapin)).
* This PR was reverted. [#63857](https://github.com/ClickHouse/ClickHouse/pull/63857) ([Sema Checherinda](https://github.com/CheSema)).
* Remove unnecessary logging statements in MergeJoinTransform.cpp. [#63860](https://github.com/ClickHouse/ClickHouse/pull/63860) ([vdimir](https://github.com/vdimir)).
* Temporarily disable 3 integration test cases on ARM until https://github.com/clickhouse/clickhouse/issues/63855 is resolved. [#63867](https://github.com/ClickHouse/ClickHouse/pull/63867) ([Max K.](https://github.com/maxknv)).
* Fix some settings values in 02455_one_row_from_csv_memory_usage test to make it less flaky. [#63874](https://github.com/ClickHouse/ClickHouse/pull/63874) ([Kruglov Pavel](https://github.com/Avogar)).
* Randomise `allow_experimental_parallel_reading_from_replicas` in stress tests. [#63899](https://github.com/ClickHouse/ClickHouse/pull/63899) ([Nikita Taranov](https://github.com/nickitat)).
* Fix logs test for binary data by converting it to a valid UTF8 string. [#63909](https://github.com/ClickHouse/ClickHouse/pull/63909) ([Alexey Katsman](https://github.com/alexkats)).
* More sanity checks for parallel replicas. [#63910](https://github.com/ClickHouse/ClickHouse/pull/63910) ([Nikita Taranov](https://github.com/nickitat)).
* Include checks like `Stateless tests (asan, distributed cache, meta storage in keeper, s3 storage) [2/3]` in `Mergeable Check` and `A Sync`. [#63945](https://github.com/ClickHouse/ClickHouse/pull/63945) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Insignificant libunwind build fixes. [#63946](https://github.com/ClickHouse/ClickHouse/pull/63946) ([Azat Khuzhin](https://github.com/azat)).
* Revert multiline pretty changes due to performance problems. [#63947](https://github.com/ClickHouse/ClickHouse/pull/63947) ([Raúl Marín](https://github.com/Algunenano)).
* Some usability improvements for c++expr script. [#63948](https://github.com/ClickHouse/ClickHouse/pull/63948) ([Azat Khuzhin](https://github.com/azat)).
* Fix 02124_insert_deduplication_token_multiple_blocks. [#63950](https://github.com/ClickHouse/ClickHouse/pull/63950) ([Han Fei](https://github.com/hanfei1991)).
* CI: aarch64: disable arm integration tests with kerberaized kafka. [#63961](https://github.com/ClickHouse/ClickHouse/pull/63961) ([Max K.](https://github.com/maxknv)).
* Make events like [timeouts](https://play.clickhouse.com/play?user=play#U0VMRUNUICogRlJPTSBjaGVja3MgV0hFUkUgdGVzdF9uYW1lID09ICdDaGVjayB0aW1lb3V0IGV4cGlyZWQnIEFORCBjaGVja19zdGFydF90aW1lIEJFVFdFRU4gdG9EYXRlKCcyMDI0LTA1LTEwJykgQU5EIHRvRGF0ZSgnMjAyNC0wNS0xNScp) visible in CI DB. [#63982](https://github.com/ClickHouse/ClickHouse/pull/63982) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Slightly better setting `force_optimize_projection_name`. [#63997](https://github.com/ClickHouse/ClickHouse/pull/63997) ([Anton Popov](https://github.com/CurtizJ)).
* chore(ci-workers): remove reusable from tailscale key. [#63999](https://github.com/ClickHouse/ClickHouse/pull/63999) ([Gabriel Martinez](https://github.com/GMartinez-Sisti)).
* Better script to collect symbols statistics. [#64013](https://github.com/ClickHouse/ClickHouse/pull/64013) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a typo in Analyzer. [#64022](https://github.com/ClickHouse/ClickHouse/pull/64022) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix libbcrypt for FreeBSD build. [#64023](https://github.com/ClickHouse/ClickHouse/pull/64023) ([Azat Khuzhin](https://github.com/azat)).
* Remove some unnecessary `UNREACHABLE`s. [#64035](https://github.com/ClickHouse/ClickHouse/pull/64035) ([Robert Schulze](https://github.com/rschu1ze)).
* Add `ClickHouseVersion.copy` method. Create a branch release in advance without spinning out the release to increase the stability. [#64039](https://github.com/ClickHouse/ClickHouse/pull/64039) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix searching for libclang_rt.builtins.*.a on FreeBSD. [#64051](https://github.com/ClickHouse/ClickHouse/pull/64051) ([Azat Khuzhin](https://github.com/azat)).
* The mime type is not 100% reliable for Python and shell scripts without shebangs; add a check for file extension. [#64062](https://github.com/ClickHouse/ClickHouse/pull/64062) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix waiting for mutations with retriable errors. [#64063](https://github.com/ClickHouse/ClickHouse/pull/64063) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Harmonize `h3PointDist*` error messages. [#64080](https://github.com/ClickHouse/ClickHouse/pull/64080) ([Yohann Jardin](https://github.com/yohannj)).
* This log message is better in Trace. [#64081](https://github.com/ClickHouse/ClickHouse/pull/64081) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Prevent stack overflow in Fuzzer and Stress test. [#64082](https://github.com/ClickHouse/ClickHouse/pull/64082) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* tests: fix expected error for 03036_reading_s3_archives (fixes CI). [#64089](https://github.com/ClickHouse/ClickHouse/pull/64089) ([Azat Khuzhin](https://github.com/azat)).
* Fix sanitizers. [#64090](https://github.com/ClickHouse/ClickHouse/pull/64090) ([Azat Khuzhin](https://github.com/azat)).
* Update llvm/clang to 18.1.6. [#64091](https://github.com/ClickHouse/ClickHouse/pull/64091) ([Azat Khuzhin](https://github.com/azat)).
* Set the green Mergeable Check status only after all required checks have passed with success. All non-required checks start at stage Test_3 once all required checks have passed in Test_1/2. [#64093](https://github.com/ClickHouse/ClickHouse/pull/64093) ([Max K.](https://github.com/maxknv)).
* Move `isAllASCII` from UTFHelper to StringUtils. [#64108](https://github.com/ClickHouse/ClickHouse/pull/64108) ([Robert Schulze](https://github.com/rschu1ze)).
* Throw out some `inline`s. [#64110](https://github.com/ClickHouse/ClickHouse/pull/64110) ([Robert Schulze](https://github.com/rschu1ze)).
* Clean up .clang-tidy after transition to Clang 18. [#64111](https://github.com/ClickHouse/ClickHouse/pull/64111) ([Robert Schulze](https://github.com/rschu1ze)).
* Ignore exception when checking for cgroupsv2. [#64118](https://github.com/ClickHouse/ClickHouse/pull/64118) ([Robert Schulze](https://github.com/rschu1ze)).
* Add retries in git submodule update. [#64125](https://github.com/ClickHouse/ClickHouse/pull/64125) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* See https://s3.amazonaws.com/clickhouse-test-reports/63946/86cf1e13d866333b8a511badd7f2fe186d810646/ast_fuzzer__ubsan_.html. [#64127](https://github.com/ClickHouse/ClickHouse/pull/64127) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Refactoring of Server.h: Isolate server management from other logic. [#64132](https://github.com/ClickHouse/ClickHouse/pull/64132) ([TTPO100AJIEX](https://github.com/TTPO100AJIEX)).
* Syncing code. [#64135](https://github.com/ClickHouse/ClickHouse/pull/64135) ([Antonio Andelic](https://github.com/antonio2368)).
* Loosen build resource limits for unusual architectures. [#64152](https://github.com/ClickHouse/ClickHouse/pull/64152) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix clang-tidy. [#64179](https://github.com/ClickHouse/ClickHouse/pull/64179) ([Han Fei](https://github.com/hanfei1991)).
* Fix: 02124_insert_deduplication_token_multiple_blocks_replica. [#64181](https://github.com/ClickHouse/ClickHouse/pull/64181) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix global query profiler. [#64187](https://github.com/ClickHouse/ClickHouse/pull/64187) ([Azat Khuzhin](https://github.com/azat)).
* CI: cancel running PR wf after adding to MQ. [#64188](https://github.com/ClickHouse/ClickHouse/pull/64188) ([Max K.](https://github.com/maxknv)).
* Add profile events for number of rows read during/after prewhere. [#64198](https://github.com/ClickHouse/ClickHouse/pull/64198) ([Nikita Taranov](https://github.com/nickitat)).
* Add debug logging to EmbeddedRocksDBBulkSink. [#64203](https://github.com/ClickHouse/ClickHouse/pull/64203) ([vdimir](https://github.com/vdimir)).
* Fix special builds (due to excessive resource usage - memory/CPU). [#64204](https://github.com/ClickHouse/ClickHouse/pull/64204) ([Azat Khuzhin](https://github.com/azat)).
* Update InterpreterCreateQuery.cpp. [#64207](https://github.com/ClickHouse/ClickHouse/pull/64207) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove unused storage_snapshot field from MergeTreeSelectProcessor. [#64217](https://github.com/ClickHouse/ClickHouse/pull/64217) ([Alexander Gololobov](https://github.com/davenger)).
* Add test for [#37090](https://github.com/ClickHouse/ClickHouse/issues/37090). [#64220](https://github.com/ClickHouse/ClickHouse/pull/64220) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Small CLI tool. [#64227](https://github.com/ClickHouse/ClickHouse/pull/64227) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Make `settings_changes_history` const. [#64230](https://github.com/ClickHouse/ClickHouse/pull/64230) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Test for [#45804](https://github.com/ClickHouse/ClickHouse/issues/45804). [#64245](https://github.com/ClickHouse/ClickHouse/pull/64245) ([Denny Crane](https://github.com/den-crane)).
* Update version after release. [#64283](https://github.com/ClickHouse/ClickHouse/pull/64283) ([Raúl Marín](https://github.com/Algunenano)).
* Followup for [#63691](https://github.com/ClickHouse/ClickHouse/issues/63691). [#64285](https://github.com/ClickHouse/ClickHouse/pull/64285) ([vdimir](https://github.com/vdimir)).
* CI: dependency fix for changelog.py. [#64293](https://github.com/ClickHouse/ClickHouse/pull/64293) ([Max K.](https://github.com/maxknv)).
* Print query in explain plan with parallel replicas. [#64298](https://github.com/ClickHouse/ClickHouse/pull/64298) ([vdimir](https://github.com/vdimir)).
* CI: Cancel sync wf on new push. [#64299](https://github.com/ClickHouse/ClickHouse/pull/64299) ([Max K.](https://github.com/maxknv)).
* CI: master workflow with folded jobs. [#64340](https://github.com/ClickHouse/ClickHouse/pull/64340) ([Max K.](https://github.com/maxknv)).
* CI: Sync, Merge check, CI gh's statuses fixes. [#64348](https://github.com/ClickHouse/ClickHouse/pull/64348) ([Max K.](https://github.com/maxknv)).
* Enable 02494_query_cache_nested_query_bug for Analyzer. [#64357](https://github.com/ClickHouse/ClickHouse/pull/64357) ([Robert Schulze](https://github.com/rschu1ze)).
* Rename allow_deprecated_functions to allow_deprecated_error_prone_window_functions. [#64358](https://github.com/ClickHouse/ClickHouse/pull/64358) ([Raúl Marín](https://github.com/Algunenano)).
* Change input_format_parquet_use_native_reader to 24.6. [#64359](https://github.com/ClickHouse/ClickHouse/pull/64359) ([Raúl Marín](https://github.com/Algunenano)).
* Update description for settings `cross_join_min_rows_to_compress` and `cross_join_min_bytes_to_compress`. [#64360](https://github.com/ClickHouse/ClickHouse/pull/64360) ([Nikita Fomichev](https://github.com/fm4v)).
* Changed the unreleased setting `aggregate_function_group_array_has_limit_size` to `aggregate_function_group_array_action_when_limit_is_reached`. [#64362](https://github.com/ClickHouse/ClickHouse/pull/64362) ([Raúl Marín](https://github.com/Algunenano)).
* Split tests 03039_dynamic_all_merge_algorithms to avoid timeouts. [#64363](https://github.com/ClickHouse/ClickHouse/pull/64363) ([Kruglov Pavel](https://github.com/Avogar)).
* Try to fix GWPAsan. [#64365](https://github.com/ClickHouse/ClickHouse/pull/64365) ([Antonio Andelic](https://github.com/antonio2368)).
* CI: add secrets to reusable stage wf yml. [#64366](https://github.com/ClickHouse/ClickHouse/pull/64366) ([Max K.](https://github.com/maxknv)).
* Do not run tests tagged 'no-s3-storage-with-slow-build' with ASan. [#64367](https://github.com/ClickHouse/ClickHouse/pull/64367) ([vdimir](https://github.com/vdimir)).
* This change was reverted. [#64386](https://github.com/ClickHouse/ClickHouse/pull/64386) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
* Update s3queue.md. [#64389](https://github.com/ClickHouse/ClickHouse/pull/64389) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Test for [#64211](https://github.com/ClickHouse/ClickHouse/issues/64211). [#64390](https://github.com/ClickHouse/ClickHouse/pull/64390) ([Denny Crane](https://github.com/den-crane)).
* Follow-up to [#59767](https://github.com/ClickHouse/ClickHouse/issues/59767). [#64398](https://github.com/ClickHouse/ClickHouse/pull/64398) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove wrong comment. [#64403](https://github.com/ClickHouse/ClickHouse/pull/64403) ([Sergei Trifonov](https://github.com/serxa)).
* Follow up to [#59767](https://github.com/ClickHouse/ClickHouse/issues/59767). [#64404](https://github.com/ClickHouse/ClickHouse/pull/64404) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Refactor s3 settings (move settings parsing into single place). [#64412](https://github.com/ClickHouse/ClickHouse/pull/64412) ([Kseniia Sumarokova](https://github.com/kssenii)).
* This PR was reverted. [#64423](https://github.com/ClickHouse/ClickHouse/pull/64423) ([Sergei Trifonov](https://github.com/serxa)).
* Fix test after [#64404](https://github.com/ClickHouse/ClickHouse/issues/64404). [#64432](https://github.com/ClickHouse/ClickHouse/pull/64432) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Faster TestKeeper shutdown. [#64433](https://github.com/ClickHouse/ClickHouse/pull/64433) ([Alexander Gololobov](https://github.com/davenger)).
* Remove some logging. [#64434](https://github.com/ClickHouse/ClickHouse/pull/64434) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Revert "Revert "Remove some unnecessary UNREACHABLEs"". [#64435](https://github.com/ClickHouse/ClickHouse/pull/64435) ([Robert Schulze](https://github.com/rschu1ze)).
* Clean settings in 02943_variant_read_subcolumns test. [#64437](https://github.com/ClickHouse/ClickHouse/pull/64437) ([Kruglov Pavel](https://github.com/Avogar)).
* Add a comment after [#64226](https://github.com/ClickHouse/ClickHouse/issues/64226). [#64449](https://github.com/ClickHouse/ClickHouse/pull/64449) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* CI: fix build_report selection in case of job reuse. [#64459](https://github.com/ClickHouse/ClickHouse/pull/64459) ([Max K.](https://github.com/maxknv)).
* Add Critical bugfix category in PR template. [#64480](https://github.com/ClickHouse/ClickHouse/pull/64480) ([Max K.](https://github.com/maxknv)).
* Remove `generateSnowflakeIDThreadMonotonic`. [#64499](https://github.com/ClickHouse/ClickHouse/pull/64499) ([Robert Schulze](https://github.com/rschu1ze)).
* Move analyzer attempt 2. [#64500](https://github.com/ClickHouse/ClickHouse/pull/64500) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Sync some code back from internal to public repository. [#64502](https://github.com/ClickHouse/ClickHouse/pull/64502) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove `generateUUIDv7(NonMonotonic|ThreadMonotonic)` functions. [#64506](https://github.com/ClickHouse/ClickHouse/pull/64506) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix bash completion for settings. [#64521](https://github.com/ClickHouse/ClickHouse/pull/64521) ([Azat Khuzhin](https://github.com/azat)).
* Use max_read_buffer_size for file descriptors as well in file(). [#64532](https://github.com/ClickHouse/ClickHouse/pull/64532) ([Azat Khuzhin](https://github.com/azat)).
* Temporarily disable `enable_vertical_final` setting by default. This feature should not be used in older releases because it [might crash](https://github.com/ClickHouse/ClickHouse/issues/64543), but it's already fixed in 24.6 where this setting change has been reverted and `enable_vertical_final` is again enabled by default. [#64544](https://github.com/ClickHouse/ClickHouse/pull/64544) ([Alexander Tokmakov](https://github.com/tavplubix)).
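  A minimal sketch of the workaround on affected releases (`events` is a hypothetical ReplacingMergeTree table):

  ```sql
  -- Opt out of vertical FINAL for the session to avoid the crash linked above.
  SET enable_vertical_final = 0;
  SELECT * FROM events FINAL;
  ```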
* Removed excessive calls to `flush logs` and disabled them under sanitizers. [#64550](https://github.com/ClickHouse/ClickHouse/pull/64550) ([Nikita Taranov](https://github.com/nickitat)).
* Sync code moved in the private repo back to the public repo. [#64551](https://github.com/ClickHouse/ClickHouse/pull/64551) ([Robert Schulze](https://github.com/rschu1ze)).
* Add support for custom type to ASTLiteral, otherwise the type may be lost when parsing the AST. E.g., if an ASTLiteral is set to DateTime32 with value 19870, it will be parsed as Int16. [#64562](https://github.com/ClickHouse/ClickHouse/pull/64562) ([shuai.xu](https://github.com/shuai-xu)).
* Add a temporary known host for git over ssh. [#64569](https://github.com/ClickHouse/ClickHouse/pull/64569) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Cache first analysis result in ReadFromMergeTree. [#64579](https://github.com/ClickHouse/ClickHouse/pull/64579) ([Igor Nikonov](https://github.com/devcrafter)).
* Derive script parameters (labels) from `--repo`/`--from-repo`; fix to avoid creating backports for all release branches when a backport is requested for a specific branch only. [#64603](https://github.com/ClickHouse/ClickHouse/pull/64603) ([Max K.](https://github.com/maxknv)).
* CI fixes. [#64605](https://github.com/ClickHouse/ClickHouse/pull/64605) ([Max K.](https://github.com/maxknv)).
* Double-checking [#59318](https://github.com/ClickHouse/ClickHouse/issues/59318) and docs for `Map`. [#64606](https://github.com/ClickHouse/ClickHouse/pull/64606) ([Robert Schulze](https://github.com/rschu1ze)).
* Update CHANGELOG.md. [#64609](https://github.com/ClickHouse/ClickHouse/pull/64609) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Tests: Convert numeric to symbolic error codes. [#64635](https://github.com/ClickHouse/ClickHouse/pull/64635) ([Robert Schulze](https://github.com/rschu1ze)).
* Move NamedCollectionsFactory into a separate file. [#64642](https://github.com/ClickHouse/ClickHouse/pull/64642) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Shuffle tests for parallel execution. [#64646](https://github.com/ClickHouse/ClickHouse/pull/64646) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* CI: Do not upload binaries for special builds in PRs. [#64653](https://github.com/ClickHouse/ClickHouse/pull/64653) ([Max K.](https://github.com/maxknv)).
* Update changelog. [#64654](https://github.com/ClickHouse/ClickHouse/pull/64654) ([Robert Schulze](https://github.com/rschu1ze)).
* Parallel replicas: simple cleanup. [#64655](https://github.com/ClickHouse/ClickHouse/pull/64655) ([Igor Nikonov](https://github.com/devcrafter)).
* Be more graceful with existing tables with `inverted` indexes. [#64656](https://github.com/ClickHouse/ClickHouse/pull/64656) ([Robert Schulze](https://github.com/rschu1ze)).
* CI: Build Report Check to verify only enabled builds. [#64669](https://github.com/ClickHouse/ClickHouse/pull/64669) ([Max K.](https://github.com/maxknv)).
* Tests: Convert error numbers to symbolic error codes, pt. II. [#64670](https://github.com/ClickHouse/ClickHouse/pull/64670) ([Robert Schulze](https://github.com/rschu1ze)).
* Split query analyzer. [#64672](https://github.com/ClickHouse/ClickHouse/pull/64672) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* By the end of CI, the CI_Running status must be SUCCESS or FAILURE, never PENDING. [#64693](https://github.com/ClickHouse/ClickHouse/pull/64693) ([Max K.](https://github.com/maxknv)).
* The following list of merged PRs is not present in the release branch and was added to the changelog by mistake. [#64704](https://github.com/ClickHouse/ClickHouse/pull/64704) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* CI: MergeQueue: add binary_release and unit tests. [#64705](https://github.com/ClickHouse/ClickHouse/pull/64705) ([Max K.](https://github.com/maxknv)).
* Fix to get first good enough GH token instead of getting and comparing all of them. [#64709](https://github.com/ClickHouse/ClickHouse/pull/64709) ([Max K.](https://github.com/maxknv)).
* Check for missing Upload ID in CreateMultipartUpload reply. [#64714](https://github.com/ClickHouse/ClickHouse/pull/64714) ([Michael Kolupaev](https://github.com/al13n321)).
* Update version_date.tsv and changelogs after v24.5.1.1763-stable. [#64715](https://github.com/ClickHouse/ClickHouse/pull/64715) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix (unreleased) `loop()` table function crashing on empty table name. [#64716](https://github.com/ClickHouse/ClickHouse/pull/64716) ([Michael Kolupaev](https://github.com/al13n321)).
* Update CHANGELOG.md. [#64730](https://github.com/ClickHouse/ClickHouse/pull/64730) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* CI: ci.py refactoring. [#64734](https://github.com/ClickHouse/ClickHouse/pull/64734) ([Max K.](https://github.com/maxknv)).
* Return the explanation for session moved error. [#64747](https://github.com/ClickHouse/ClickHouse/pull/64747) ([Antonio Andelic](https://github.com/antonio2368)).
* Adjust the version_helper and script to a new release scheme. [#64759](https://github.com/ClickHouse/ClickHouse/pull/64759) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Do not try to write columns.txt if it does not exist for write-once storages. [#64762](https://github.com/ClickHouse/ClickHouse/pull/64762) ([Azat Khuzhin](https://github.com/azat)).
* Update 02482_load_parts_refcounts.sh. [#64765](https://github.com/ClickHouse/ClickHouse/pull/64765) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix crash with DISTINCT and window functions. [#64767](https://github.com/ClickHouse/ClickHouse/pull/64767) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix assert in IObjectStorageIteratorAsync. [#64770](https://github.com/ClickHouse/ClickHouse/pull/64770) ([Michael Kolupaev](https://github.com/al13n321)).
* Make table functions always report engine 'StorageProxy' in system.tables. [#64771](https://github.com/ClickHouse/ClickHouse/pull/64771) ([Michael Kolupaev](https://github.com/al13n321)).
* Ask about company name on GitHub. [#64774](https://github.com/ClickHouse/ClickHouse/pull/64774) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky tests about SQLite. [#64776](https://github.com/ClickHouse/ClickHouse/pull/64776) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove iostream debug helpers. [#64777](https://github.com/ClickHouse/ClickHouse/pull/64777) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove unnecessary comment. [#64785](https://github.com/ClickHouse/ClickHouse/pull/64785) ([Raúl Marín](https://github.com/Algunenano)).
* Follow-ups to some PRs. [#64787](https://github.com/ClickHouse/ClickHouse/pull/64787) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Attempt to fix 02228_merge_tree_insert_memory_usage.sql flakiness for s3. [#64800](https://github.com/ClickHouse/ClickHouse/pull/64800) ([Raúl Marín](https://github.com/Algunenano)).
* Add regression test for filter propagation through `Merge` engine. [#64806](https://github.com/ClickHouse/ClickHouse/pull/64806) ([Nikita Taranov](https://github.com/nickitat)).
* Migrate changelog.py to a descendant of fuzzywuzzy. [#64807](https://github.com/ClickHouse/ClickHouse/pull/64807) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* A follow-up for https://github.com/ClickHouse/ClickHouse/pull/64039 and [#64759](https://github.com/ClickHouse/ClickHouse/issues/64759). [#64813](https://github.com/ClickHouse/ClickHouse/pull/64813) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Make row order optimization non-experimental. [#64814](https://github.com/ClickHouse/ClickHouse/pull/64814) ([Robert Schulze](https://github.com/rschu1ze)).
* Didn't catch it at the time when all versions belonged to the current year. [#64817](https://github.com/ClickHouse/ClickHouse/pull/64817) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix clang-tidy build. [#64823](https://github.com/ClickHouse/ClickHouse/pull/64823) ([Robert Schulze](https://github.com/rschu1ze)).
* Set all builds that we run tests on to the normal build list. [#64824](https://github.com/ClickHouse/ClickHouse/pull/64824) ([Max K.](https://github.com/maxknv)).
* CI: fix CI await feature. [#64825](https://github.com/ClickHouse/ClickHouse/pull/64825) ([Max K.](https://github.com/maxknv)).
* Fix clang-tidy. [#64827](https://github.com/ClickHouse/ClickHouse/pull/64827) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Upload blob_storage_log from stateless tests. [#64843](https://github.com/ClickHouse/ClickHouse/pull/64843) ([alesapin](https://github.com/alesapin)).
* Follow-up to [#64349](https://github.com/ClickHouse/ClickHouse/issues/64349). [#64845](https://github.com/ClickHouse/ClickHouse/pull/64845) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Simplify handling of old 'inverted' indexes. [#64846](https://github.com/ClickHouse/ClickHouse/pull/64846) ([Robert Schulze](https://github.com/rschu1ze)).
* Use issue templates defined in YAML to provide a more user-friendly experience. [#64850](https://github.com/ClickHouse/ClickHouse/pull/64850) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Handle logs from rocksdb by ClickHouse internal logging. [#64856](https://github.com/ClickHouse/ClickHouse/pull/64856) ([Azat Khuzhin](https://github.com/azat)).
* Follow-up for https://github.com/ClickHouse/ClickHouse/pull/59357. [#64860](https://github.com/ClickHouse/ClickHouse/pull/64860) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Added `mlock` and `mlockall` to aspell-dict to be ignored. [#64863](https://github.com/ClickHouse/ClickHouse/pull/64863) ([Ali](https://github.com/xogoodnow)).
* A tiny fix for fancy quotes. [#64883](https://github.com/ClickHouse/ClickHouse/pull/64883) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix possible loss of "Query was cancelled" message in client. [#64888](https://github.com/ClickHouse/ClickHouse/pull/64888) ([Azat Khuzhin](https://github.com/azat)).
* We accidentally lost the way to set `PR Check` failure at some point. [#64890](https://github.com/ClickHouse/ClickHouse/pull/64890) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix global trace collector. [#64896](https://github.com/ClickHouse/ClickHouse/pull/64896) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix test_mask_sensitive_info/test.py::test_create_table. [#64901](https://github.com/ClickHouse/ClickHouse/pull/64901) ([Azat Khuzhin](https://github.com/azat)).
* Update 03165_string_functions_with_token_text_indexes.sql. [#64903](https://github.com/ClickHouse/ClickHouse/pull/64903) ([Alexander Tokmakov](https://github.com/tavplubix)).
* When the branch is removed, it's impossible to get the diff by the labels. `print` in imported files spoils the `ipython` output. [#64904](https://github.com/ClickHouse/ClickHouse/pull/64904) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Disable transactions for unsupported storages even for materialized v…. [#64918](https://github.com/ClickHouse/ClickHouse/pull/64918) ([alesapin](https://github.com/alesapin)).
|
||||
* additional log for cleanupDetachedTables. [#64919](https://github.com/ClickHouse/ClickHouse/pull/64919) ([Konstantin Morozov](https://github.com/k-morozov)).
|
||||
* Fix tupleConcat of two empty tuples. This fixes [#64885](https://github.com/ClickHouse/ClickHouse/issues/64885). [#64923](https://github.com/ClickHouse/ClickHouse/pull/64923) ([Amos Bird](https://github.com/amosbird)).
|
||||
* CI: Minor fixes in ci scripts. [#64950](https://github.com/ClickHouse/ClickHouse/pull/64950) ([Max K.](https://github.com/maxknv)).
|
||||
* Fix error message (it was strange). [#64952](https://github.com/ClickHouse/ClickHouse/pull/64952) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Update fmtlib version to 9.1.0. [#64959](https://github.com/ClickHouse/ClickHouse/pull/64959) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Test 02908_many_requests_to_system_replicas makes a lot of heavy requests and it overloads server if it's an ASAN build. [#64966](https://github.com/ClickHouse/ClickHouse/pull/64966) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Fix (unreleased) bug in short circuit evaluation. [#64967](https://github.com/ClickHouse/ClickHouse/pull/64967) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Update version_date.tsv and changelogs after v24.4.2.141-stable. [#64968](https://github.com/ClickHouse/ClickHouse/pull/64968) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Fix `test_attach_partition_using_copy`. [#64977](https://github.com/ClickHouse/ClickHouse/pull/64977) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Faster processing of scheduler queue activations. [#64985](https://github.com/ClickHouse/ClickHouse/pull/64985) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* CI: Fix nightly workflow. [#64987](https://github.com/ClickHouse/ClickHouse/pull/64987) ([Max K.](https://github.com/maxknv)).
|
||||
* Fix innocuous data race in detectLanguage. [#64988](https://github.com/ClickHouse/ClickHouse/pull/64988) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* CI: Builds in CI settings. [#64994](https://github.com/ClickHouse/ClickHouse/pull/64994) ([Max K.](https://github.com/maxknv)).
|
||||
* REVERTED. [#65009](https://github.com/ClickHouse/ClickHouse/pull/65009) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* CI: Fix backports. [#65010](https://github.com/ClickHouse/ClickHouse/pull/65010) ([Max K.](https://github.com/maxknv)).
|
||||
* Try fix 03143_prewhere_profile_events. [#65014](https://github.com/ClickHouse/ClickHouse/pull/65014) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Fix 03165_string_functions_with_token_text_indexes. [#65018](https://github.com/ClickHouse/ClickHouse/pull/65018) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||
* This change was reverted. [#65028](https://github.com/ClickHouse/ClickHouse/pull/65028) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* Bump googletest to latest HEAD. [#65038](https://github.com/ClickHouse/ClickHouse/pull/65038) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Improve comment about AsynchronousMetrics. [#65040](https://github.com/ClickHouse/ClickHouse/pull/65040) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* CI: Remove fuzzer build from normal CI run (bugfix). [#65041](https://github.com/ClickHouse/ClickHouse/pull/65041) ([Max K.](https://github.com/maxknv)).
|
||||
* CI config refactoring. [#65045](https://github.com/ClickHouse/ClickHouse/pull/65045) ([Max K.](https://github.com/maxknv)).
|
||||
* Bump abseil to latest HEAD. [#65048](https://github.com/ClickHouse/ClickHouse/pull/65048) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Capture weak_ptr of ContextAccess for safety. [#65051](https://github.com/ClickHouse/ClickHouse/pull/65051) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Stateless tests: add test for SIZES_OF_NESTED_COLUMNS_ARE_INCONSISTENT. [#65056](https://github.com/ClickHouse/ClickHouse/pull/65056) ([Nikita Fomichev](https://github.com/fm4v)).
|
||||
* Increase timeout in wait_for_all_mutations. [#65058](https://github.com/ClickHouse/ClickHouse/pull/65058) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Tests for _time virtual column in file alike storages. [#65064](https://github.com/ClickHouse/ClickHouse/pull/65064) ([Ilya Golshtein](https://github.com/ilejn)).
|
||||
* Update odbc-bridge.md. [#65099](https://github.com/ClickHouse/ClickHouse/pull/65099) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Small fix for 02340_parts_refcnt_mergetree. [#65105](https://github.com/ClickHouse/ClickHouse/pull/65105) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Update test_replicated_database/test.py. [#65112](https://github.com/ClickHouse/ClickHouse/pull/65112) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix false positives leaky memory warnings in OpenSSL. [#65125](https://github.com/ClickHouse/ClickHouse/pull/65125) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Fix `Initiator received more initial requests than there are replicas` with `loop` engine. [#65133](https://github.com/ClickHouse/ClickHouse/pull/65133) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Fix 'Tasks in BackgroundSchedulePool cannot throw' caused by MergeTreeData::loadUnexpectedDataParts(). [#65135](https://github.com/ClickHouse/ClickHouse/pull/65135) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Fix bad error message. [#65137](https://github.com/ClickHouse/ClickHouse/pull/65137) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Just fixing flaky unit tests. [#65152](https://github.com/ClickHouse/ClickHouse/pull/65152) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* This change was reverted. [#65164](https://github.com/ClickHouse/ClickHouse/pull/65164) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Ensure submodules are named consistently. [#65167](https://github.com/ClickHouse/ClickHouse/pull/65167) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Remove obsolete fix from aws submodule. [#65168](https://github.com/ClickHouse/ClickHouse/pull/65168) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* CI: Fix not-merged cherry-picks for backports. [#65181](https://github.com/ClickHouse/ClickHouse/pull/65181) ([Max K.](https://github.com/maxknv)).
|
||||
* Add an assertion in ReplicatedMergeTreeQueue. [#65184](https://github.com/ClickHouse/ClickHouse/pull/65184) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix bug in unreleased code. [#65185](https://github.com/ClickHouse/ClickHouse/pull/65185) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Fix docs for skipping-indexes.md. [#65194](https://github.com/ClickHouse/ClickHouse/pull/65194) ([morning-color](https://github.com/morning-color)).
|
||||
* Fix the descriptions of some server settings. [#65200](https://github.com/ClickHouse/ClickHouse/pull/65200) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Fix issue after [#64813](https://github.com/ClickHouse/ClickHouse/issues/64813) with broken search in the changelog, and missing zstd in a style-check image. [#65202](https://github.com/ClickHouse/ClickHouse/pull/65202) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Fix bug in unreleased code. [#65203](https://github.com/ClickHouse/ClickHouse/pull/65203) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Add test prewhere merge. [#65207](https://github.com/ClickHouse/ClickHouse/pull/65207) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Sync ProfileEvents.h. [#65208](https://github.com/ClickHouse/ClickHouse/pull/65208) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* FinishCheck to set failure if workflow failed. [#65228](https://github.com/ClickHouse/ClickHouse/pull/65228) ([Max K.](https://github.com/maxknv)).
|
||||
* Update version_date.tsv and changelogs after v24.3.4.147-lts. [#65235](https://github.com/ClickHouse/ClickHouse/pull/65235) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Update version_date.tsv and changelogs after v24.5.3.5-stable. [#65240](https://github.com/ClickHouse/ClickHouse/pull/65240) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Fails sometimes for debug build https://s3.amazonaws.com/clickhouse-test-reports/0/af6afd904316bfb771737faa147ce8aea72dd705/stateless_tests__debug__[4_5].html. [#65245](https://github.com/ClickHouse/ClickHouse/pull/65245) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix libunwind in CI. [#65247](https://github.com/ClickHouse/ClickHouse/pull/65247) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* CI: Do not skip FinishCheck in Merge Queue. [#65249](https://github.com/ClickHouse/ClickHouse/pull/65249) ([Max K.](https://github.com/maxknv)).
|
||||
* Add a test just in case. [#65271](https://github.com/ClickHouse/ClickHouse/pull/65271) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Disable 02581_share_big_sets_between_multiple_mutations_tasks_long in coverage run. [#65295](https://github.com/ClickHouse/ClickHouse/pull/65295) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Update version_date.tsv and changelogs after v23.8.15.35-lts. [#65300](https://github.com/ClickHouse/ClickHouse/pull/65300) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* mute test test_query_is_canceled_with_inf_retries. [#65301](https://github.com/ClickHouse/ClickHouse/pull/65301) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Fix silly typo that caused wrong tags messages. [#65307](https://github.com/ClickHouse/ClickHouse/pull/65307) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Save server data for failed stateless tests. [#65309](https://github.com/ClickHouse/ClickHouse/pull/65309) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix 01246_buffer_flush flakiness (by tuning timeouts). [#65310](https://github.com/ClickHouse/ClickHouse/pull/65310) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Remove outdated override in stress tests. [#65323](https://github.com/ClickHouse/ClickHouse/pull/65323) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix bad code in `system.session_log`. [#65332](https://github.com/ClickHouse/ClickHouse/pull/65332) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* add tests for 'boom filter index with map'. [#65333](https://github.com/ClickHouse/ClickHouse/pull/65333) ([iceFireser](https://github.com/iceFireser)).
|
||||
* Fix crash in 03036_dynamic_read_subcolumns. [#65341](https://github.com/ClickHouse/ClickHouse/pull/65341) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Move tests 02942_variant_cast and 02944_variant_as_common_type to analyzer_tech_debt.txt. [#65342](https://github.com/ClickHouse/ClickHouse/pull/65342) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* REVERTED. [#65384](https://github.com/ClickHouse/ClickHouse/pull/65384) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* CI: Add Non-blocking (Woolen wolfdog) CI mode. [#65385](https://github.com/ClickHouse/ClickHouse/pull/65385) ([Max K.](https://github.com/maxknv)).
|
||||
* Fix compatibility release check. [#65394](https://github.com/ClickHouse/ClickHouse/pull/65394) ([Alexey Katsman](https://github.com/alexkats)).
|
||||
* Move a leaksan suppression from Poco into OpenSSL. [#65396](https://github.com/ClickHouse/ClickHouse/pull/65396) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Fix tidy build. [#65415](https://github.com/ClickHouse/ClickHouse/pull/65415) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* Remove Tests dependency on Builds_2. No tests depend on Builds_2. [#65416](https://github.com/ClickHouse/ClickHouse/pull/65416) ([Max K.](https://github.com/maxknv)).
|
||||
* CI: PR workflow dependencies fix. [#65442](https://github.com/ClickHouse/ClickHouse/pull/65442) ([Max K.](https://github.com/maxknv)).
|
||||
* Fix test_storage_s3_queue/test.py::test_max_set_age. [#65452](https://github.com/ClickHouse/ClickHouse/pull/65452) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* CI: Rename A Sync status. [#65456](https://github.com/ClickHouse/ClickHouse/pull/65456) ([Max K.](https://github.com/maxknv)).
|
||||
* CI: Rename sync status. [#65464](https://github.com/ClickHouse/ClickHouse/pull/65464) ([Max K.](https://github.com/maxknv)).
|
||||
* This change was reverted. [#65466](https://github.com/ClickHouse/ClickHouse/pull/65466) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* Remove a feature wasn't part of any release yet. [#65480](https://github.com/ClickHouse/ClickHouse/pull/65480) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#65657](https://github.com/ClickHouse/ClickHouse/issues/65657): Fix of `PlanSquashingTransform`: pipeline stuck. [#65487](https://github.com/ClickHouse/ClickHouse/pull/65487) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* Backported in [#65504](https://github.com/ClickHouse/ClickHouse/issues/65504): Fix bad test `02922_deduplication_with_zero_copy`. [#65492](https://github.com/ClickHouse/ClickHouse/pull/65492) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Backported in [#65591](https://github.com/ClickHouse/ClickHouse/issues/65591): Setting `uniform_snowflake_conversion_functions` (not in any release yet) was replaced by setting `allow_deprecated_snowflake_conversion_functions`. The latter controls if the legacy snowflake conversion functions are available (by default, they are not). [#65522](https://github.com/ClickHouse/ClickHouse/pull/65522) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Backported in [#65759](https://github.com/ClickHouse/ClickHouse/issues/65759): Renames Build report jobs. [#65554](https://github.com/ClickHouse/ClickHouse/pull/65554) ([Max K.](https://github.com/maxknv)).
|
||||
* Backported in [#65773](https://github.com/ClickHouse/ClickHouse/issues/65773): `base64En/Decode64Url` --> `base64En/Decode64URL`. [#65760](https://github.com/ClickHouse/ClickHouse/pull/65760) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Backported in [#65805](https://github.com/ClickHouse/ClickHouse/issues/65805): CI: Fix for Builds report job in backports and releases. [#65774](https://github.com/ClickHouse/ClickHouse/pull/65774) ([Max K.](https://github.com/maxknv)).
|
||||
|
@ -993,11 +993,11 @@ They can be used for prewhere optimization only if we enable `set allow_statisti

- `TDigest`

    Stores distribution of values from numeric columns in a [TDigest](https://github.com/tdunning/t-digest) sketch.
    [TDigest](https://github.com/tdunning/t-digest) sketches that allow computing approximate percentiles (e.g. the 90th percentile) for numeric columns.

- `Uniq`

    Estimate the number of distinct values of a column by HyperLogLog.
    [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches that provide an estimation of how many distinct values a column contains.
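
A minimal sketch of how the two statistics types above might be declared together. This is an assumption-laden illustration: the `STATISTICS(...)` column clause and the `allow_experimental_statistics` setting name are assumptions that may differ between versions, since the feature is experimental:

```sql
-- Assumed setting name; the statistics feature is experimental.
SET allow_experimental_statistics = 1;

CREATE TABLE tab
(
    a Int64 STATISTICS(TDigest, Uniq), -- percentile and distinct-count sketches
    b Float64 STATISTICS(TDigest)
)
ENGINE = MergeTree
ORDER BY a;
```
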
## Column-level Settings {#column-level-settings}
@ -6,23 +6,30 @@ import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.m

<SelfManaged />

[SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation makes it possible to uniquely authenticate an incoming connection. The `Common Name` field of the certificate is used to identify the connected user. This makes it possible to associate multiple certificates with the same user. Additionally, reissuing and revoking certificates does not affect the ClickHouse configuration.
[SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation makes it possible to uniquely authenticate an incoming connection. The `Common Name` or `subjectAltName extension` field of the certificate is used to identify the connected user. This makes it possible to associate multiple certificates with the same user. Additionally, reissuing and revoking certificates does not affect the ClickHouse configuration.

To enable SSL certificate authentication, a list of `Common Name`'s for each ClickHouse user must be specified in the settings file `users.xml`:
To enable SSL certificate authentication, a list of `Common Name`'s or `Subject Alt Name`'s for each ClickHouse user must be specified in the settings file `users.xml`:

**Example**
```xml
<clickhouse>
    <!-- ... -->
    <users>
        <user_name>
        <user_name_1>
            <ssl_certificates>
                <common_name>host.domain.com:example_user</common_name>
                <common_name>host.domain.com:example_user_dev</common_name>
                <!-- More names -->
            </ssl_certificates>
            <!-- Other settings -->
        </user_name>
        </user_name_1>
        <user_name_2>
            <ssl_certificates>
                <subject_alt_name>DNS:host.domain.com</subject_alt_name>
                <!-- More names -->
            </ssl_certificates>
            <!-- Other settings -->
        </user_name_2>
    </users>
</clickhouse>
```
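
The same identities can also be expressed in SQL. A hedged sketch using the `CN`/`SAN` clauses that appear in the `CREATE USER`/`ALTER USER` syntax diffs further down in this commit (user names are illustrative):

```sql
-- Illustrative user names; the SAN value mirrors the XML example above.
CREATE USER example_user IDENTIFIED WITH ssl_certificate CN 'host.domain.com:example_user';
CREATE USER example_user_san IDENTIFIED WITH ssl_certificate SAN 'DNS:host.domain.com';
```
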
@ -2536,7 +2536,7 @@ Possible values:

- 0 — Optimization disabled.
- 1 — Optimization enabled.

Default value: `0`.
Default value: `1`.

## optimize_trivial_count_query {#optimize-trivial-count-query}
@ -1,24 +1,20 @@
---
slug: /en/sql-reference/data-types/json
slug: /en/sql-reference/data-types/object-data-type
sidebar_position: 26
sidebar_label: JSON
sidebar_label: Object Data Type
keywords: [object, data type]
---

# JSON
# Object Data Type

:::note
This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json.md) instead.
This feature is not production-ready and is now deprecated. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON objects is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
:::

Stores JavaScript Object Notation (JSON) documents in a single column.

`JSON` is an alias for `Object('json')`.

:::note
The JSON data type is an obsolete feature. Do not use it.
If you want to use it, set `allow_experimental_object_type = 1`.
:::

## Example

**Example 1**
@ -49,7 +45,7 @@ SELECT o.a, o.b.c, o.b.d[3] FROM json

**Example 2**

To be able to create an ordered `MergeTree` family table the sorting key has to be extracted into its own column. For example, to insert a file of compressed HTTP access logs in JSON format:
To be able to create an ordered `MergeTree` family table, the sorting key has to be extracted into its own column. For example, to insert a file of compressed HTTP access logs in JSON format:

```sql
CREATE TABLE logs
@ -69,7 +65,7 @@ FROM file('access.json.gz', JSONAsString)

## Displaying JSON columns

When displaying a `JSON` column ClickHouse only shows the field values by default (because internally, it is represented as a tuple). You can display the field names as well by setting `output_format_json_named_tuples_as_objects = 1`:
When displaying a `JSON` column, ClickHouse only shows the field values by default (because internally, it is represented as a tuple). You can also display the field names by setting `output_format_json_named_tuples_as_objects = 1`:

```sql
SET output_format_json_named_tuples_as_objects = 1
@ -83,4 +79,5 @@ SELECT * FROM json FORMAT JSONEachRow

## Related Content

- [Using JSON in ClickHouse](/docs/en/integrations/data-formats/json)
- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json)
@ -173,7 +173,7 @@ See function [substring](string-functions.md#substring).

## bitTest

Takes any integer and converts it into [binary form](https://en.wikipedia.org/wiki/Binary_number), then returns the value of a bit at the specified position. The countdown starts from 0 from the right to the left.
Takes any integer and converts it into [binary form](https://en.wikipedia.org/wiki/Binary_number), then returns the value of a bit at the specified position. Counting is right-to-left, starting at 0.
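
For illustration, 43 is `101011` in binary, so counting from the right starting at 0, the bit at position 1 is set and the bit at position 2 is not:

```sql
SELECT bin(43) AS binary, bitTest(43, 1) AS bit1, bitTest(43, 2) AS bit2;
-- binary = '00101011', bit1 = 1, bit2 = 0
```
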
**Syntax**

@ -226,7 +226,7 @@ Result:

## bitTestAll

Returns the result of [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. The countdown starts from 0 from the right to the left.
Returns the result of [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. Counting is right-to-left, starting at 0.

The conjunction for bit-wise operations:

@ -289,7 +289,7 @@ Result:

## bitTestAny

Returns the result of [logical disjunction](https://en.wikipedia.org/wiki/Logical_disjunction) (OR operator) of all bits at given positions. The countdown starts from 0 from the right to the left.
Returns the result of [logical disjunction](https://en.wikipedia.org/wiki/Logical_disjunction) (OR operator) of all bits at given positions. Counting is right-to-left, starting at 0.

The disjunction for bit-wise operations:
@ -3860,3 +3860,138 @@ Result:
└───────────────┘
```

## transactionID

Returns the ID of a [transaction](https://clickhouse.com/docs/en/guides/developer/transactional#transactions-commit-and-rollback).

:::note
This function is part of an experimental feature set. Enable experimental transaction support by adding this setting to your configuration:

```xml
<clickhouse>
    <allow_experimental_transactions>1</allow_experimental_transactions>
</clickhouse>
```

For more information, see the page [Transactional (ACID) support](https://clickhouse.com/docs/en/guides/developer/transactional#transactions-commit-and-rollback).
:::

**Syntax**

```sql
transactionID()
```

**Returned value**

- Returns a tuple consisting of `start_csn`, `local_tid` and `host_id`. [Tuple](../data-types/tuple.md).

- `start_csn`: Global sequential number, the newest commit timestamp that was seen when this transaction began. [UInt64](../data-types/int-uint.md).
- `local_tid`: Local sequential number that is unique for each transaction started by this host within a specific `start_csn`. [UInt64](../data-types/int-uint.md).
- `host_id`: UUID of the host that has started this transaction. [UUID](../data-types/uuid.md).

**Example**

Query:

```sql
BEGIN TRANSACTION;
SELECT transactionID();
ROLLBACK;
```

Result:

```response
┌─transactionID()────────────────────────────────┐
│ (32,34,'0ee8b069-f2bb-4748-9eae-069c85b5252b') │
└────────────────────────────────────────────────┘
```

## transactionLatestSnapshot

Returns the newest snapshot (Commit Sequence Number) of a [transaction](https://clickhouse.com/docs/en/guides/developer/transactional#transactions-commit-and-rollback) that is available for reading.

:::note
This function is part of an experimental feature set. Enable experimental transaction support by adding this setting to your configuration:

```xml
<clickhouse>
    <allow_experimental_transactions>1</allow_experimental_transactions>
</clickhouse>
```

For more information, see the page [Transactional (ACID) support](https://clickhouse.com/docs/en/guides/developer/transactional#transactions-commit-and-rollback).
:::

**Syntax**

```sql
transactionLatestSnapshot()
```

**Returned value**

- Returns the latest snapshot (CSN) of a transaction. [UInt64](../data-types/int-uint.md).

**Example**

Query:

```sql
BEGIN TRANSACTION;
SELECT transactionLatestSnapshot();
ROLLBACK;
```

Result:

```response
┌─transactionLatestSnapshot()─┐
│                          32 │
└─────────────────────────────┘
```

## transactionOldestSnapshot

Returns the oldest snapshot (Commit Sequence Number) that is visible for some running [transaction](https://clickhouse.com/docs/en/guides/developer/transactional#transactions-commit-and-rollback).

:::note
This function is part of an experimental feature set. Enable experimental transaction support by adding this setting to your configuration:

```xml
<clickhouse>
    <allow_experimental_transactions>1</allow_experimental_transactions>
</clickhouse>
```

For more information, see the page [Transactional (ACID) support](https://clickhouse.com/docs/en/guides/developer/transactional#transactions-commit-and-rollback).
:::

**Syntax**

```sql
transactionOldestSnapshot()
```

**Returned value**

- Returns the oldest snapshot (CSN) of a transaction. [UInt64](../data-types/int-uint.md).

**Example**

Query:

```sql
BEGIN TRANSACTION;
SELECT transactionOldestSnapshot();
ROLLBACK;
```

Result:

```response
┌─transactionOldestSnapshot()─┐
│                          32 │
└─────────────────────────────┘
```
@ -579,7 +579,6 @@ If the length of the UTF-8 byte sequence is different for upper and lower case o

Converts a string to uppercase, assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.

Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I).

If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
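
A quick illustration of the behavior described above, assuming this hunk belongs to the `upperUTF8` section of string-functions.md:

```sql
SELECT upperUTF8('München') AS res;
-- res = 'MÜNCHEN'
```
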
@ -736,7 +735,7 @@ concat(s1, s2, ...)

**Arguments**

At least one value of arbitrary type.
Values of arbitrary type.

Arguments which are not of types [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md) are converted to strings using their default serialization. As this decreases performance, it is not recommended to use non-String/FixedString arguments.
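
A short example of the conversion behavior described above; the integer argument is serialized to text before concatenation:

```sql
SELECT concat('The answer is ', 42) AS s;
-- s = 'The answer is 42'
```
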
@ -28,6 +28,6 @@ There is an example adding two statistics types to two columns:

ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq;
```

:::note
Statistic manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants).
:::
@ -12,7 +12,7 @@ Syntax:

``` sql
ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
    [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'}]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name' | SAN 'TYPE:subject_alt_name'}]
    [[ADD | DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
    [VALID UNTIL datetime]
    [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
@ -152,7 +152,7 @@ SELECT * FROM test;

`MATERIALIZED expr`

Materialized expression. Values of such columns are always calculated; they cannot be specified in INSERT queries.
Materialized expression. Values of such columns are automatically calculated according to the specified materialized expression when rows are inserted. Values cannot be explicitly specified during `INSERT`s.

Also, default value columns of this type are not included in the result of `SELECT *`. This is to preserve the invariant that the result of a `SELECT *` can always be inserted back into the table using `INSERT`. This behavior can be disabled with the setting `asterisk_include_materialized_columns`.
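
A minimal sketch of this behavior (table and column names are illustrative):

```sql
CREATE TABLE events
(
    raw String,
    raw_length UInt64 MATERIALIZED length(raw) -- computed on insert; cannot be supplied in INSERT
)
ENGINE = MergeTree
ORDER BY raw;

INSERT INTO events (raw) VALUES ('hello');

SELECT * FROM events;               -- raw_length is omitted by default
SELECT raw, raw_length FROM events; -- returns 'hello', 5
```
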
@ -12,7 +12,7 @@ Syntax:

``` sql
CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
    [, name2 [ON CLUSTER cluster_name2] ...]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'} | {WITH ssh_key BY KEY 'public_key' TYPE 'ssh-rsa|...'} | {WITH http SERVER 'server_name' [SCHEME 'Basic']}]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name' | SAN 'TYPE:subject_alt_name'} | {WITH ssh_key BY KEY 'public_key' TYPE 'ssh-rsa|...'} | {WITH http SERVER 'server_name' [SCHEME 'Basic']}]
    [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
    [VALID UNTIL datetime]
    [IN access_storage_type]
@ -269,9 +269,9 @@ FROM s3(

## Virtual Columns {#virtual-columns}

- `_path` — Path to the file. Type: `LowCardinality(String)`.
- `_file` — Name of the file. Type: `LowCardinality(String)`.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`.
- `_path` — Path to the file. Type: `LowCardinality(String)`. In the case of an archive, shows the path in the format: `{path_to_archive}::{path_to_file_inside_archive}`.
- `_file` — Name of the file. Type: `LowCardinality(String)`. In the case of an archive, shows the name of the file inside the archive.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. In the case of an archive, shows the uncompressed size of the file inside the archive.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.

## Storage Settings {#storage-settings}
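
A hedged sketch of querying these virtual columns (the bucket URL is a placeholder):

```sql
SELECT _path, _file, _size, _time
FROM s3('https://my-bucket.s3.amazonaws.com/data/*.csv', 'CSVWithNames')
LIMIT 5;
```
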
@ -80,8 +80,8 @@ These functions can be used only as a window function.

- `nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
- `rank()` - Rank the current row within its partition with gaps.
- `dense_rank()` - Rank the current row within its partition without gaps.
- `lagInFrame(x)` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
- `leadInFrame(x)` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame.
- `lagInFrame(x[, offset[, default]])` - Return a value evaluated at the row that is at a specified physical offset before the current row within the ordered frame. The offset parameter, if not specified, defaults to 1, meaning it will fetch the value from the previous row. If the calculated row exceeds the boundaries of the window frame, the specified default value is returned.
- `leadInFrame(x[, offset[, default]])` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame. If offset is not provided, it defaults to 1. If the offset leads to a position outside the window frame, the specified default value is used.

## Examples
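
An illustrative query for `lagInFrame`/`leadInFrame` with an explicit frame, showing the default value being returned at the frame boundaries:

```sql
SELECT
    number,
    lagInFrame(number, 1, 999)  OVER w AS prev, -- 999 is the default used outside the frame
    leadInFrame(number, 1, 999) OVER w AS next
FROM numbers(3)
WINDOW w AS (ORDER BY number ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING);
-- number = 0: prev = 999, next = 1
-- number = 1: prev = 0,   next = 2
-- number = 2: prev = 1,   next = 999
```
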
@ -3,23 +3,30 @@ slug: /ru/operations/external-authenticators/ssl-x509
---
# SSL X.509 certificate authentication {#ssl-external-authentication}

[The 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for incoming connections in the `SSL` library. In this case, only connections that present a valid certificate can be established. Connections with untrusted certificates are rejected. Thus, certificate validation makes it possible to uniquely authenticate an incoming connection. The user is identified by the `Common Name` field of the certificate. This makes it possible to associate several certificates with the same user. Additionally, reissuing or revoking a certificate does not require changing the ClickHouse configuration.
[The 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for incoming connections in the `SSL` library. In this case, only connections that present a valid certificate can be established. Connections with untrusted certificates are rejected. Thus, certificate validation makes it possible to uniquely authenticate an incoming connection. The user is identified by the `Common Name` or `subjectAltName` field of the certificate. This makes it possible to associate several certificates with the same user. Additionally, reissuing or revoking a certificate does not require changing the ClickHouse configuration.

To enable SSL certificate authentication, a list of `Common Name`'s for each ClickHouse user must be specified in the configuration file `config.xml`:
To enable SSL certificate authentication, a list of `Common Name`'s or `subjectAltName`'s for each ClickHouse user must be specified in the configuration file `config.xml`:

**Example**
```xml
<clickhouse>
    <!-- ... -->
    <users>
        <user_name>
            <certificates>
        <user_name_1>
            <ssl_certificates>
                <common_name>host.domain.com:example_user</common_name>
                <common_name>host.domain.com:example_user_dev</common_name>
                <!-- More names -->
            </certificates>
            </ssl_certificates>
            <!-- Other settings -->
        </user_name>
        </user_name_1>
        <user_name_2>
            <ssl_certificates>
                <subject_alt_name>DNS:host.domain.com</subject_alt_name>
                <!-- More names -->
            </ssl_certificates>
            <!-- Other settings -->
        </user_name_2>
    </users>
</clickhouse>
```
@ -2077,7 +2077,7 @@ SELECT * FROM test_table

- 0 — Optimization disabled.
- 1 — Optimization enabled.

Default value: `0`.
Default value: `1`.

## optimize_trivial_count_query {#optimize-trivial-count-query}
@ -13,7 +13,7 @@ sidebar_label: "User"

``` sql
CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
    [, name2 [ON CLUSTER cluster_name2] ...]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'} | {WITH ssh_key BY KEY 'public_key' TYPE 'ssh-rsa|...'}]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name' | SAN 'TYPE:subject_alt_name'} | {WITH ssh_key BY KEY 'public_key' TYPE 'ssh-rsa|...'}]
    [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
    [DEFAULT ROLE role [,...]]
    [DEFAULT DATABASE database | NONE]
@ -3,6 +3,7 @@
#include <IO/ReadBufferFromString.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <Columns/ColumnNullable.h>
#include <Common/assert_cast.h>
#include <IO/ReadHelpers.h>

@ -47,9 +48,17 @@ Chunk ODBCSource::generate()
        for (int idx = 0; idx < result.columns(); ++idx)
        {
            const auto & sample = description.sample_block.getByPosition(idx);

            if (!result.is_null(idx))
                insertValue(*columns[idx], removeNullable(sample.type), description.types[idx].first, result, idx);
            {
                if (columns[idx]->isNullable())
                {
                    ColumnNullable & column_nullable = assert_cast<ColumnNullable &>(*columns[idx]);
                    insertValue(column_nullable.getNestedColumn(), removeNullable(sample.type), description.types[idx].first, result, idx);
                    column_nullable.getNullMapData().emplace_back(0);
                }
                else
                    insertValue(*columns[idx], removeNullable(sample.type), description.types[idx].first, result, idx);
            }
            else
                insertDefaultValue(*columns[idx], *sample.column);
        }
@ -133,10 +133,6 @@
#    include <Server/KeeperTCPHandlerFactory.h>
#endif

#if USE_JEMALLOC
#    include <jemalloc/jemalloc.h>
#endif

#if USE_AZURE_BLOB_STORAGE
#    include <azure/storage/common/internal/xml_wrapper.hpp>
#    include <azure/core/diagnostics/logger.hpp>
@ -176,34 +172,10 @@ namespace ProfileEvents

namespace fs = std::filesystem;

#if USE_JEMALLOC
static bool jemallocOptionEnabled(const char * name)
{
    bool value;
    size_t size = sizeof(value);

    if (mallctl(name, reinterpret_cast<void *>(&value), &size, /* newp= */ nullptr, /* newlen= */ 0))
        throw Poco::SystemException("mallctl() failed");

    return value;
}
#else
static bool jemallocOptionEnabled(const char *) { return false; }
#endif

int mainEntryClickHouseServer(int argc, char ** argv)
{
    DB::Server app;

    if (jemallocOptionEnabled("opt.background_thread"))
    {
        LOG_ERROR(&app.logger(),
            "jemalloc.background_thread was requested, "
            "however ClickHouse uses percpu_arena and background_thread most likely will not give any benefits, "
            "and also background_thread is not compatible with ClickHouse watchdog "
            "(that can be disabled with CLICKHOUSE_WATCHDOG_ENABLE=0)");
    }

    /// Do not fork separate process from watchdog if we attached to terminal.
    /// Otherwise it breaks gdb usage.
    /// Can be overridden by environment variable (cannot use server config at this moment).
@ -506,6 +506,14 @@ let user = 'default';
let password = '';
let add_http_cors_header = (location.protocol != 'file:');

const current_url = new URL(window.location);
/// Substitute user name if it's specified in the query string
const user_from_url = current_url.searchParams.get('user');
if (user_from_url) {
    user = user_from_url;
}


const errorCodeMessageMap = {
    516: 'Error authenticating with database. Please check your connection params and try again.'
}
@ -8,6 +8,7 @@
#include <Common/Exception.h>
#include <Common/SSHWrapper.h>
#include <Common/typeid_cast.h>
#include <Access/Common/SSLCertificateSubjects.h>

#include "config.h"

@ -238,7 +239,15 @@ bool Authentication::areCredentialsValid(
            throw Authentication::Require<GSSAcceptorContext>(auth_data.getKerberosRealm());

        case AuthenticationType::SSL_CERTIFICATE:
            return auth_data.getSSLCertificateCommonNames().contains(ssl_certificate_credentials->getCommonName());
            for (SSLCertificateSubjects::Type type : {SSLCertificateSubjects::Type::CN, SSLCertificateSubjects::Type::SAN})
            {
                for (const auto & subject : auth_data.getSSLCertificateSubjects().at(type))
                {
                    if (ssl_certificate_credentials->getSSLCertificateSubjects().at(type).contains(subject))
                        return true;
                }
            }
            return false;

        case AuthenticationType::SSH_KEY:
#if USE_SSH
@ -15,6 +15,7 @@
#include <boost/algorithm/hex.hpp>
#include <boost/algorithm/string/case_conv.hpp>

#include <Access/Common/SSLCertificateSubjects.h>
#include "config.h"

#if USE_SSL
@ -107,7 +108,7 @@ bool operator ==(const AuthenticationData & lhs, const AuthenticationData & rhs)
{
    return (lhs.type == rhs.type) && (lhs.password_hash == rhs.password_hash)
        && (lhs.ldap_server_name == rhs.ldap_server_name) && (lhs.kerberos_realm == rhs.kerberos_realm)
        && (lhs.ssl_certificate_common_names == rhs.ssl_certificate_common_names)
        && (lhs.ssl_certificate_subjects == rhs.ssl_certificate_subjects)
#if USE_SSH
        && (lhs.ssh_keys == rhs.ssh_keys)
#endif
@ -277,11 +278,16 @@ String AuthenticationData::getSalt() const
    return salt;
}

void AuthenticationData::setSSLCertificateCommonNames(boost::container::flat_set<String> common_names_)
void AuthenticationData::setSSLCertificateSubjects(SSLCertificateSubjects && ssl_certificate_subjects_)
{
    if (common_names_.empty())
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "The 'SSL CERTIFICATE' authentication type requires a non-empty list of common names.");
    ssl_certificate_common_names = std::move(common_names_);
    if (ssl_certificate_subjects_.empty())
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "The 'SSL CERTIFICATE' authentication type requires a non-empty list of subjects.");
    ssl_certificate_subjects = std::move(ssl_certificate_subjects_);
}

void AuthenticationData::addSSLCertificateSubject(SSLCertificateSubjects::Type type_, String && subject_)
{
    ssl_certificate_subjects.insert(type_, std::move(subject_));
}

std::shared_ptr<ASTAuthenticationData> AuthenticationData::toAST() const
@ -339,7 +345,14 @@ std::shared_ptr<ASTAuthenticationData> AuthenticationData::toAST() const
    }
    case AuthenticationType::SSL_CERTIFICATE:
    {
        for (const auto & name : getSSLCertificateCommonNames())
        using SSLCertificateSubjects::Type::CN;
        using SSLCertificateSubjects::Type::SAN;

        const auto & subjects = getSSLCertificateSubjects();
        SSLCertificateSubjects::Type cert_subject_type = !subjects.at(SAN).empty() ? SAN : CN;

        node->ssl_cert_subject_type = toString(cert_subject_type);
        for (const auto & name : getSSLCertificateSubjects().at(cert_subject_type))
            node->children.push_back(std::make_shared<ASTLiteral>(name));

        break;
@ -513,11 +526,9 @@ AuthenticationData AuthenticationData::fromAST(const ASTAuthenticationData & que
    }
    else if (query.type == AuthenticationType::SSL_CERTIFICATE)
    {
        boost::container::flat_set<String> common_names;
        auto ssl_cert_subject_type = parseSSLCertificateSubjectType(*query.ssl_cert_subject_type);
        for (const auto & arg : args)
            common_names.insert(checkAndGetLiteralArgument<String>(arg, "common_name"));

        auth_data.setSSLCertificateCommonNames(std::move(common_names));
            auth_data.addSSLCertificateSubject(ssl_cert_subject_type, checkAndGetLiteralArgument<String>(arg, "ssl_certificate_subject"));
    }
    else if (query.type == AuthenticationType::HTTP)
    {
@ -2,13 +2,14 @@

#include <Access/Common/AuthenticationType.h>
#include <Access/Common/HTTPAuthenticationScheme.h>
#include <Access/Common/SSLCertificateSubjects.h>
#include <Common/SSHWrapper.h>
#include <Interpreters/Context_fwd.h>
#include <Parsers/Access/ASTAuthenticationData.h>

#include <vector>
#include <base/types.h>
#include <boost/container/flat_set.hpp>


#include "config.h"

@ -58,8 +59,9 @@ public:
    const String & getKerberosRealm() const { return kerberos_realm; }
    void setKerberosRealm(const String & realm) { kerberos_realm = realm; }

    const boost::container::flat_set<String> & getSSLCertificateCommonNames() const { return ssl_certificate_common_names; }
    void setSSLCertificateCommonNames(boost::container::flat_set<String> common_names_);
    const SSLCertificateSubjects & getSSLCertificateSubjects() const { return ssl_certificate_subjects; }
    void setSSLCertificateSubjects(SSLCertificateSubjects && ssl_certificate_subjects_);
    void addSSLCertificateSubject(SSLCertificateSubjects::Type type_, String && subject_);

#if USE_SSH
    const std::vector<SSHKey> & getSSHKeys() const { return ssh_keys; }
@ -96,7 +98,7 @@ private:
    Digest password_hash;
    String ldap_server_name;
    String kerberos_realm;
    boost::container::flat_set<String> ssl_certificate_common_names;
    SSLCertificateSubjects ssl_certificate_subjects;
    String salt;
#if USE_SSH
    std::vector<SSHKey> ssh_keys;
src/Access/Common/SSLCertificateSubjects.cpp (new file, 95 lines)
@ -0,0 +1,95 @@
#include <Access/Common/SSLCertificateSubjects.h>
#include <Common/Exception.h>

#if USE_SSL
#include <openssl/x509v3.h>
#endif

namespace DB
{
namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
}

#if USE_SSL
SSLCertificateSubjects extractSSLCertificateSubjects(const Poco::Net::X509Certificate & certificate)
{
    SSLCertificateSubjects subjects;
    if (!certificate.commonName().empty())
    {
        subjects.insert(SSLCertificateSubjects::Type::CN, certificate.commonName());
    }

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wused-but-marked-unused"
    auto stackof_general_name_deleter = [](void * ptr) { GENERAL_NAMES_free(static_cast<STACK_OF(GENERAL_NAME) *>(ptr)); };
    std::unique_ptr<void, decltype(stackof_general_name_deleter)> cert_names(
        X509_get_ext_d2i(const_cast<X509 *>(certificate.certificate()), NID_subject_alt_name, nullptr, nullptr),
        stackof_general_name_deleter);

    if (STACK_OF(GENERAL_NAME) * names = static_cast<STACK_OF(GENERAL_NAME) *>(cert_names.get()))
    {
        for (int i = 0; i < sk_GENERAL_NAME_num(names); ++i)
        {
            const GENERAL_NAME * name = sk_GENERAL_NAME_value(names, i);
            if (name->type == GEN_DNS || name->type == GEN_URI)
            {
                const char * data = reinterpret_cast<const char *>(ASN1_STRING_get0_data(name->d.ia5));
                std::size_t len = ASN1_STRING_length(name->d.ia5);
                std::string subject = (name->type == GEN_DNS ? "DNS:" : "URI:") + std::string(data, len);
                subjects.insert(SSLCertificateSubjects::Type::SAN, std::move(subject));
            }
        }
    }

#pragma clang diagnostic pop
    return subjects;
}
#endif

void SSLCertificateSubjects::insert(const String & subject_type_, String && subject)
{
    insert(parseSSLCertificateSubjectType(subject_type_), std::move(subject));
}

void SSLCertificateSubjects::insert(Type subject_type_, String && subject)
{
    subjects[static_cast<size_t>(subject_type_)].insert(std::move(subject));
}

SSLCertificateSubjects::Type parseSSLCertificateSubjectType(const String & type_)
{
    if (type_ == "CN")
        return SSLCertificateSubjects::Type::CN;
    if (type_ == "SAN")
        return SSLCertificateSubjects::Type::SAN;

    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown SSL Certificate Subject Type: {}", type_);
}

String toString(SSLCertificateSubjects::Type type_)
{
    switch (type_)
    {
        case SSLCertificateSubjects::Type::CN:
            return "CN";
        case SSLCertificateSubjects::Type::SAN:
            return "SAN";
    }
}

bool operator==(const SSLCertificateSubjects & lhs, const SSLCertificateSubjects & rhs)
{
    for (SSLCertificateSubjects::Type type : {SSLCertificateSubjects::Type::CN, SSLCertificateSubjects::Type::SAN})
    {
        if (lhs.at(type) != rhs.at(type))
            return false;
    }
    return true;
}

}
src/Access/Common/SSLCertificateSubjects.h (new file, 48 lines)
@ -0,0 +1,48 @@
#pragma once

#include "config.h"
#include <base/types.h>
#include <boost/container/flat_set.hpp>

#if USE_SSL
#    include <Poco/Net/X509Certificate.h>
#endif

namespace DB
{
class SSLCertificateSubjects
{
public:
    using container = boost::container::flat_set<String>;
    enum class Type
    {
        CN,
        SAN
    };

private:
    std::array<container, size_t(Type::SAN) + 1> subjects;

public:
    inline const container & at(Type type_) const { return subjects[static_cast<size_t>(type_)]; }
    inline bool empty()
    {
        for (auto & subject_list : subjects)
        {
            if (!subject_list.empty())
                return false;
        }
        return true;
    }
    void insert(const String & subject_type_, String && subject);
    void insert(Type type_, String && subject);
    friend bool operator==(const SSLCertificateSubjects & lhs, const SSLCertificateSubjects & rhs);
};

String toString(SSLCertificateSubjects::Type type_);
SSLCertificateSubjects::Type parseSSLCertificateSubjectType(const String & type_);

#if USE_SSL
SSLCertificateSubjects extractSSLCertificateSubjects(const Poco::Net::X509Certificate & certificate);
#endif
}
@ -1,7 +1,7 @@
#include <Access/Credentials.h>
#include <Access/Common/SSLCertificateSubjects.h>
#include <Common/Exception.h>


namespace DB
{

@ -48,18 +48,18 @@ void AlwaysAllowCredentials::setUserName(const String & user_name_)
    user_name = user_name_;
}

SSLCertificateCredentials::SSLCertificateCredentials(const String & user_name_, const String & common_name_)
SSLCertificateCredentials::SSLCertificateCredentials(const String & user_name_, SSLCertificateSubjects && subjects_)
    : Credentials(user_name_)
    , common_name(common_name_)
    , certificate_subjects(subjects_)
{
    is_ready = true;
}

const String & SSLCertificateCredentials::getCommonName() const
const SSLCertificateSubjects & SSLCertificateCredentials::getSSLCertificateSubjects() const
{
    if (!isReady())
        throwNotReady();
    return common_name;
    return certificate_subjects;
}

BasicCredentials::BasicCredentials()
@ -1,6 +1,8 @@
#pragma once

#include <base/types.h>
#include <boost/container/flat_set.hpp>
#include <Access/Common/SSLCertificateSubjects.h>
#include <memory>

#include "config.h"

@ -42,11 +44,11 @@ class SSLCertificateCredentials
    : public Credentials
{
public:
    explicit SSLCertificateCredentials(const String & user_name_, const String & common_name_);
    const String & getCommonName() const;
    explicit SSLCertificateCredentials(const String & user_name_, SSLCertificateSubjects && subjects_);
    const SSLCertificateSubjects & getSSLCertificateSubjects() const;

private:
    String common_name;
    SSLCertificateSubjects certificate_subjects;
};

class BasicCredentials
@ -1,4 +1,5 @@
#include <Access/UsersConfigAccessStorage.h>
#include <Access/Common/SSLCertificateSubjects.h>
#include <Access/Quota.h>
#include <Access/RowPolicy.h>
#include <Access/User.h>
@ -194,18 +195,23 @@ namespace
            /// Fill list of allowed certificates.
            Poco::Util::AbstractConfiguration::Keys keys;
            config.keys(certificates_config, keys);
            boost::container::flat_set<String> common_names;
            for (const String & key : keys)
            {
                if (key.starts_with("common_name"))
                {
                    String value = config.getString(certificates_config + "." + key);
                    common_names.insert(std::move(value));
                    user->auth_data.addSSLCertificateSubject(SSLCertificateSubjects::Type::CN, std::move(value));
                }
                else if (key.starts_with("subject_alt_name"))
                {
                    String value = config.getString(certificates_config + "." + key);
                    if (value.empty())
                        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected ssl_certificates.subject_alt_name to not be empty");
                    user->auth_data.addSSLCertificateSubject(SSLCertificateSubjects::Type::SAN, std::move(value));
                }
                else
                    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown certificate pattern type: {}", key);
            }
            user->auth_data.setSSLCertificateCommonNames(std::move(common_names));
        }
        else if (has_ssh_keys)
        {
@ -9,6 +9,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>

namespace DB
{
@ -164,32 +165,15 @@ private:

        auto aggregate_function_clone = aggregate_function->clone();
        auto & aggregate_function_clone_typed = aggregate_function_clone->as<FunctionNode &>();

        aggregate_function_clone_typed.getArguments().getNodes() = { arithmetic_function_clone_argument };
        resolveAggregateFunctionNode(aggregate_function_clone_typed, arithmetic_function_clone_argument, result_aggregate_function_name);
        resolveAggregateFunctionNodeByName(aggregate_function_clone_typed, result_aggregate_function_name);

        arithmetic_function_clone_arguments_nodes[arithmetic_function_argument_index] = std::move(aggregate_function_clone);
        resolveOrdinaryFunctionNode(arithmetic_function_clone_typed, arithmetic_function_clone_typed.getFunctionName());
        resolveOrdinaryFunctionNodeByName(arithmetic_function_clone_typed, arithmetic_function_clone_typed.getFunctionName(), getContext());

        return arithmetic_function_clone;
    }

    void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
    {
        auto function = FunctionFactory::instance().get(function_name, getContext());
        function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
    }

    static void resolveAggregateFunctionNode(FunctionNode & function_node, const QueryTreeNodePtr & argument, const String & aggregate_function_name)
    {
        auto function_aggregate_function = function_node.getAggregateFunction();

        AggregateFunctionProperties properties;
        auto action = NullsAction::EMPTY;
        auto aggregate_function = AggregateFunctionFactory::instance().get(
            aggregate_function_name, action, {argument->getResultType()}, function_aggregate_function->getParameters(), properties);

        function_node.resolveAsAggregateFunction(std::move(aggregate_function));
    }
};

}
@@ -11,6 +11,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>

namespace DB
{
@@ -18,19 +19,18 @@ namespace DB
namespace
{

class ComparisonTupleEliminationPassVisitor : public InDepthQueryTreeVisitor<ComparisonTupleEliminationPassVisitor>
class ComparisonTupleEliminationPassVisitor : public InDepthQueryTreeVisitorWithContext<ComparisonTupleEliminationPassVisitor>
{
public:
    explicit ComparisonTupleEliminationPassVisitor(ContextPtr context_)
        : context(std::move(context_))
    {}
    using Base = InDepthQueryTreeVisitorWithContext<ComparisonTupleEliminationPassVisitor>;
    using Base::Base;

    static bool needChildVisit(QueryTreeNodePtr &, QueryTreeNodePtr & child)
    {
        return child->getNodeType() != QueryTreeNodeType::TABLE_FUNCTION;
    }

    void visitImpl(QueryTreeNodePtr & node) const
    void enterImpl(QueryTreeNodePtr & node) const
    {
        auto * function_node = node->as<FunctionNode>();
        if (!function_node)
@@ -171,13 +171,13 @@ private:
    {
        auto result_function = std::make_shared<FunctionNode>("and");
        result_function->getArguments().getNodes() = std::move(tuple_arguments_equals_functions);
        resolveOrdinaryFunctionNode(*result_function, result_function->getFunctionName());
        resolveOrdinaryFunctionNodeByName(*result_function, result_function->getFunctionName(), getContext());

        if (comparison_function_name == "notEquals")
        {
            auto not_function = std::make_shared<FunctionNode>("not");
            not_function->getArguments().getNodes().push_back(std::move(result_function));
            resolveOrdinaryFunctionNode(*not_function, not_function->getFunctionName());
            resolveOrdinaryFunctionNodeByName(*not_function, not_function->getFunctionName(), getContext());
            result_function = std::move(not_function);
        }

@@ -197,18 +197,10 @@ private:
        comparison_function->getArguments().getNodes().push_back(std::move(lhs_argument));
        comparison_function->getArguments().getNodes().push_back(std::move(rhs_argument));

        resolveOrdinaryFunctionNode(*comparison_function, comparison_function->getFunctionName());
        resolveOrdinaryFunctionNodeByName(*comparison_function, comparison_function->getFunctionName(), getContext());

        return comparison_function;
    }

    void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
    {
        auto function = FunctionFactory::instance().get(function_name, context);
        function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
    }

    ContextPtr context;
};

}
@@ -9,6 +9,7 @@
#include <Analyzer/ColumnNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/Utils.h>

namespace DB
{
@@ -77,11 +78,9 @@ public:

        /// Replace `countDistinct` of initial query into `count`
        auto result_type = function_node->getResultType();
        AggregateFunctionProperties properties;
        auto action = NullsAction::EMPTY;
        auto aggregate_function = AggregateFunctionFactory::instance().get("count", action, {}, {}, properties);
        function_node->resolveAsAggregateFunction(std::move(aggregate_function));

        function_node->getArguments().getNodes().clear();
        resolveAggregateFunctionNodeByName(*function_node, "count");
    }
};

@@ -4,6 +4,7 @@
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeVariant.h>

#include <Storages/IStorage.h>

@@ -16,6 +17,9 @@
#include <Analyzer/ColumnNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/TableNode.h>
#include <Analyzer/TableFunctionNode.h>
#include <Analyzer/Utils.h>
#include <Analyzer/JoinNode.h>

namespace DB
{
@@ -23,202 +27,410 @@ namespace DB
namespace
{

class FunctionToSubcolumnsVisitor : public InDepthQueryTreeVisitorWithContext<FunctionToSubcolumnsVisitor>
struct ColumnContext
{
    NameAndTypePair column;
    QueryTreeNodePtr column_source;
    ContextPtr context;
};

using NodeToSubcolumnTransformer = std::function<void(QueryTreeNodePtr &, FunctionNode &, ColumnContext &)>;

void optimizeFunctionLength(QueryTreeNodePtr & node, FunctionNode &, ColumnContext & ctx)
{
    /// Replace `length(argument)` with `argument.size0`
    /// `argument` may be Array or Map.

    NameAndTypePair column{ctx.column.name + ".size0", std::make_shared<DataTypeUInt64>()};
    node = std::make_shared<ColumnNode>(column, ctx.column_source);
}

template <bool positive>
void optimizeFunctionEmpty(QueryTreeNodePtr &, FunctionNode & function_node, ColumnContext & ctx)
{
    /// Replace `empty(argument)` with `equals(argument.size0, 0)` if positive
    /// Replace `notEmpty(argument)` with `notEquals(argument.size0, 0)` if not positive
    /// `argument` may be Array or Map.

    NameAndTypePair column{ctx.column.name + ".size0", std::make_shared<DataTypeUInt64>()};
    auto & function_arguments_nodes = function_node.getArguments().getNodes();

    function_arguments_nodes.clear();
    function_arguments_nodes.push_back(std::make_shared<ColumnNode>(column, ctx.column_source));
    function_arguments_nodes.push_back(std::make_shared<ConstantNode>(static_cast<UInt64>(0)));

    const auto * function_name = positive ? "equals" : "notEquals";
    resolveOrdinaryFunctionNodeByName(function_node, function_name, ctx.context);
}

String getSubcolumnNameForElement(const Field & value, const DataTypeTuple & data_type_tuple)
{
    if (value.getType() == Field::Types::String)
        return value.get<const String &>();

    if (value.getType() == Field::Types::UInt64)
        return data_type_tuple.getNameByPosition(value.get<UInt64>());

    return "";
}

String getSubcolumnNameForElement(const Field & value, const DataTypeVariant &)
{
    if (value.getType() == Field::Types::String)
        return value.get<const String &>();

    return "";
}

template <typename DataType>
void optimizeTupleOrVariantElement(QueryTreeNodePtr & node, FunctionNode & function_node, ColumnContext & ctx)
{
    /// Replace `tupleElement(tuple_argument, string_literal)`, `tupleElement(tuple_argument, integer_literal)` with `tuple_argument.column_name`.
    /// Replace `variantElement(variant_argument, string_literal)` with `variant_argument.column_name`.

    auto & function_arguments_nodes = function_node.getArguments().getNodes();
    if (function_arguments_nodes.size() != 2)
        return;

    const auto * second_argument_constant_node = function_arguments_nodes[1]->as<ConstantNode>();
    if (!second_argument_constant_node)
        return;

    const auto & data_type_concrete = assert_cast<const DataType &>(*ctx.column.type);
    auto subcolumn_name = getSubcolumnNameForElement(second_argument_constant_node->getValue(), data_type_concrete);

    if (subcolumn_name.empty())
        return;

    NameAndTypePair column{ctx.column.name + "." + subcolumn_name, function_node.getResultType()};
    node = std::make_shared<ColumnNode>(column, ctx.column_source);
}

std::map<std::pair<TypeIndex, String>, NodeToSubcolumnTransformer> node_transformers =
{
    {
        {TypeIndex::Array, "length"}, optimizeFunctionLength,
    },
    {
        {TypeIndex::Array, "empty"}, optimizeFunctionEmpty<true>,
    },
    {
        {TypeIndex::Array, "notEmpty"}, optimizeFunctionEmpty<false>,
    },
    {
        {TypeIndex::Map, "length"}, optimizeFunctionLength,
    },
    {
        {TypeIndex::Map, "empty"}, optimizeFunctionEmpty<true>,
    },
    {
        {TypeIndex::Map, "notEmpty"}, optimizeFunctionEmpty<false>,
    },
    {
        {TypeIndex::Map, "mapKeys"},
        [](QueryTreeNodePtr & node, FunctionNode & function_node, ColumnContext & ctx)
        {
            /// Replace `mapKeys(map_argument)` with `map_argument.keys`
            NameAndTypePair column{ctx.column.name + ".keys", function_node.getResultType()};
            node = std::make_shared<ColumnNode>(column, ctx.column_source);
        },
    },
    {
        {TypeIndex::Map, "mapValues"},
        [](QueryTreeNodePtr & node, FunctionNode & function_node, ColumnContext & ctx)
        {
            /// Replace `mapValues(map_argument)` with `map_argument.values`
            NameAndTypePair column{ctx.column.name + ".values", function_node.getResultType()};
            node = std::make_shared<ColumnNode>(column, ctx.column_source);
        },
    },
    {
        {TypeIndex::Map, "mapContains"},
        [](QueryTreeNodePtr &, FunctionNode & function_node, ColumnContext & ctx)
        {
            /// Replace `mapContains(map_argument, argument)` with `has(map_argument.keys, argument)`
            const auto & data_type_map = assert_cast<const DataTypeMap &>(*ctx.column.type);

            NameAndTypePair column{ctx.column.name + ".keys", std::make_shared<DataTypeArray>(data_type_map.getKeyType())};
            auto & function_arguments_nodes = function_node.getArguments().getNodes();

            auto has_function_argument = std::make_shared<ColumnNode>(column, ctx.column_source);
            function_arguments_nodes[0] = std::move(has_function_argument);

            resolveOrdinaryFunctionNodeByName(function_node, "has", ctx.context);
        },
    },
    {
        {TypeIndex::Nullable, "count"},
        [](QueryTreeNodePtr &, FunctionNode & function_node, ColumnContext & ctx)
        {
            /// Replace `count(nullable_argument)` with `sum(not(nullable_argument.null))`
            NameAndTypePair column{ctx.column.name + ".null", std::make_shared<DataTypeUInt8>()};
            auto & function_arguments_nodes = function_node.getArguments().getNodes();

            auto new_column_node = std::make_shared<ColumnNode>(column, ctx.column_source);
            auto function_node_not = std::make_shared<FunctionNode>("not");

            function_node_not->getArguments().getNodes().push_back(std::move(new_column_node));
            resolveOrdinaryFunctionNodeByName(*function_node_not, "not", ctx.context);

            function_arguments_nodes = {std::move(function_node_not)};
            resolveAggregateFunctionNodeByName(function_node, "sum");
        },
    },
    {
        {TypeIndex::Nullable, "isNull"},
        [](QueryTreeNodePtr & node, FunctionNode &, ColumnContext & ctx)
        {
            /// Replace `isNull(nullable_argument)` with `nullable_argument.null`
            NameAndTypePair column{ctx.column.name + ".null", std::make_shared<DataTypeUInt8>()};
            node = std::make_shared<ColumnNode>(column, ctx.column_source);
        },
    },
    {
        {TypeIndex::Nullable, "isNotNull"},
        [](QueryTreeNodePtr &, FunctionNode & function_node, ColumnContext & ctx)
        {
            /// Replace `isNotNull(nullable_argument)` with `not(nullable_argument.null)`
            NameAndTypePair column{ctx.column.name + ".null", std::make_shared<DataTypeUInt8>()};
            auto & function_arguments_nodes = function_node.getArguments().getNodes();

            function_arguments_nodes = {std::make_shared<ColumnNode>(column, ctx.column_source)};
            resolveOrdinaryFunctionNodeByName(function_node, "not", ctx.context);
        },
    },
    {
        {TypeIndex::Tuple, "tupleElement"}, optimizeTupleOrVariantElement<DataTypeTuple>,
    },
    {
        {TypeIndex::Variant, "variantElement"}, optimizeTupleOrVariantElement<DataTypeVariant>,
    },
};

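The node_transformers table above keys each rewrite on a (column type, function name) pair. A minimal self-contained sketch of this dispatch pattern, with illustrative names only:

    // Sketch of the (type, function-name) -> transformer dispatch used above.
    // Only the lookup pattern mirrors the pass; nothing here is ClickHouse API.
    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    enum class TypeKind { Array, Map, Nullable };

    using Transformer = std::function<std::string(const std::string & column)>;

    static const std::map<std::pair<TypeKind, std::string>, Transformer> transformers =
    {
        {{TypeKind::Array, "length"}, [](const std::string & c) { return c + ".size0"; }},
        {{TypeKind::Map, "mapKeys"}, [](const std::string & c) { return c + ".keys"; }},
        {{TypeKind::Nullable, "isNull"}, [](const std::string & c) { return c + ".null"; }},
    };

    int main()
    {
        // Look up the rewrite for length() applied to an Array column, as the pass does.
        auto it = transformers.find({TypeKind::Array, "length"});
        if (it != transformers.end())
            std::cout << it->second("arr") << '\n'; // prints "arr.size0"
    }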
std::tuple<FunctionNode *, ColumnNode *, TableNode *> getTypedNodesForOptimization(const QueryTreeNodePtr & node)
{
    auto * function_node = node->as<FunctionNode>();
    if (!function_node)
        return {};

    auto & function_arguments_nodes = function_node->getArguments().getNodes();
    if (function_arguments_nodes.empty() || function_arguments_nodes.size() > 2)
        return {};

    auto * first_argument_column_node = function_arguments_nodes.front()->as<ColumnNode>();
    if (!first_argument_column_node || first_argument_column_node->getColumnName() == "__grouping_set")
        return {};

    auto column_source = first_argument_column_node->getColumnSource();
    auto * table_node = column_source->as<TableNode>();
    if (!table_node)
        return {};

    const auto & storage = table_node->getStorage();
    const auto & storage_snapshot = table_node->getStorageSnapshot();
    auto column = first_argument_column_node->getColumn();

    if (!storage->supportsOptimizationToSubcolumns() || storage->isVirtualColumn(column.name, storage_snapshot->metadata))
        return {};

    auto column_in_table = storage_snapshot->tryGetColumn(GetColumnsOptions::All, column.name);
    if (!column_in_table || !column_in_table->type->equals(*column.type))
        return {};

    return std::make_tuple(function_node, first_argument_column_node, table_node);
}

/// First pass collects info about identifiers to determine which identifiers are allowed to optimize.
class FunctionToSubcolumnsVisitorFirstPass : public InDepthQueryTreeVisitorWithContext<FunctionToSubcolumnsVisitorFirstPass>
{
public:
    using Base = InDepthQueryTreeVisitorWithContext<FunctionToSubcolumnsVisitor>;
    using Base = InDepthQueryTreeVisitorWithContext<FunctionToSubcolumnsVisitorFirstPass>;
    using Base::Base;

    void enterImpl(const QueryTreeNodePtr & node)
    {
        if (!getSettings().optimize_functions_to_subcolumns)
            return;

        if (auto * table_node = node->as<TableNode>())
        {
            enterImpl(*table_node);
            return;
        }

        if (auto * column_node = node->as<ColumnNode>())
        {
            enterImpl(*column_node);
            return;
        }

        auto [function_node, first_argument_node, table_node] = getTypedNodesForOptimization(node);
        if (function_node && first_argument_node && table_node)
        {
            enterImpl(*function_node, *first_argument_node, *table_node);
            return;
        }

        if (const auto * join_node = node->as<JoinNode>())
        {
            can_wrap_result_columns_with_nullable |= getContext()->getSettingsRef().join_use_nulls;
            return;
        }

        if (const auto * query_node = node->as<QueryNode>())
        {
            if (query_node->isGroupByWithCube() || query_node->isGroupByWithRollup() || query_node->isGroupByWithGroupingSets())
                can_wrap_result_columns_with_nullable |= getContext()->getSettingsRef().group_by_use_nulls;
            return;
        }
    }

    std::unordered_set<Identifier> getIdentifiersToOptimize() const
    {
        if (can_wrap_result_columns_with_nullable)
        {
            /// Do not optimize if we have JOIN with setting join_use_null.
            /// Do not optimize if we have GROUP BY WITH ROLLUP/CUBE/GROUPING SETS with setting group_by_use_nulls.
            /// It may change the behaviour if subcolumn can be converted
            /// to Nullable while the original column cannot (e.g. for Array type).
            return {};
        }

        /// Do not optimize if full column is requested in other context.
        /// It doesn't make sense because it doesn't reduce amount of read data
        /// and optimized functions are not computation heavy. But introducing
        /// new identifier complicates query analysis and may break it.
        ///
        /// E.g. query:
        /// SELECT n FROM table GROUP BY n HAVING isNotNull(n)
        /// may be optimized to incorrect query:
        /// SELECT n FROM table GROUP BY n HAVING not(n.null)
        /// Will produce: `n.null` is not under aggregate function and not in GROUP BY keys)
        ///
        /// Do not optimize index columns (primary, min-max, secondary),
        /// because otherwise analysis of indexes may be broken.
        /// TODO: handle subcolumns in index analysis.

        std::unordered_set<Identifier> identifiers_to_optimize;
        for (const auto & [identifier, count] : optimized_identifiers_count)
        {
            if (all_key_columns.contains(identifier))
                continue;

            auto it = identifiers_count.find(identifier);
            if (it != identifiers_count.end() && it->second == count)
                identifiers_to_optimize.insert(identifier);
        }

        return identifiers_to_optimize;
    }

private:
    std::unordered_set<Identifier> all_key_columns;
    std::unordered_map<Identifier, UInt64> identifiers_count;
    std::unordered_map<Identifier, UInt64> optimized_identifiers_count;

    NameSet processed_tables;
    bool can_wrap_result_columns_with_nullable = false;

    void enterImpl(const TableNode & table_node)
    {
        auto table_name = table_node.getStorage()->getStorageID().getFullTableName();
        if (processed_tables.emplace(table_name).second)
            return;

        auto add_key_columns = [&](const auto & key_columns)
        {
            for (const auto & column_name : key_columns)
            {
                Identifier identifier({table_name, column_name});
                all_key_columns.insert(identifier);
            }
        };

        const auto & metadata_snapshot = table_node.getStorageSnapshot()->metadata;
        const auto & primary_key_columns = metadata_snapshot->getColumnsRequiredForPrimaryKey();
        const auto & partition_key_columns = metadata_snapshot->getColumnsRequiredForPartitionKey();

        add_key_columns(primary_key_columns);
        add_key_columns(partition_key_columns);

        for (const auto & index : metadata_snapshot->getSecondaryIndices())
        {
            const auto & index_columns = index.expression->getRequiredColumns();
            add_key_columns(index_columns);
        }
    }

    void enterImpl(const ColumnNode & column_node)
    {
        if (column_node.getColumnName() == "__grouping_set")
            return;

        auto column_source = column_node.getColumnSource();
        auto * table_node = column_source->as<TableNode>();
        if (!table_node)
            return;

        auto table_name = table_node->getStorage()->getStorageID().getFullTableName();
        Identifier qualified_name({table_name, column_node.getColumnName()});

        ++identifiers_count[qualified_name];
    }

    void enterImpl(const FunctionNode & function_node, const ColumnNode & first_argument_column_node, const TableNode & table_node)
    {
        /// For queries with FINAL converting function to subcolumn may alter
        /// special merging algorithms and produce wrong result of query.
        if (table_node.hasTableExpressionModifiers() && table_node.getTableExpressionModifiers()->hasFinal())
            return;

        const auto & column = first_argument_column_node.getColumn();
        auto table_name = table_node.getStorage()->getStorageID().getFullTableName();
        Identifier qualified_name({table_name, column.name});

        if (node_transformers.contains({column.type->getTypeId(), function_node.getFunctionName()}))
            ++optimized_identifiers_count[qualified_name];
    }
};

/// Second pass optimizes functions to subcolumns for allowed identifiers.
class FunctionToSubcolumnsVisitorSecondPass : public InDepthQueryTreeVisitorWithContext<FunctionToSubcolumnsVisitorSecondPass>
{
private:
    std::unordered_set<Identifier> identifiers_to_optimize;

public:
    using Base = InDepthQueryTreeVisitorWithContext<FunctionToSubcolumnsVisitorSecondPass>;
    using Base::Base;

    FunctionToSubcolumnsVisitorSecondPass(ContextPtr context_, std::unordered_set<Identifier> identifiers_to_optimize_)
        : Base(std::move(context_)), identifiers_to_optimize(std::move(identifiers_to_optimize_))
    {
    }

    void enterImpl(QueryTreeNodePtr & node) const
    {
        if (!getSettings().optimize_functions_to_subcolumns)
            return;

        auto * function_node = node->as<FunctionNode>();
        if (!function_node)
            return;

        auto & function_arguments_nodes = function_node->getArguments().getNodes();
        size_t function_arguments_nodes_size = function_arguments_nodes.size();

        if (function_arguments_nodes.empty() || function_arguments_nodes_size > 2)
            return;

        auto * first_argument_column_node = function_arguments_nodes.front()->as<ColumnNode>();

        if (!first_argument_column_node)
            return;

        if (first_argument_column_node->getColumnName() == "__grouping_set")
            return;

        auto column_source = first_argument_column_node->getColumnSource();
        auto * table_node = column_source->as<TableNode>();

        if (!table_node)
            return;

        const auto & storage = table_node->getStorage();
        if (!storage->supportsSubcolumns())
        auto [function_node, first_argument_column_node, table_node] = getTypedNodesForOptimization(node);
        if (!function_node || !first_argument_column_node || !table_node)
            return;

        auto column = first_argument_column_node->getColumn();
        WhichDataType column_type(column.type);
        auto table_name = table_node->getStorage()->getStorageID().getFullTableName();

        const auto & function_name = function_node->getFunctionName();
        Identifier qualified_name({table_name, column.name});
        if (!identifiers_to_optimize.contains(qualified_name))
            return;

        if (function_arguments_nodes_size == 1)
        auto transformer_it = node_transformers.find({column.type->getTypeId(), function_node->getFunctionName()});
        if (transformer_it != node_transformers.end())
        {
            if (column_type.isArray())
            {
                if (function_name == "length")
                {
                    /// Replace `length(array_argument)` with `array_argument.size0`
                    column.name += ".size0";
                    column.type = std::make_shared<DataTypeUInt64>();

                    node = std::make_shared<ColumnNode>(column, column_source);
                }
                else if (function_name == "empty")
                {
                    /// Replace `empty(array_argument)` with `equals(array_argument.size0, 0)`
                    column.name += ".size0";
                    column.type = std::make_shared<DataTypeUInt64>();

                    function_arguments_nodes.clear();
                    function_arguments_nodes.push_back(std::make_shared<ColumnNode>(column, column_source));
                    function_arguments_nodes.push_back(std::make_shared<ConstantNode>(static_cast<UInt64>(0)));

                    resolveOrdinaryFunctionNode(*function_node, "equals");
                }
                else if (function_name == "notEmpty")
                {
                    /// Replace `notEmpty(array_argument)` with `notEquals(array_argument.size0, 0)`
                    column.name += ".size0";
                    column.type = std::make_shared<DataTypeUInt64>();

                    function_arguments_nodes.clear();
                    function_arguments_nodes.push_back(std::make_shared<ColumnNode>(column, column_source));
                    function_arguments_nodes.push_back(std::make_shared<ConstantNode>(static_cast<UInt64>(0)));

                    resolveOrdinaryFunctionNode(*function_node, "notEquals");
                }
            }
            else if (column_type.isNullable())
            {
                if (function_name == "isNull")
                {
                    /// Replace `isNull(nullable_argument)` with `nullable_argument.null`
                    column.name += ".null";
                    column.type = std::make_shared<DataTypeUInt8>();

                    node = std::make_shared<ColumnNode>(column, column_source);
                }
                else if (function_name == "isNotNull")
                {
                    /// Replace `isNotNull(nullable_argument)` with `not(nullable_argument.null)`
                    column.name += ".null";
                    column.type = std::make_shared<DataTypeUInt8>();

                    function_arguments_nodes = {std::make_shared<ColumnNode>(column, column_source)};

                    resolveOrdinaryFunctionNode(*function_node, "not");
                }
            }
            else if (column_type.isMap())
            {
                if (function_name == "mapKeys")
                {
                    /// Replace `mapKeys(map_argument)` with `map_argument.keys`
                    column.name += ".keys";
                    column.type = function_node->getResultType();

                    node = std::make_shared<ColumnNode>(column, column_source);
                }
                else if (function_name == "mapValues")
                {
                    /// Replace `mapValues(map_argument)` with `map_argument.values`
                    column.name += ".values";
                    column.type = function_node->getResultType();

                    node = std::make_shared<ColumnNode>(column, column_source);
                }
            }
            ColumnContext ctx{std::move(column), first_argument_column_node->getColumnSource(), getContext()};
            transformer_it->second(node, *function_node, ctx);
        }
        else
        {
            const auto * second_argument_constant_node = function_arguments_nodes[1]->as<ConstantNode>();

            if (function_name == "tupleElement" && column_type.isTuple() && second_argument_constant_node)
            {
                /** Replace `tupleElement(tuple_argument, string_literal)`, `tupleElement(tuple_argument, integer_literal)`
                  * with `tuple_argument.column_name`.
                  */
                const auto & tuple_element_constant_value = second_argument_constant_node->getValue();
                const auto & tuple_element_constant_value_type = tuple_element_constant_value.getType();

                const auto & data_type_tuple = assert_cast<const DataTypeTuple &>(*column.type);

                String subcolumn_name;

                if (tuple_element_constant_value_type == Field::Types::String)
                {
                    subcolumn_name = tuple_element_constant_value.get<const String &>();
                }
                else if (tuple_element_constant_value_type == Field::Types::UInt64)
                {
                    auto tuple_column_index = tuple_element_constant_value.get<UInt64>();
                    subcolumn_name = data_type_tuple.getNameByPosition(tuple_column_index);
                }
                else
                {
                    return;
                }

                column.name += '.';
                column.name += subcolumn_name;
                column.type = function_node->getResultType();

                node = std::make_shared<ColumnNode>(column, column_source);
            }
            else if (function_name == "variantElement" && isVariant(column_type) && second_argument_constant_node)
            {
                /// Replace `variantElement(variant_argument, type_name)` with `variant_argument.type_name`.
                const auto & variant_element_constant_value = second_argument_constant_node->getValue();
                String subcolumn_name;

                if (variant_element_constant_value.getType() != Field::Types::String)
                    return;

                subcolumn_name = variant_element_constant_value.get<const String &>();

                column.name += '.';
                column.name += subcolumn_name;
                column.type = function_node->getResultType();

                node = std::make_shared<ColumnNode>(column, column_source);
            }
            else if (function_name == "mapContains" && column_type.isMap())
            {
                const auto & data_type_map = assert_cast<const DataTypeMap &>(*column.type);

                /// Replace `mapContains(map_argument, argument)` with `has(map_argument.keys, argument)`
                column.name += ".keys";
                column.type = std::make_shared<DataTypeArray>(data_type_map.getKeyType());

                auto has_function_argument = std::make_shared<ColumnNode>(column, column_source);
                function_arguments_nodes[0] = std::move(has_function_argument);

                resolveOrdinaryFunctionNode(*function_node, "has");
            }
        }
    }

private:
    void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
    {
        auto function = FunctionFactory::instance().get(function_name, getContext());
        function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
    }
};

@@ -226,8 +438,15 @@ private:

void FunctionToSubcolumnsPass::run(QueryTreeNodePtr & query_tree_node, ContextPtr context)
{
    FunctionToSubcolumnsVisitor visitor(context);
    visitor.visit(query_tree_node);
    FunctionToSubcolumnsVisitorFirstPass first_visitor(context);
    first_visitor.visit(query_tree_node);
    auto identifiers_to_optimize = first_visitor.getIdentifiersToOptimize();

    if (identifiers_to_optimize.empty())
        return;

    FunctionToSubcolumnsVisitorSecondPass second_visitor(std::move(context), std::move(identifiers_to_optimize));
    second_visitor.visit(query_tree_node);
}

}
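run() wires the two visitors together: the first pass only collects identifier statistics, and the rewrite runs solely over identifiers that survived the checks. A toy model of that contract, assuming nothing about the ClickHouse API:

    // Pass 1 counts every use of a column and every optimizable use; pass 2
    // rewrites a column only if *all* of its uses are optimizable.
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    struct Use { std::string column; bool optimizable; };

    int main()
    {
        std::vector<Use> query_tree = {{"arr", true}, {"n", true}, {"n", false}};

        std::map<std::string, int> total, optimizable;
        for (const auto & use : query_tree)  // first pass: collect counts
        {
            ++total[use.column];
            if (use.optimizable)
                ++optimizable[use.column];
        }

        for (auto & use : query_tree)  // second pass: rewrite fully-covered columns
            if (use.optimizable && optimizable[use.column] == total[use.column])
                use.column += ".size0";

        for (const auto & use : query_tree)
            std::cout << use.column << '\n';  // prints: arr.size0, n, n
    }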
@@ -6,6 +6,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>
#include <Interpreters/Context.h>
#include <DataTypes/DataTypesNumber.h>

@@ -47,25 +48,17 @@ public:

        if (function_node->getFunctionName() == "count" && !first_argument_constant_literal.isNull())
        {
            resolveAsCountAggregateFunction(*function_node);
            function_node->getArguments().getNodes().clear();
            resolveAggregateFunctionNodeByName(*function_node, "count");
        }
        else if (function_node->getFunctionName() == "sum" &&
            first_argument_constant_literal.getType() == Field::Types::UInt64 &&
            first_argument_constant_literal.get<UInt64>() == 1)
        {
            resolveAsCountAggregateFunction(*function_node);
            function_node->getArguments().getNodes().clear();
            resolveAggregateFunctionNodeByName(*function_node, "count");
        }
    }
private:
    static void resolveAsCountAggregateFunction(FunctionNode & function_node)
    {
        AggregateFunctionProperties properties;
        auto aggregate_function = AggregateFunctionFactory::instance().get("count", NullsAction::EMPTY, {}, {}, properties);

        function_node.resolveAsAggregateFunction(std::move(aggregate_function));
    }
};

}
@@ -5,6 +5,7 @@
#include <Analyzer/ColumnNode.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Common/DateLUT.h>
#include <Common/DateLUTImpl.h>
@@ -74,8 +74,7 @@ public:

            new_arguments[1] = std::move(if_arguments_nodes[0]);
            function_arguments_nodes = std::move(new_arguments);
            resolveAsAggregateFunctionWithIf(
                *function_node, {function_arguments_nodes[0]->getResultType(), function_arguments_nodes[1]->getResultType()});
            resolveAggregateFunctionNodeByName(*function_node, function_node->getFunctionName() + "If");
        }
    }
    else if (first_const_node)
@@ -104,27 +103,10 @@ public:
            new_arguments[1] = std::move(not_function);

            function_arguments_nodes = std::move(new_arguments);
            resolveAsAggregateFunctionWithIf(
                *function_node, {function_arguments_nodes[0]->getResultType(), function_arguments_nodes[1]->getResultType()});
            resolveAggregateFunctionNodeByName(*function_node, function_node->getFunctionName() + "If");
        }
    }
}

private:
    static void resolveAsAggregateFunctionWithIf(FunctionNode & function_node, const DataTypes & argument_types)
    {
        auto result_type = function_node.getResultType();

        AggregateFunctionProperties properties;
        auto aggregate_function = AggregateFunctionFactory::instance().get(
            function_node.getFunctionName() + "If",
            function_node.getNullsAction(),
            argument_types,
            function_node.getAggregateFunction()->getParameters(),
            properties);

        function_node.resolveAsAggregateFunction(std::move(aggregate_function));
    }
};

}
@@ -73,23 +73,24 @@ public:

    const auto lhs = std::make_shared<FunctionNode>("sum");
    lhs->getArguments().getNodes().push_back(func_plus_minus_nodes[column_id]);
    resolveAsAggregateFunctionNode(*lhs, column_type);
    resolveAggregateFunctionNodeByName(*lhs, lhs->getFunctionName());

    const auto rhs_count = std::make_shared<FunctionNode>("count");
    rhs_count->getArguments().getNodes().push_back(func_plus_minus_nodes[column_id]);
    resolveAsAggregateFunctionNode(*rhs_count, column_type);
    resolveAggregateFunctionNodeByName(*rhs_count, rhs_count->getFunctionName());

    const auto rhs = std::make_shared<FunctionNode>("multiply");
    rhs->getArguments().getNodes().push_back(func_plus_minus_nodes[literal_id]);
    rhs->getArguments().getNodes().push_back(rhs_count);
    resolveOrdinaryFunctionNode(*rhs, rhs->getFunctionName());
    resolveOrdinaryFunctionNodeByName(*rhs, rhs->getFunctionName(), getContext());

    auto new_node = std::make_shared<FunctionNode>(Poco::toLower(func_plus_minus_node->getFunctionName()));
    if (column_id == 0)
        new_node->getArguments().getNodes() = {lhs, rhs};
    else if (column_id == 1)
        new_node->getArguments().getNodes() = {rhs, lhs};
    resolveOrdinaryFunctionNode(*new_node, new_node->getFunctionName());

    resolveOrdinaryFunctionNodeByName(*new_node, new_node->getFunctionName(), getContext());

    if (!new_node)
        return;
@@ -100,28 +101,7 @@ public:
        res = createCastFunction(res, function_node->getResultType(), getContext());

    node = std::move(res);

}

private:
    void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
    {
        const auto function = FunctionFactory::instance().get(function_name, getContext());
        function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
    }

    static void resolveAsAggregateFunctionNode(FunctionNode & function_node, const DataTypePtr & argument_type)
    {
        AggregateFunctionProperties properties;
        const auto aggregate_function = AggregateFunctionFactory::instance().get(function_node.getFunctionName(),
            NullsAction::EMPTY,
            {argument_type},
            {},
            properties);

        function_node.resolveAsAggregateFunction(aggregate_function);
    }

};

}

@@ -5,6 +5,7 @@

#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Analyzer/Utils.h>

#include <Functions/FunctionFactory.h>

@@ -65,7 +66,8 @@ public:
    auto multiplier_node = function_node_arguments_nodes[0];
    function_node_arguments_nodes[0] = std::move(function_node_arguments_nodes[1]);
    function_node_arguments_nodes.resize(1);
    resolveAsCountIfAggregateFunction(*function_node, function_node_arguments_nodes[0]->getResultType());

    resolveAggregateFunctionNodeByName(*function_node, "countIf");

    if (constant_value_literal.get<UInt64>() != 1)
    {
@@ -115,7 +117,7 @@ public:
    function_node_arguments_nodes[0] = nested_if_function_arguments_nodes[0];
    function_node_arguments_nodes.resize(1);

    resolveAsCountIfAggregateFunction(*function_node, function_node_arguments_nodes[0]->getResultType());
    resolveAggregateFunctionNodeByName(*function_node, "countIf");

    if (if_true_condition_value != 1)
    {
@@ -144,7 +146,7 @@ public:
    function_node_arguments_nodes[0] = std::move(not_function);
    function_node_arguments_nodes.resize(1);

    resolveAsCountIfAggregateFunction(*function_node, function_node_arguments_nodes[0]->getResultType());
    resolveAggregateFunctionNodeByName(*function_node, "countIf");

    if (if_false_condition_value != 1)
    {
@@ -156,15 +158,6 @@ public:
    }

private:
    static void resolveAsCountIfAggregateFunction(FunctionNode & function_node, const DataTypePtr & argument_type)
    {
        AggregateFunctionProperties properties;
        auto aggregate_function = AggregateFunctionFactory::instance().get(
            "countIf", NullsAction::EMPTY, {argument_type}, function_node.getAggregateFunction()->getParameters(), properties);

        function_node.resolveAsAggregateFunction(std::move(aggregate_function));
    }

    QueryTreeNodePtr getMultiplyFunction(QueryTreeNodePtr left, QueryTreeNodePtr right)
    {
        auto multiply_function_node = std::make_shared<FunctionNode>("multiply");

@@ -7,6 +7,7 @@

#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>


namespace DB

@@ -7,6 +7,7 @@
#include <Analyzer/FunctionNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/Utils.h>

namespace DB
{
@@ -184,11 +185,8 @@ public:
    /// Replace uniq of initial query to count
    if (match_subquery_with_distinct() || match_subquery_with_group_by())
    {
        AggregateFunctionProperties properties;
        auto aggregate_function = AggregateFunctionFactory::instance().get("count", NullsAction::EMPTY, {}, {}, properties);

        function_node->getArguments().getNodes().clear();
        function_node->resolveAsAggregateFunction(std::move(aggregate_function));
        resolveAggregateFunctionNodeByName(*function_node, "count");
    }
}
};
@@ -636,16 +636,16 @@ private:
    bool has_function = false;
};

inline AggregateFunctionPtr resolveAggregateFunction(FunctionNode * function_node)
inline AggregateFunctionPtr resolveAggregateFunction(FunctionNode & function_node, const String & function_name)
{
    Array parameters;
    for (const auto & param : function_node->getParameters())
    for (const auto & param : function_node.getParameters())
    {
        auto * constant = param->as<ConstantNode>();
        parameters.push_back(constant->getValue());
    }

    const auto & function_node_argument_nodes = function_node->getArguments().getNodes();
    const auto & function_node_argument_nodes = function_node.getArguments().getNodes();

    DataTypes argument_types;
    argument_types.reserve(function_node_argument_nodes.size());
@@ -655,7 +655,7 @@ inline AggregateFunctionPtr resolveAggregateFunction(FunctionNode * function_node)

    AggregateFunctionProperties properties;
    auto action = NullsAction::EMPTY;
    return AggregateFunctionFactory::instance().get(function_node->getFunctionName(), action, argument_types, parameters, properties);
    return AggregateFunctionFactory::instance().get(function_name, action, argument_types, parameters, properties);
}

}
@@ -736,11 +736,11 @@ void rerunFunctionResolve(FunctionNode * function_node, ContextPtr context)
{
    if (name == "nothing" || name == "nothingUInt64" || name == "nothingNull")
        return;
    function_node->resolveAsAggregateFunction(resolveAggregateFunction(function_node));
    function_node->resolveAsAggregateFunction(resolveAggregateFunction(*function_node, function_node->getFunctionName()));
}
else if (function_node->isWindowFunction())
{
    function_node->resolveAsWindowFunction(resolveAggregateFunction(function_node));
    function_node->resolveAsWindowFunction(resolveAggregateFunction(*function_node, function_node->getFunctionName()));
}
}

@@ -793,6 +793,18 @@ QueryTreeNodePtr createCastFunction(QueryTreeNodePtr node, DataTypePtr result_type, ContextPtr context)
    return function_node;
}

void resolveOrdinaryFunctionNodeByName(FunctionNode & function_node, const String & function_name, const ContextPtr & context)
{
    auto function = FunctionFactory::instance().get(function_name, context);
    function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
}

void resolveAggregateFunctionNodeByName(FunctionNode & function_node, const String & function_name)
{
    auto aggregate_function = resolveAggregateFunction(function_node, function_name);
    function_node.resolveAsAggregateFunction(std::move(aggregate_function));
}

/** Returns:
  * {_, false} - multiple sources
  * {nullptr, true} - no sources (for constants)

@@ -112,6 +112,14 @@ NameSet collectIdentifiersFullNames(const QueryTreeNodePtr & node);
/// Wrap node into `_CAST` function
QueryTreeNodePtr createCastFunction(QueryTreeNodePtr node, DataTypePtr result_type, ContextPtr context);

/// Resolves function node as ordinary function with given name.
/// Arguments and parameters are taken from the node.
void resolveOrdinaryFunctionNodeByName(FunctionNode & function_node, const String & function_name, const ContextPtr & context);

/// Resolves function node as aggregate function with given name.
/// Arguments and parameters are taken from the node.
void resolveAggregateFunctionNodeByName(FunctionNode & function_node, const String & function_name);

/// Checks that node has only one source and returns it
QueryTreeNodePtr getExpressionSource(const QueryTreeNodePtr & node);

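The two declarations above centralize a pattern repeated across the analyzer passes: mutate a node's arguments, then re-resolve it under a (possibly different) function name. A hypothetical standalone sketch of that flow:

    // Illustrative names only, not the ClickHouse API: a registry validates the
    // target name and the node keeps its (already rewritten) argument list.
    #include <functional>
    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct FunctionNode
    {
        std::string name;
        std::vector<long> arguments;
    };

    using Impl = std::function<long(const std::vector<long> &)>;

    const std::map<std::string, Impl> & factory()
    {
        static const std::map<std::string, Impl> instance =
        {
            {"count", [](const std::vector<long> & args) { return static_cast<long>(args.size()); }},
            {"sum", [](const std::vector<long> & args) { long s = 0; for (long a : args) s += a; return s; }},
        };
        return instance;
    }

    void resolveFunctionNodeByName(FunctionNode & node, const std::string & name)
    {
        if (!factory().contains(name))
            throw std::invalid_argument("unknown function: " + name);
        node.name = name; // arguments are taken from the node, as in the declarations above
    }

    int main()
    {
        FunctionNode node{"sum", {1, 1, 1}};      // e.g. rewriting sum(1)...
        node.arguments.clear();
        resolveFunctionNodeByName(node, "count"); // ...into count()
        std::cout << node.name << "()" << '\n';
    }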
@@ -210,6 +210,7 @@ add_object_library(clickhouse_analyzer_passes Analyzer/Resolve)
add_object_library(clickhouse_planner Planner)
add_object_library(clickhouse_interpreters Interpreters)
add_object_library(clickhouse_interpreters_cache Interpreters/Cache)
add_object_library(clickhouse_interpreters_hash_join Interpreters/HashJoin)
add_object_library(clickhouse_interpreters_access Interpreters/Access)
add_object_library(clickhouse_interpreters_mysql Interpreters/MySQL)
add_object_library(clickhouse_interpreters_clusterproxy Interpreters/ClusterProxy)
@@ -1206,11 +1206,8 @@ void ClientBase::receiveResult(ASTPtr parsed_query, Int32 signals_before_stop, bool partial_result_on_first_cancel)
    if (local_format_error)
        std::rethrow_exception(local_format_error);

    if (cancelled && is_interactive)
    {
    if (cancelled && is_interactive && !cancelled_printed.exchange(true))
        output_stream << "Query was cancelled." << std::endl;
        cancelled_printed = true;
    }
}

@@ -1326,7 +1323,7 @@ void ClientBase::onEndOfStream()

    if (is_interactive)
    {
        if (cancelled && !cancelled_printed)
        if (cancelled && !cancelled_printed.exchange(true))
            output_stream << "Query was cancelled." << std::endl;
        else if (!written_first_block)
            output_stream << "Ok." << std::endl;

@@ -338,8 +338,8 @@ protected:
    bool allow_repeated_settings = false;
    bool allow_merge_tree_settings = false;

    bool cancelled = false;
    bool cancelled_printed = false;
    std::atomic_bool cancelled = false;
    std::atomic_bool cancelled_printed = false;

    /// Unpacked descriptors and streams for the ease of use.
    int in_fd = STDIN_FILENO;
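The switch to std::atomic_bool matters because exchange(true) returns the previous value, so exactly one of the racing call sites observes false and prints. A minimal sketch of that guarantee:

    // Two threads race to print; the first exchange(true) sees false, the
    // second sees true, so the message is printed exactly once.
    #include <atomic>
    #include <iostream>
    #include <thread>

    int main()
    {
        std::atomic_bool cancelled_printed{false};

        auto print_once = [&]
        {
            if (!cancelled_printed.exchange(true)) // only the first caller sees false
                std::cout << "Query was cancelled." << std::endl;
        };

        std::thread t1(print_once);
        std::thread t2(print_once);
        t1.join();
        t2.join();
    }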
@@ -1,8 +1,6 @@
#pragma once

#include <deque>
#include <type_traits>
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <optional>
@@ -200,22 +198,18 @@ public:
      */
    bool finish()
    {
        bool was_finished_before = false;

        {
            std::lock_guard lock(queue_mutex);

            if (is_finished)
                return true;

            was_finished_before = is_finished;
            is_finished = true;
        }

        pop_condition.notify_all();
        push_condition.notify_all();

        return was_finished_before;
        return false;
    }

    /// Returns if queue is finished
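The reworked finish() reports whether the queue had already been finished and performs the notifications only on the first transition. A trimmed-down standalone model of that contract (not the real ConcurrentBoundedQueue):

    #include <condition_variable>
    #include <iostream>
    #include <mutex>

    struct Queue
    {
        std::mutex queue_mutex;
        std::condition_variable pop_condition, push_condition;
        bool is_finished = false;

        bool finish()
        {
            {
                std::lock_guard lock(queue_mutex);
                if (is_finished)
                    return true;         // repeated call: nothing left to notify
                is_finished = true;
            }
            pop_condition.notify_all();  // wake all waiters exactly once
            push_condition.notify_all();
            return false;                // this call performed the transition
        }
    };

    int main()
    {
        Queue q;
        std::cout << q.finish() << ' ' << q.finish() << '\n'; // prints "0 1"
    }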
@@ -447,14 +447,18 @@ The server successfully detected this situation and will download merged part from replica
    M(QueryMemoryLimitExceeded, "Number of times when memory limit exceeded for query.") \
    \
    M(AzureGetObject, "Number of Azure API GetObject calls.") \
    M(AzureUploadPart, "Number of Azure blob storage API UploadPart calls") \
    M(AzureUpload, "Number of Azure blob storage API Upload calls") \
    M(AzureStageBlock, "Number of Azure blob storage API StageBlock calls") \
    M(AzureCommitBlockList, "Number of Azure blob storage API CommitBlockList calls") \
    M(AzureCopyObject, "Number of Azure blob storage API CopyObject calls") \
    M(AzureDeleteObjects, "Number of Azure blob storage API DeleteObject(s) calls.") \
    M(AzureListObjects, "Number of Azure blob storage API ListObjects calls.") \
    M(AzureGetProperties, "Number of Azure blob storage API GetProperties calls.") \
    \
    M(DiskAzureGetObject, "Number of Disk Azure API GetObject calls.") \
    M(DiskAzureUploadPart, "Number of Disk Azure blob storage API UploadPart calls") \
    M(DiskAzureUpload, "Number of Disk Azure blob storage API Upload calls") \
    M(DiskAzureStageBlock, "Number of Disk Azure blob storage API StageBlock calls") \
    M(DiskAzureCommitBlockList, "Number of Disk Azure blob storage API CommitBlockList calls") \
    M(DiskAzureCopyObject, "Number of Disk Azure blob storage API CopyObject calls") \
    M(DiskAzureListObjects, "Number of Disk Azure blob storage API ListObjects calls.") \
    M(DiskAzureDeleteObjects, "Number of Azure blob storage API DeleteObject(s) calls.") \
@@ -611,6 +615,13 @@ The server successfully detected this situation and will download merged part from replica
    M(KeeperPacketsReceived, "Packets received by keeper server") \
    M(KeeperRequestTotal, "Total requests number on keeper server") \
    M(KeeperLatency, "Keeper latency") \
    M(KeeperTotalElapsedMicroseconds, "Keeper total latency for a single request") \
    M(KeeperProcessElapsedMicroseconds, "Keeper commit latency for a single request") \
    M(KeeperPreprocessElapsedMicroseconds, "Keeper preprocessing latency for a single request") \
    M(KeeperStorageLockWaitMicroseconds, "Time spent waiting for acquiring Keeper storage lock") \
    M(KeeperCommitWaitElapsedMicroseconds, "Time spent waiting for certain log to be committed") \
    M(KeeperBatchMaxCount, "Number of times the size of batch was limited by the amount") \
    M(KeeperBatchMaxTotalSize, "Number of times the size of batch was limited by the total bytes size") \
    M(KeeperCommits, "Number of successful commits") \
    M(KeeperCommitsFailed, "Number of failed commits") \
    M(KeeperSnapshotCreations, "Number of snapshots creations")\
@@ -9,7 +9,6 @@
#include <IO/ReadHelpers.h>
#include <fmt/format.h>
#include <Common/logger_useful.h>
#include <array>


namespace Coordination
@@ -29,7 +28,7 @@ void ZooKeeperResponse::write(WriteBuffer & out) const
    Coordination::write(buf.str(), out);
}

std::string ZooKeeperRequest::toString() const
std::string ZooKeeperRequest::toString(bool short_format) const
{
    return fmt::format(
        "XID = {}\n"
@@ -37,7 +36,7 @@ std::string ZooKeeperRequest::toString() const
        "Additional info:\n{}",
        xid,
        getOpNum(),
        toStringImpl());
        toStringImpl(short_format));
}

void ZooKeeperRequest::write(WriteBuffer & out) const
@@ -60,7 +59,7 @@ void ZooKeeperSyncRequest::readImpl(ReadBuffer & in)
    Coordination::read(path, in);
}

std::string ZooKeeperSyncRequest::toStringImpl() const
std::string ZooKeeperSyncRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format("path = {}", path);
}
@@ -91,7 +90,7 @@ void ZooKeeperReconfigRequest::readImpl(ReadBuffer & in)
    Coordination::read(version, in);
}

std::string ZooKeeperReconfigRequest::toStringImpl() const
std::string ZooKeeperReconfigRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format(
        "joining = {}\nleaving = {}\nnew_members = {}\nversion = {}",
@@ -145,7 +144,7 @@ void ZooKeeperAuthRequest::readImpl(ReadBuffer & in)
    Coordination::read(data, in);
}

std::string ZooKeeperAuthRequest::toStringImpl() const
std::string ZooKeeperAuthRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format(
        "type = {}\n"
@@ -191,7 +190,7 @@ void ZooKeeperCreateRequest::readImpl(ReadBuffer & in)
        is_sequential = true;
}

std::string ZooKeeperCreateRequest::toStringImpl() const
std::string ZooKeeperCreateRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format(
        "path = {}\n"
@@ -218,7 +217,7 @@ void ZooKeeperRemoveRequest::writeImpl(WriteBuffer & out) const
    Coordination::write(version, out);
}

std::string ZooKeeperRemoveRequest::toStringImpl() const
std::string ZooKeeperRemoveRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format(
        "path = {}\n"
@@ -245,7 +244,7 @@ void ZooKeeperExistsRequest::readImpl(ReadBuffer & in)
    Coordination::read(has_watch, in);
}

std::string ZooKeeperExistsRequest::toStringImpl() const
std::string ZooKeeperExistsRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format("path = {}", path);
}
@@ -272,7 +271,7 @@ void ZooKeeperGetRequest::readImpl(ReadBuffer & in)
    Coordination::read(has_watch, in);
}

std::string ZooKeeperGetRequest::toStringImpl() const
std::string ZooKeeperGetRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format("path = {}", path);
}
@@ -303,7 +302,7 @@ void ZooKeeperSetRequest::readImpl(ReadBuffer & in)
    Coordination::read(version, in);
}

std::string ZooKeeperSetRequest::toStringImpl() const
std::string ZooKeeperSetRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format(
        "path = {}\n"
@@ -334,7 +333,7 @@ void ZooKeeperListRequest::readImpl(ReadBuffer & in)
    Coordination::read(has_watch, in);
}

std::string ZooKeeperListRequest::toStringImpl() const
std::string ZooKeeperListRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format("path = {}", path);
}
@@ -356,7 +355,7 @@ void ZooKeeperFilteredListRequest::readImpl(ReadBuffer & in)
    list_request_type = static_cast<ListRequestType>(read_request_type);
}

std::string ZooKeeperFilteredListRequest::toStringImpl() const
std::string ZooKeeperFilteredListRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format(
        "path = {}\n"
@@ -401,7 +400,7 @@ void ZooKeeperSetACLRequest::readImpl(ReadBuffer & in)
    Coordination::read(version, in);
}

std::string ZooKeeperSetACLRequest::toStringImpl() const
std::string ZooKeeperSetACLRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format("path = {}\nversion = {}", path, version);
}
@@ -426,7 +425,7 @@ void ZooKeeperGetACLRequest::writeImpl(WriteBuffer & out) const
    Coordination::write(path, out);
}

std::string ZooKeeperGetACLRequest::toStringImpl() const
std::string ZooKeeperGetACLRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format("path = {}", path);
}
@@ -455,7 +454,7 @@ void ZooKeeperCheckRequest::readImpl(ReadBuffer & in)
    Coordination::read(version, in);
}

std::string ZooKeeperCheckRequest::toStringImpl() const
std::string ZooKeeperCheckRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format("path = {}\nversion = {}", path, version);
}
@@ -600,8 +599,11 @@ void ZooKeeperMultiRequest::readImpl(ReadBuffer & in)
    }
}

std::string ZooKeeperMultiRequest::toStringImpl() const
std::string ZooKeeperMultiRequest::toStringImpl(bool short_format) const
{
    if (short_format)
        return fmt::format("Subrequests size = {}", requests.size());

    auto out = fmt::memory_buffer();
    for (const auto & request : requests)
    {
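The short_format flag exists so that a multi-request with many subrequests can be logged as a one-line summary instead of a dump of every subrequest. A standalone sketch of the idea (illustrative types, not the real ZooKeeper classes):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    struct MultiRequest
    {
        std::vector<std::string> requests;

        std::string toString(bool short_format = false) const
        {
            if (short_format)
                return "Subrequests size = " + std::to_string(requests.size());

            std::ostringstream out; // full dump, potentially very long
            for (const auto & request : requests)
                out << request << '\n';
            return out.str();
        }
    };

    int main()
    {
        MultiRequest multi{{"create /a", "remove /b", "set /c"}};
        std::cout << multi.toString(/*short_format=*/true) << '\n';
    }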
|
@@ -63,12 +63,12 @@ struct ZooKeeperRequest : virtual Request

     /// Writes length, xid, op_num, then the rest.
     void write(WriteBuffer & out) const;
-    std::string toString() const;
+    std::string toString(bool short_format = false) const;

     virtual void writeImpl(WriteBuffer &) const = 0;
     virtual void readImpl(ReadBuffer &) = 0;

-    virtual std::string toStringImpl() const { return ""; }
+    virtual std::string toStringImpl(bool /*short_format*/) const { return ""; }

     static std::shared_ptr<ZooKeeperRequest> read(ReadBuffer & in);

@@ -98,7 +98,7 @@ struct ZooKeeperSyncRequest final : ZooKeeperRequest
     OpNum getOpNum() const override { return OpNum::Sync; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }

@@ -123,7 +123,7 @@ struct ZooKeeperReconfigRequest final : ZooKeeperRequest
     OpNum getOpNum() const override { return OpNum::Reconfig; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }

@@ -176,7 +176,7 @@ struct ZooKeeperAuthRequest final : ZooKeeperRequest
     OpNum getOpNum() const override { return OpNum::Auth; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;

     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }
@@ -229,7 +229,7 @@ struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest
     OpNum getOpNum() const override { return not_exists ? OpNum::CreateIfNotExists : OpNum::Create; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;

     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }
@@ -266,7 +266,7 @@ struct ZooKeeperRemoveRequest final : RemoveRequest, ZooKeeperRequest
     OpNum getOpNum() const override { return OpNum::Remove; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;

     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }
@@ -293,7 +293,7 @@ struct ZooKeeperExistsRequest final : ExistsRequest, ZooKeeperRequest
     OpNum getOpNum() const override { return OpNum::Exists; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;

     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return true; }
@@ -320,7 +320,7 @@ struct ZooKeeperGetRequest final : GetRequest, ZooKeeperRequest
     OpNum getOpNum() const override { return OpNum::Get; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;

     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return true; }
@@ -347,7 +347,7 @@ struct ZooKeeperSetRequest final : SetRequest, ZooKeeperRequest
     OpNum getOpNum() const override { return OpNum::Set; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }

@@ -375,7 +375,7 @@ struct ZooKeeperListRequest : ListRequest, ZooKeeperRequest
     OpNum getOpNum() const override { return OpNum::List; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return true; }

@@ -395,7 +395,7 @@ struct ZooKeeperFilteredListRequest final : ZooKeeperListRequest
     OpNum getOpNum() const override { return OpNum::FilteredList; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;

     size_t bytesSize() const override { return ZooKeeperListRequest::bytesSize() + sizeof(list_request_type); }
 };
@@ -428,7 +428,7 @@ struct ZooKeeperCheckRequest : CheckRequest, ZooKeeperRequest
     OpNum getOpNum() const override { return not_exists ? OpNum::CheckNotExists : OpNum::Check; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;

     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return true; }
@@ -469,7 +469,7 @@ struct ZooKeeperSetACLRequest final : SetACLRequest, ZooKeeperRequest
     OpNum getOpNum() const override { return OpNum::SetACL; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }

@@ -490,7 +490,7 @@ struct ZooKeeperGetACLRequest final : GetACLRequest, ZooKeeperRequest
     OpNum getOpNum() const override { return OpNum::GetACL; }
     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return true; }

@@ -516,7 +516,7 @@ struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest

     void writeImpl(WriteBuffer & out) const override;
     void readImpl(ReadBuffer & in) override;
-    std::string toStringImpl() const override;
+    std::string toStringImpl(bool short_format) const override;

     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override;
@@ -169,6 +169,23 @@ void KeeperConfigurationAndSettings::dump(WriteBufferFromOwnString & buf) const

     writeText("async_replication=", buf);
     write_bool(coordination_settings->async_replication);
+
+    writeText("latest_logs_cache_size_threshold=", buf);
+    write_int(coordination_settings->latest_logs_cache_size_threshold);
+    writeText("commit_logs_cache_size_threshold=", buf);
+    write_int(coordination_settings->commit_logs_cache_size_threshold);
+
+    writeText("disk_move_retries_wait_ms=", buf);
+    write_int(coordination_settings->disk_move_retries_wait_ms);
+    writeText("disk_move_retries_during_init=", buf);
+    write_int(coordination_settings->disk_move_retries_during_init);
+
+    writeText("log_slow_total_threshold_ms=", buf);
+    write_int(coordination_settings->log_slow_total_threshold_ms);
+    writeText("log_slow_cpu_threshold_ms=", buf);
+    write_int(coordination_settings->log_slow_cpu_threshold_ms);
+    writeText("log_slow_connection_operation_threshold_ms=", buf);
+    write_int(coordination_settings->log_slow_connection_operation_threshold_ms);
 }

 KeeperConfigurationAndSettingsPtr
@@ -58,7 +58,10 @@ struct Settings;
     M(UInt64, latest_logs_cache_size_threshold, 1 * 1024 * 1024 * 1024, "Maximum total size of in-memory cache of latest log entries.", 0) \
     M(UInt64, commit_logs_cache_size_threshold, 500 * 1024 * 1024, "Maximum total size of in-memory cache of log entries needed next for commit.", 0) \
     M(UInt64, disk_move_retries_wait_ms, 1000, "How long to wait between retries after a failure which happened while a file was being moved between disks.", 0) \
-    M(UInt64, disk_move_retries_during_init, 100, "The amount of retries after a failure which happened while a file was being moved between disks during initialization.", 0)
+    M(UInt64, disk_move_retries_during_init, 100, "The amount of retries after a failure which happened while a file was being moved between disks during initialization.", 0) \
+    M(UInt64, log_slow_total_threshold_ms, 5000, "Requests for which the total latency is larger than this settings will be logged", 0) \
+    M(UInt64, log_slow_cpu_threshold_ms, 100, "Requests for which the CPU (preprocessing and processing) latency is larger than this settings will be logged", 0) \
+    M(UInt64, log_slow_connection_operation_threshold_ms, 1000, "Log message if a certain operation took too long inside a single connection", 0)

 DECLARE_SETTINGS_TRAITS(CoordinationSettingsTraits, LIST_OF_COORDINATION_SETTINGS)
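The three new thresholds gate logging only, not behavior: an operation is reported when its measured latency exceeds the corresponding setting. A standalone sketch of the check, assuming the default values shown above:

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    /// Hypothetical stand-ins for two of the new thresholds (values mirror the defaults above).
    constexpr uint64_t log_slow_total_threshold_ms = 5000;
    constexpr uint64_t log_slow_cpu_threshold_ms = 100;

    void maybeLogSlow(std::chrono::milliseconds total, std::chrono::milliseconds cpu)
    {
        if (static_cast<uint64_t>(total.count()) > log_slow_total_threshold_ms)
            std::printf("request took too long: %lld ms total\n", static_cast<long long>(total.count()));
        if (static_cast<uint64_t>(cpu.count()) > log_slow_cpu_threshold_ms)
            std::printf("request took too long: %lld ms cpu\n", static_cast<long long>(cpu.count()));
    }

    int main()
    {
        maybeLogSlow(std::chrono::milliseconds(6000), std::chrono::milliseconds(50));  /// logs only the first line
    }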
@@ -150,12 +150,18 @@
     M(S3PutObject) \
     M(S3GetObject) \
     \
-    M(AzureUploadPart) \
-    M(DiskAzureUploadPart) \
+    M(AzureUpload) \
+    M(DiskAzureUpload) \
+    M(AzureStageBlock) \
+    M(DiskAzureStageBlock) \
+    M(AzureCommitBlockList) \
+    M(DiskAzureCommitBlockList) \
     M(AzureCopyObject) \
     M(DiskAzureCopyObject) \
     M(AzureDeleteObjects) \
     M(DiskAzureDeleteObjects) \
     M(AzureListObjects) \
     M(DiskAzureListObjects) \
     \
     M(DiskS3DeleteObjects) \
     M(DiskS3CopyObject) \
@@ -238,6 +244,13 @@
     M(KeeperPacketsReceived) \
     M(KeeperRequestTotal) \
     M(KeeperLatency) \
+    M(KeeperTotalElapsedMicroseconds) \
+    M(KeeperProcessElapsedMicroseconds) \
+    M(KeeperPreprocessElapsedMicroseconds) \
+    M(KeeperStorageLockWaitMicroseconds) \
+    M(KeeperCommitWaitElapsedMicroseconds) \
+    M(KeeperBatchMaxCount) \
+    M(KeeperBatchMaxTotalSize) \
     M(KeeperCommits) \
     M(KeeperCommitsFailed) \
     M(KeeperSnapshotCreations) \
@@ -31,6 +31,13 @@ namespace CurrentMetrics
     extern const Metric KeeperOutstandingRequets;
 }

+namespace ProfileEvents
+{
+    extern const Event KeeperCommitWaitElapsedMicroseconds;
+    extern const Event KeeperBatchMaxCount;
+    extern const Event KeeperBatchMaxTotalSize;
+}
+
 using namespace std::chrono_literals;

 namespace DB
@@ -119,6 +126,7 @@ void KeeperDispatcher::requestThread()
     auto coordination_settings = configuration_and_settings->coordination_settings;
     uint64_t max_wait = coordination_settings->operation_timeout_ms.totalMilliseconds();
     uint64_t max_batch_bytes_size = coordination_settings->max_requests_batch_bytes_size;
+    size_t max_batch_size = coordination_settings->max_requests_batch_size;

     /// The code below do a very simple thing: batch all write (quorum) requests into vector until
     /// previous write batch is not finished or max_batch size achieved. The main complexity goes from
@@ -188,7 +196,6 @@ void KeeperDispatcher::requestThread()
                 return false;
             };

-            size_t max_batch_size = coordination_settings->max_requests_batch_size;
             while (!shutdown_called && current_batch.size() < max_batch_size && !has_reconfig_request
                 && current_batch_bytes_size < max_batch_bytes_size && try_get_request())
                 ;
@@ -225,6 +232,12 @@ void KeeperDispatcher::requestThread()
             /// Process collected write requests batch
             if (!current_batch.empty())
             {
+                if (current_batch.size() == max_batch_size)
+                    ProfileEvents::increment(ProfileEvents::KeeperBatchMaxCount, 1);
+
+                if (current_batch_bytes_size == max_batch_bytes_size)
+                    ProfileEvents::increment(ProfileEvents::KeeperBatchMaxTotalSize, 1);
+
                 LOG_TRACE(log, "Processing requests batch, size: {}, bytes: {}", current_batch.size(), current_batch_bytes_size);

                 auto result = server->putRequestBatch(current_batch);
@@ -243,6 +256,8 @@ void KeeperDispatcher::requestThread()
                 /// If we will execute read or reconfig next, we have to process result now
                 if (execute_requests_after_write)
                 {
+                    Stopwatch watch;
+                    SCOPE_EXIT(ProfileEvents::increment(ProfileEvents::KeeperCommitWaitElapsedMicroseconds, watch.elapsedMicroseconds()));
                     if (prev_result)
                         result_buf = forceWaitAndProcessResult(
                             prev_result, prev_batch, /*clear_requests_on_success=*/!execute_requests_after_write);
@@ -319,19 +334,13 @@ void KeeperDispatcher::snapshotThread()
 {
     setThreadName("KeeperSnpT");
     const auto & shutdown_called = keeper_context->isShutdownCalled();
-    while (!shutdown_called)
+    CreateSnapshotTask task;
+    while (snapshots_queue.pop(task))
     {
-        CreateSnapshotTask task;
-        if (!snapshots_queue.pop(task))
-            break;
-
         try
         {
             auto snapshot_file_info = task.create_snapshot(std::move(task.snapshot), /*execute_only_cleanup=*/shutdown_called);

             if (shutdown_called)
                 break;

             if (!snapshot_file_info)
                 continue;
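The snapshotThread rewrite collapses the explicit !shutdown_called / pop / break sequence into a single `while (queue.pop(task))` loop, relying on pop() returning false once the queue is finished and drained. A self-contained sketch of a queue with that contract (a hypothetical BlockingQueue, not ClickHouse's ConcurrentBoundedQueue):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <queue>

    /// A tiny blocking queue whose pop() returns false once finished and drained,
    /// so the consumer can be written as `while (queue.pop(task))`.
    template <typename T>
    class BlockingQueue
    {
    public:
        void push(T v)
        {
            { std::lock_guard lock(mutex); items.push(std::move(v)); }
            cv.notify_one();
        }

        void finish()
        {
            { std::lock_guard lock(mutex); finished = true; }
            cv.notify_all();
        }

        bool pop(T & out)
        {
            std::unique_lock lock(mutex);
            cv.wait(lock, [&] { return finished || !items.empty(); });
            if (items.empty())
                return false;  /// finished and drained
            out = std::move(items.front());
            items.pop();
            return true;
        }

    private:
        std::mutex mutex;
        std::condition_variable cv;
        std::queue<T> items;
        bool finished = false;
    };

    int main()
    {
        BlockingQueue<int> q;
        q.push(1);
        q.push(2);
        q.finish();
        int task;
        while (q.pop(task))  /// the refactored snapshotThread loop shape
            std::printf("task %d\n", task);
    }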
@@ -1,12 +1,14 @@
 #include <atomic>
 #include <cerrno>
+#include <chrono>
+#include <Coordination/KeeperDispatcher.h>
+#include <Coordination/KeeperReconfiguration.h>
 #include <Coordination/KeeperSnapshotManager.h>
 #include <Coordination/KeeperStateMachine.h>
-#include <Coordination/KeeperDispatcher.h>
 #include <Coordination/KeeperStorage.h>
-#include <Coordination/KeeperReconfiguration.h>
 #include <Coordination/ReadBufferFromNuraftBuffer.h>
 #include <Coordination/WriteBufferFromNuraftBuffer.h>
+#include <Disks/DiskLocal.h>
 #include <IO/ReadHelpers.h>
 #include <base/defines.h>
 #include <base/errnoToString.h>
@@ -17,7 +19,6 @@
 #include <Common/ZooKeeper/ZooKeeperCommon.h>
 #include <Common/ZooKeeper/ZooKeeperIO.h>
 #include <Common/logger_useful.h>
-#include <Disks/DiskLocal.h>


 namespace ProfileEvents
@@ -31,6 +32,7 @@ namespace ProfileEvents
     extern const Event KeeperSnapshotApplysFailed;
     extern const Event KeeperReadSnapshot;
     extern const Event KeeperSaveSnapshot;
+    extern const Event KeeperStorageLockWaitMicroseconds;
 }

 namespace DB
@@ -151,6 +153,20 @@ void assertDigest(
     }
 }

+struct TSA_SCOPED_LOCKABLE LockGuardWithStats final
+{
+    std::unique_lock<std::mutex> lock;
+    explicit LockGuardWithStats(std::mutex & mutex) TSA_ACQUIRE(mutex)
+    {
+        Stopwatch watch;
+        std::unique_lock l(mutex);
+        ProfileEvents::increment(ProfileEvents::KeeperStorageLockWaitMicroseconds, watch.elapsedMicroseconds());
+        lock = std::move(l);
+    }
+
+    ~LockGuardWithStats() TSA_RELEASE() = default;
+};
+
 }

 nuraft::ptr<nuraft::buffer> KeeperStateMachine::pre_commit(uint64_t log_idx, nuraft::buffer & data)
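LockGuardWithStats wraps lock acquisition so that the time spent waiting for the mutex is measured and recorded before the guard takes ownership; it then releases the lock at end of scope like an ordinary guard. A standalone analogue using std::chrono in place of Stopwatch and stdout in place of ProfileEvents:

    #include <chrono>
    #include <cstdio>
    #include <mutex>

    /// Measure how long acquiring the mutex took, then hold it for the scope.
    struct TimedLockGuard
    {
        std::unique_lock<std::mutex> lock;

        explicit TimedLockGuard(std::mutex & mutex)
        {
            auto start = std::chrono::steady_clock::now();
            std::unique_lock l(mutex);
            auto waited = std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::steady_clock::now() - start).count();
            std::printf("waited %lld us for the lock\n", static_cast<long long>(waited));
            lock = std::move(l);
        }
    };

    int main()
    {
        std::mutex m;
        TimedLockGuard guard(m);  /// holds m until end of scope
    }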
@@ -272,7 +288,7 @@ bool KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & req
     if (op_num == Coordination::OpNum::SessionID || op_num == Coordination::OpNum::Reconfig)
         return true;

-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);

     if (storage->isFinalized())
         return false;
@@ -302,7 +318,7 @@ bool KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & req

 void KeeperStateMachine::reconfigure(const KeeperStorage::RequestForSession& request_for_session)
 {
-    std::lock_guard _(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     KeeperStorage::ResponseForSession response = processReconfiguration(request_for_session);
     if (!responses_queue.push(response))
     {
@@ -391,7 +407,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
     if (!keeper_context->localLogsPreprocessed() && !preprocess(*request_for_session))
         return nullptr;

-    auto try_push = [this](const KeeperStorage::ResponseForSession& response)
+    auto try_push = [&](const KeeperStorage::ResponseForSession& response)
     {
         if (!responses_queue.push(response))
         {
@@ -400,6 +416,17 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
                 "Failed to push response with session id {} to the queue, probably because of shutdown",
                 response.session_id);
         }
+
+        using namespace std::chrono;
+        uint64_t elapsed = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count() - request_for_session->time;
+        if (elapsed > keeper_context->getCoordinationSettings()->log_slow_total_threshold_ms)
+        {
+            LOG_INFO(
+                log,
+                "Total time to process a request took too long ({}ms).\nRequest info: {}",
+                elapsed,
+                request_for_session->request->toString(/*short_format=*/true));
+        }
     };

     try
@@ -417,7 +444,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
         response_for_session.session_id = -1;
         response_for_session.response = response;

-        std::lock_guard lock(storage_and_responses_lock);
+        LockGuardWithStats lock(storage_and_responses_lock);
         session_id = storage->getSessionID(session_id_request.session_timeout_ms);
         LOG_DEBUG(log, "Session ID response {} with timeout {}", session_id, session_id_request.session_timeout_ms);
         response->session_id = session_id;
@@ -426,12 +453,13 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
     else
     {
         if (op_num == Coordination::OpNum::Close)
         {
             std::lock_guard lock(request_cache_mutex);
             parsed_request_cache.erase(request_for_session->session_id);
         }

-        std::lock_guard lock(storage_and_responses_lock);
+        LockGuardWithStats lock(storage_and_responses_lock);
         KeeperStorage::ResponsesForSessions responses_for_sessions
             = storage->processRequest(request_for_session->request, request_for_session->session_id, request_for_session->zxid);
         for (auto & response_for_session : responses_for_sessions)
@@ -482,7 +510,7 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s)
     }

     { /// deserialize and apply snapshot to storage
-        std::lock_guard lock(storage_and_responses_lock);
+        LockGuardWithStats lock(storage_and_responses_lock);

         SnapshotDeserializationResult snapshot_deserialization_result;
         if (latest_snapshot_ptr)
@@ -534,7 +562,7 @@ void KeeperStateMachine::rollbackRequest(const KeeperStorage::RequestForSession
     if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
         return;

-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     storage->rollbackRequest(request_for_session.zxid, allow_missing);
 }

@@ -561,7 +589,7 @@ void KeeperStateMachine::create_snapshot(nuraft::snapshot & s, nuraft::async_res
     auto snapshot_meta_copy = nuraft::snapshot::deserialize(*snp_buf);
     CreateSnapshotTask snapshot_task;
     { /// lock storage for a short period time to turn on "snapshot mode". After that we can read consistent storage state without locking.
-        std::lock_guard lock(storage_and_responses_lock);
+        LockGuardWithStats lock(storage_and_responses_lock);
         snapshot_task.snapshot = std::make_shared<KeeperStorageSnapshot>(storage.get(), snapshot_meta_copy, getClusterConfig());
     }

@@ -569,7 +597,7 @@ void KeeperStateMachine::create_snapshot(nuraft::snapshot & s, nuraft::async_res
     snapshot_task.create_snapshot = [this, when_done](KeeperStorageSnapshotPtr && snapshot, bool execute_only_cleanup)
     {
         nuraft::ptr<std::exception> exception(nullptr);
-        bool ret = true;
+        bool ret = false;
         if (!execute_only_cleanup)
         {
             try
@@ -599,7 +627,8 @@ void KeeperStateMachine::create_snapshot(nuraft::snapshot & s, nuraft::async_res
                 else
                 {
                     auto snapshot_buf = snapshot_manager.serializeSnapshotToBuffer(*snapshot);
-                    auto snapshot_info = snapshot_manager.serializeSnapshotBufferToDisk(*snapshot_buf, snapshot->snapshot_meta->get_last_log_idx());
+                    auto snapshot_info = snapshot_manager.serializeSnapshotBufferToDisk(
+                        *snapshot_buf, snapshot->snapshot_meta->get_last_log_idx());
                     latest_snapshot_info = std::move(snapshot_info);
                     latest_snapshot_buf = std::move(snapshot_buf);
                 }
@@ -612,18 +641,19 @@ void KeeperStateMachine::create_snapshot(nuraft::snapshot & s, nuraft::async_res
                         latest_snapshot_info->path);
                 }
             }
+
+            ret = true;
         }
         catch (...)
         {
             ProfileEvents::increment(ProfileEvents::KeeperSnapshotCreationsFailed);
             LOG_TRACE(log, "Exception happened during snapshot");
             tryLogCurrentException(log);
-            ret = false;
         }
     }
     {
         /// Destroy snapshot with lock
-        std::lock_guard lock(storage_and_responses_lock);
+        LockGuardWithStats lock(storage_and_responses_lock);
         LOG_TRACE(log, "Clearing garbage after snapshot");
         /// Turn off "snapshot mode" and clear outdate part of storage state
         storage->clearGarbageAfterSnapshot();
@@ -764,7 +794,7 @@ int KeeperStateMachine::read_logical_snp_obj(
 void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSession & request_for_session)
 {
     /// Pure local request, just process it with storage
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     auto responses = storage->processRequest(
         request_for_session.request, request_for_session.session_id, std::nullopt, true /*check_acl*/, true /*is_local*/);
     for (const auto & response : responses)
@@ -774,97 +804,97 @@ void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSessi

 void KeeperStateMachine::shutdownStorage()
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     storage->finalize();
 }

 std::vector<int64_t> KeeperStateMachine::getDeadSessions()
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getDeadSessions();
 }

 int64_t KeeperStateMachine::getNextZxid() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getNextZXID();
 }

 KeeperStorage::Digest KeeperStateMachine::getNodesDigest() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getNodesDigest(false);
 }

 uint64_t KeeperStateMachine::getLastProcessedZxid() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getZXID();
 }

 uint64_t KeeperStateMachine::getNodesCount() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getNodesCount();
 }

 uint64_t KeeperStateMachine::getTotalWatchesCount() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getTotalWatchesCount();
 }

 uint64_t KeeperStateMachine::getWatchedPathsCount() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getWatchedPathsCount();
 }

 uint64_t KeeperStateMachine::getSessionsWithWatchesCount() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getSessionsWithWatchesCount();
 }

 uint64_t KeeperStateMachine::getTotalEphemeralNodesCount() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getTotalEphemeralNodesCount();
 }

 uint64_t KeeperStateMachine::getSessionWithEphemeralNodesCount() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getSessionWithEphemeralNodesCount();
 }

 void KeeperStateMachine::dumpWatches(WriteBufferFromOwnString & buf) const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     storage->dumpWatches(buf);
 }

 void KeeperStateMachine::dumpWatchesByPath(WriteBufferFromOwnString & buf) const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     storage->dumpWatchesByPath(buf);
 }

 void KeeperStateMachine::dumpSessionsAndEphemerals(WriteBufferFromOwnString & buf) const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     storage->dumpSessionsAndEphemerals(buf);
 }

 uint64_t KeeperStateMachine::getApproximateDataSize() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getApproximateDataSize();
 }

 uint64_t KeeperStateMachine::getKeyArenaSize() const
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     return storage->getArenaDataSize();
 }

@@ -905,7 +935,7 @@ ClusterConfigPtr KeeperStateMachine::getClusterConfig() const

 void KeeperStateMachine::recalculateStorageStats()
 {
-    std::lock_guard lock(storage_and_responses_lock);
+    LockGuardWithStats lock(storage_and_responses_lock);
     LOG_INFO(log, "Recalculating storage stats");
     storage->recalculateStats();
     LOG_INFO(log, "Done recalculating storage stats");
@@ -182,8 +182,7 @@ private:

     KeeperSnapshotManagerS3 * snapshot_manager_s3;

-    KeeperStorage::ResponseForSession processReconfiguration(
-        const KeeperStorage::RequestForSession& request_for_session)
+    KeeperStorage::ResponseForSession processReconfiguration(const KeeperStorage::RequestForSession & request_for_session)
         TSA_REQUIRES(storage_and_responses_lock);
 };
 }
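TSA_REQUIRES is one of the clang thread-safety-analysis annotations (defined in ClickHouse's base/defines.h); it makes the compiler verify statically that callers hold the named lock. A minimal sketch of the underlying attributes, assuming clang with -Wthread-safety and a standard library whose std::mutex is annotated as a capability (e.g. libc++); with other toolchains the attributes are simply ignored:

    #include <mutex>

    #if defined(__clang__)
    #    define TSA_GUARDED_BY(...) __attribute__((guarded_by(__VA_ARGS__)))
    #    define TSA_REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
    #else
    #    define TSA_GUARDED_BY(...)
    #    define TSA_REQUIRES(...)
    #endif

    struct Machine
    {
        std::mutex storage_lock;
        int storage TSA_GUARDED_BY(storage_lock) = 0;

        /// Callers must hold storage_lock; the analyzer checks this at compile time.
        void process() TSA_REQUIRES(storage_lock) { ++storage; }
    };

    int main()
    {
        Machine m;
        std::lock_guard lock(m.storage_lock);
        m.process();  /// fine: lock is held; calling without it would trigger a -Wthread-safety warning
    }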
@@ -40,6 +40,8 @@ namespace ProfileEvents
     extern const Event KeeperGetRequest;
     extern const Event KeeperListRequest;
     extern const Event KeeperExistsRequest;
+    extern const Event KeeperPreprocessElapsedMicroseconds;
+    extern const Event KeeperProcessElapsedMicroseconds;
 }

 namespace DB
@@ -2309,6 +2311,20 @@ void KeeperStorage::preprocessRequest(
     std::optional<Digest> digest,
     int64_t log_idx)
 {
+    Stopwatch watch;
+    SCOPE_EXIT({
+        auto elapsed = watch.elapsedMicroseconds();
+        if (auto elapsed_ms = elapsed / 1000; elapsed_ms > keeper_context->getCoordinationSettings()->log_slow_cpu_threshold_ms)
+        {
+            LOG_INFO(
+                getLogger("KeeperStorage"),
+                "Preprocessing a request took too long ({}ms).\nRequest info: {}",
+                elapsed_ms,
+                zk_request->toString(/*short_format=*/true));
+        }
+        ProfileEvents::increment(ProfileEvents::KeeperPreprocessElapsedMicroseconds, elapsed);
+    });
+
     if (!initialized)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "KeeperStorage system nodes are not initialized");

@@ -2409,6 +2425,20 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(
     bool check_acl,
     bool is_local)
 {
+    Stopwatch watch;
+    SCOPE_EXIT({
+        auto elapsed = watch.elapsedMicroseconds();
+        if (auto elapsed_ms = elapsed / 1000; elapsed_ms > keeper_context->getCoordinationSettings()->log_slow_cpu_threshold_ms)
+        {
+            LOG_INFO(
+                getLogger("KeeperStorage"),
+                "Processing a request took too long ({}ms).\nRequest info: {}",
+                elapsed_ms,
+                zk_request->toString(/*short_format=*/true));
+        }
+        ProfileEvents::increment(ProfileEvents::KeeperProcessElapsedMicroseconds, elapsed);
+    });
+
     if (!initialized)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "KeeperStorage system nodes are not initialized");

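The Stopwatch + SCOPE_EXIT pairing records the elapsed time on every return path, including exceptions, which is why it sits at the very top of the function. A standalone analogue with a small RAII guard in place of the SCOPE_EXIT macro:

    #include <chrono>
    #include <cstdio>

    /// Run a callback when the enclosing scope exits, on every return path.
    template <typename F>
    struct ScopeExit
    {
        F func;
        ~ScopeExit() { func(); }
    };
    template <typename F>
    ScopeExit(F) -> ScopeExit<F>;

    int processRequest()
    {
        auto start = std::chrono::steady_clock::now();
        ScopeExit guard{[&]
        {
            auto elapsed_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::steady_clock::now() - start).count();
            if (elapsed_ms > 100)  /// cf. log_slow_cpu_threshold_ms
                std::printf("processing took too long (%lld ms)\n", static_cast<long long>(elapsed_ms));
        }};

        return 42;  /// the guard fires after this return value is computed
    }

    int main() { return processRequest() == 42 ? 0 : 1; }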
@@ -3,6 +3,7 @@
 #include <Common/HashTable/HashMap.h>
 #include <Common/ArenaUtils.h>

+#include <list>

 namespace DB
 {

@@ -623,7 +623,7 @@ class IColumn;
     M(Bool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. Currently it's not beneficial for numeric types.", 0) \
     M(Bool, optimize_multiif_to_if, true, "Replace 'multiIf' with only one condition to 'if'.", 0) \
     M(Bool, optimize_if_transform_strings_to_enum, false, "Replaces string-type arguments in If and Transform to enum. Disabled by default cause it could make inconsistent change in distributed query that would lead to its fail.", 0) \
-    M(Bool, optimize_functions_to_subcolumns, false, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \
+    M(Bool, optimize_functions_to_subcolumns, true, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \
     M(Bool, optimize_using_constraints, false, "Use constraints for query optimization", 0) \
     M(Bool, optimize_substitute_columns, false, "Use constraints for column substitution", 0) \
     M(Bool, optimize_append_index, false, "Use constraints in order to append index condition (indexHint)", 0) \
@@ -732,6 +732,7 @@ class IColumn;
     M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \
     M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \
     M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \
+    M(Bool, database_replicated_allow_heavy_create, false, "Allow long-running DDL queries (CREATE AS SELECT and POPULATE) in Replicated database engine. Note that it can block DDL queue for a long time.", 0) \
     M(Bool, cloud_mode, false, "Only available in ClickHouse Cloud", 0) \
     M(UInt64, cloud_mode_engine, 1, "Only available in ClickHouse Cloud", 0) \
     M(DistributedDDLOutputMode, distributed_ddl_output_mode, DistributedDDLOutputMode::THROW, "Format of distributed DDL query result, one of: 'none', 'throw', 'null_status_on_timeout', 'never_throw', 'none_only_active', 'throw_only_active', 'null_status_on_timeout_only_active'", 0) \

@@ -58,8 +58,11 @@ String ClickHouseVersion::toString() const
 static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory::SettingsChanges>> settings_changes_history_initializer =
 {
     {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."},
+              {"optimize_functions_to_subcolumns", false, true, "Enable optimization by default"},
+              {"input_format_json_ignore_key_case", false, false, "Ignore json key case while read json field from string."},
               {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."},
+              {"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."},
               {"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"},
               }},
     {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"},
              {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"},
@@ -90,7 +93,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
               {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."},
               {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."},
               {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."},
-              {"input_format_json_ignore_key_case", false, false, "Ignore json key case while read json field from string."},
               }},
     {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"},
               {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."},
@@ -32,7 +32,7 @@ namespace ErrorCodes
     extern const int NOT_FOUND_COLUMN_IN_BLOCK;
     extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
     extern const int SIZES_OF_COLUMNS_IN_TUPLE_DOESNT_MATCH;
-    extern const int ILLEGAL_INDEX;
+    extern const int ARGUMENT_OUT_OF_BOUND;
     extern const int LOGICAL_ERROR;
 }

@@ -286,7 +286,7 @@ std::optional<size_t> DataTypeTuple::tryGetPositionByName(const String & name) c
 String DataTypeTuple::getNameByPosition(size_t i) const
 {
     if (i == 0 || i > names.size())
-        throw Exception(ErrorCodes::ILLEGAL_INDEX, "Index of tuple element ({}) if out range ([1, {}])", i, names.size());
+        throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Index of tuple element ({}) is out range ([1, {}])", i, names.size());

     return names[i - 1];
 }

@@ -186,7 +186,7 @@ void IDisk::checkAccess()
     DB::UUID server_uuid = DB::ServerUUID::get();
     if (server_uuid == DB::UUIDHelpers::Nil)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Server UUID is not initialized");
-    const String path = fmt::format("clickhouse_access_check_{}", DB::toString(server_uuid));
+    const String path = fmt::format("clickhouse_access_check_{}", toString(server_uuid));

     checkAccessImpl(path);
 }

@@ -427,7 +427,7 @@ public:
     /// Device: 10301h/66305d Inode: 3109907 Links: 1
     /// Why we have always zero by default? Because normal filesystem
     /// manages hardlinks by itself. So you can always remove hardlink and all
-    /// other alive harlinks will not be removed.
+    /// other alive hardlinks will not be removed.
     virtual UInt32 getRefCount(const String &) const { return 0; }

     /// Revision is an incremental counter of disk operation.
@@ -14,6 +14,15 @@ namespace ProfileEvents
 {
     extern const Event RemoteWriteThrottlerBytes;
     extern const Event RemoteWriteThrottlerSleepMicroseconds;
+
+    extern const Event AzureUpload;
+    extern const Event AzureStageBlock;
+    extern const Event AzureCommitBlockList;
+
+    extern const Event DiskAzureUpload;
+    extern const Event DiskAzureStageBlock;
+    extern const Event DiskAzureCommitBlockList;
+
 }

 namespace DB
@@ -134,6 +143,10 @@ void WriteBufferFromAzureBlobStorage::preFinalize()
     /// then we use single part upload instead of multi part upload
     if (block_ids.empty() && detached_part_data.size() == 1 && detached_part_data.front().data_size <= max_single_part_upload_size)
     {
+        ProfileEvents::increment(ProfileEvents::AzureUpload);
+        if (blob_container_client->GetClickhouseOptions().IsClientForDisk)
+            ProfileEvents::increment(ProfileEvents::DiskAzureUpload);
+
         auto part_data = std::move(detached_part_data.front());
         auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path);
         Azure::Core::IO::MemoryBodyStream memory_stream(reinterpret_cast<const uint8_t *>(part_data.memory.data()), part_data.data_size);
@@ -164,6 +177,10 @@ void WriteBufferFromAzureBlobStorage::finalizeImpl()
     if (!block_ids.empty())
     {
         auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path);
+        ProfileEvents::increment(ProfileEvents::AzureCommitBlockList);
+        if (blob_container_client->GetClickhouseOptions().IsClientForDisk)
+            ProfileEvents::increment(ProfileEvents::DiskAzureCommitBlockList);
+
         execWithRetry([&](){ block_blob_client.CommitBlockList(block_ids); }, max_unexpected_write_error_retries);
         LOG_TRACE(log, "Committed {} blocks for blob `{}`", block_ids.size(), blob_path);
     }
@@ -269,6 +286,10 @@ void WriteBufferFromAzureBlobStorage::writePart(WriteBufferFromAzureBlobStorage:
         auto & data_block_id = std::get<0>(*worker_data);
         auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path);

+        ProfileEvents::increment(ProfileEvents::AzureStageBlock);
+        if (blob_container_client->GetClickhouseOptions().IsClientForDisk)
+            ProfileEvents::increment(ProfileEvents::DiskAzureStageBlock);
+
         Azure::Core::IO::MemoryBodyStream memory_stream(reinterpret_cast<const uint8_t *>(std::get<1>(*worker_data).memory.data()), data_size);
         execWithRetry([&](){ block_blob_client.StageBlock(data_block_id, memory_stream); }, max_unexpected_write_error_retries, data_size);
     };

@@ -60,7 +60,6 @@ public:
             "ListObjectAzure")
         , client(client_)
     {
-
         options.Prefix = path_prefix;
         options.PageSizeHint = static_cast<int>(max_list_size);
     }
@@ -150,7 +149,7 @@ ObjectStorageIteratorPtr AzureObjectStorage::iterate(const std::string & path_pr
     auto settings_ptr = settings.get();
     auto client_ptr = client.get();

-    return std::make_shared<AzureIteratorAsync>(path_prefix, client_ptr, max_keys);
+    return std::make_shared<AzureIteratorAsync>(path_prefix, client_ptr, max_keys ? max_keys : settings_ptr->list_object_keys_size);
 }

 void AzureObjectStorage::listObjects(const std::string & path, RelativePathsWithMetadata & children, size_t max_keys) const

@@ -75,6 +75,7 @@ struct RelativePathWithMetadata
     virtual std::string getPath() const { return relative_path; }
     virtual bool isArchive() const { return false; }
     virtual std::string getPathToArchive() const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not an archive"); }
+    virtual size_t fileSizeInArchive() const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not an archive"); }
 };

 struct ObjectKeyWithMetadata
@@ -22,8 +22,7 @@ using UnlinkMetadataFileOperationOutcomePtr = std::shared_ptr<UnlinkMetadataFile
 /// Also it has excessive API calls.
 ///
 /// It is used to allow BACKUP/RESTORE to ObjectStorage (S3/...) with the same
-/// structure as on disk MergeTree, and does not requires metadata from local
-/// disk to restore.
+/// structure as on disk MergeTree, and does not require metadata from a local disk to restore.
 class MetadataStorageFromPlainObjectStorage : public IMetadataStorage
 {
 public:

@@ -1,10 +1,14 @@
 #include <Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h>
 #include <Disks/ObjectStorages/ObjectStorageIterator.h>

 #include <IO/ReadHelpers.h>
+#include <IO/SharedThreadPools.h>
+#include <IO/S3Common.h>
+#include <Common/ErrorCodes.h>
+#include <Common/logger_useful.h>
 #include "CommonPathPrefixKeyGenerator.h"


 namespace DB
 {

@@ -22,34 +26,78 @@ MetadataStorageFromPlainObjectStorage::PathMap loadPathPrefixMap(const std::stri
 {
     MetadataStorageFromPlainObjectStorage::PathMap result;

-    RelativePathsWithMetadata files;
-    object_storage->listObjects(root, files, 0);
-    for (const auto & file : files)
+    ThreadPool & pool = getIOThreadPool().get();
+    ThreadPoolCallbackRunnerLocal<void> runner(pool, "PlainRWMetaLoad");
+    std::mutex mutex;
+
+    LoggerPtr log = getLogger("MetadataStorageFromPlainObjectStorage");
+
+    ReadSettings settings;
+    settings.enable_filesystem_cache = false;
+    settings.remote_fs_method = RemoteFSReadMethod::read;
+    settings.remote_fs_buffer_size = 1024;  /// These files are small.
+
+    LOG_DEBUG(log, "Loading metadata");
+    size_t num_files = 0;
+    for (auto iterator = object_storage->iterate(root, 0); iterator->isValid(); iterator->next())
     {
-        auto remote_path = std::filesystem::path(file->relative_path);
+        ++num_files;
+        auto file = iterator->current();
+        String path = file->getPath();
+        auto remote_path = std::filesystem::path(path);
         if (remote_path.filename() != PREFIX_PATH_FILE_NAME)
             continue;

-        StoredObject object{file->relative_path};
-
-        auto read_buf = object_storage->readObject(object);
-        String local_path;
-        readStringUntilEOF(local_path, *read_buf);
+        runner([remote_path, path, &object_storage, &result, &mutex, &log, &settings]
+        {
+            setThreadName("PlainRWMetaLoad");

-        chassert(remote_path.has_parent_path());
-        auto res = result.emplace(local_path, remote_path.parent_path());
+            StoredObject object{path};
+            String local_path;
+
+            try
+            {
+                auto read_buf = object_storage->readObject(object, settings);
+                readStringUntilEOF(local_path, *read_buf);
+            }
+#if USE_AWS_S3
+            catch (const S3Exception & e)
+            {
+                /// It is ok if a directory was removed just now.
+                /// We support attaching a filesystem that is concurrently modified by someone else.
+                if (e.getS3ErrorCode() == Aws::S3::S3Errors::NO_SUCH_KEY)
+                    return;
+                throw;
+            }
+#endif
+            catch (...)
+            {
+                throw;
+            }

-        /// This can happen if table replication is enabled, then the same local path is written
-        /// in `prefix.path` of each replica.
-        /// TODO: should replicated tables (e.g., RMT) be explicitly disallowed?
-        if (!res.second)
-            LOG_WARNING(
-                getLogger("MetadataStorageFromPlainObjectStorage"),
-                "The local path '{}' is already mapped to a remote path '{}', ignoring: '{}'",
-                local_path,
-                res.first->second,
-                remote_path.parent_path().string());
+            chassert(remote_path.has_parent_path());
+            std::pair<MetadataStorageFromPlainObjectStorage::PathMap::iterator, bool> res;
+            {
+                std::lock_guard lock(mutex);
+                res = result.emplace(local_path, remote_path.parent_path());
+            }
+
+            /// This can happen if table replication is enabled, then the same local path is written
+            /// in `prefix.path` of each replica.
+            /// TODO: should replicated tables (e.g., RMT) be explicitly disallowed?
+            if (!res.second)
+                LOG_WARNING(
+                    log,
+                    "The local path '{}' is already mapped to a remote path '{}', ignoring: '{}'",
+                    local_path,
+                    res.first->second,
+                    remote_path.parent_path().string());
+        });
     }

+    runner.waitForAllToFinishAndRethrowFirstError();
+    LOG_DEBUG(log, "Loaded metadata for {} files, found {} directories", num_files, result.size());
+
     auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
     CurrentMetrics::add(metric, result.size());
     return result;
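The rewritten loadPathPrefixMap submits one task per prefix.path file to a shared thread pool, serializes only the map insertion behind a mutex, and waits for every task before reporting totals. A self-contained sketch of that shape, using plain std::thread instead of ThreadPoolCallbackRunnerLocal (file names here are illustrative only):

    #include <cstdio>
    #include <map>
    #include <mutex>
    #include <string>
    #include <thread>
    #include <vector>

    int main()
    {
        std::vector<std::string> files = {"a/prefix.path", "b/prefix.path", "c/prefix.path"};
        std::map<std::string, std::string> result;
        std::mutex mutex;

        std::vector<std::thread> workers;
        for (const auto & file : files)
        {
            workers.emplace_back([&, file]
            {
                /// Stands in for readObject + readStringUntilEOF in the real code.
                std::string local_path = "local/" + file;
                std::lock_guard lock(mutex);  /// only the emplace is serialized
                result.emplace(local_path, file);
            });
        }

        for (auto & w : workers)
            w.join();  /// cf. runner.waitForAllToFinishAndRethrowFirstError()

        std::printf("loaded %zu files, found %zu directories\n", files.size(), result.size());
    }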
@@ -4,6 +4,7 @@

+#include <memory>


 namespace DB
 {

@@ -9,15 +9,34 @@ namespace DB
 class IObjectStorageIterator
 {
 public:
+    /// Moves iterator to the next element. If the iterator not isValid, the behavior is undefined.
     virtual void next() = 0;
-    virtual void nextBatch() = 0;
+
+    /// Check if the iterator is valid, which means the `current` method can be called.
     virtual bool isValid() = 0;
+
+    /// Return the current element.
     virtual RelativePathWithMetadataPtr current() = 0;
-    virtual RelativePathsWithMetadata currentBatch() = 0;
+
+    /// This will initiate prefetching the next batch in background, so it can be obtained faster when needed.
     virtual std::optional<RelativePathsWithMetadata> getCurrentBatchAndScheduleNext() = 0;
+
+    /// Returns the number of elements in the batches that were fetched so far.
     virtual size_t getAccumulatedSize() const = 0;

     virtual ~IObjectStorageIterator() = default;
+
+private:
+    /// Skips all the remaining elements in the current batch (if any),
+    /// and moves the iterator to the first element of the next batch,
+    /// or, if there is no more batches, the iterator becomes invalid.
+    /// If the iterator not isValid, the behavior is undefined.
+    virtual void nextBatch() = 0;
+
+    /// Return the current batch of elements.
+    /// It is unspecified how batches are formed.
+    /// But this method can be used for more efficient processing.
+    virtual RelativePathsWithMetadata currentBatch() = 0;
 };

 using ObjectStorageIteratorPtr = std::shared_ptr<IObjectStorageIterator>;
@@ -25,6 +44,7 @@ using ObjectStorageIteratorPtr = std::shared_ptr<IObjectStorageIterator>;
 class ObjectStorageIteratorFromList : public IObjectStorageIterator
 {
 public:
+    /// Everything is represented by just a single batch.
     explicit ObjectStorageIteratorFromList(RelativePathsWithMetadata && batch_)
         : batch(std::move(batch_))
         , batch_iterator(batch.begin()) {}
@@ -37,10 +37,11 @@ void IObjectStorageIteratorAsync::nextBatch()
 {
     std::lock_guard lock(mutex);

-    if (is_finished)
+    if (!has_next_batch)
     {
         current_batch.clear();
         current_batch_iterator = current_batch.begin();
+        is_finished = true;
         return;
     }

@@ -58,16 +59,23 @@ void IObjectStorageIteratorAsync::nextBatch()
         current_batch = std::move(result.batch);
         current_batch_iterator = current_batch.begin();

-        accumulated_size.fetch_add(current_batch.size(), std::memory_order_relaxed);
-
-        if (result.has_next)
-            outcome_future = scheduleBatch();
-        else
+        if (current_batch.empty())
+        {
             is_finished = true;
+            has_next_batch = false;
+        }
+        else
+        {
+            accumulated_size.fetch_add(current_batch.size(), std::memory_order_relaxed);
+
+            has_next_batch = result.has_next;
+            if (has_next_batch)
+                outcome_future = scheduleBatch();
+        }
     }
     catch (...)
     {
         is_finished = true;
+        has_next_batch = false;
         throw;
     }
 }
@@ -76,10 +84,12 @@ void IObjectStorageIteratorAsync::next()
 {
     std::lock_guard lock(mutex);

+    if (is_finished)
+        return;
+
+    ++current_batch_iterator;
     if (current_batch_iterator == current_batch.end())
         nextBatch();
-    else
-        ++current_batch_iterator;
 }

 std::future<IObjectStorageIteratorAsync::BatchAndHasNext> IObjectStorageIteratorAsync::scheduleBatch()
@@ -99,7 +109,7 @@ bool IObjectStorageIteratorAsync::isValid()
     if (!is_initialized)
         nextBatch();

-    return current_batch_iterator != current_batch.end();
+    return !is_finished;
 }

 RelativePathWithMetadataPtr IObjectStorageIteratorAsync::current()
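After this change the async iterator's validity is carried by a single is_finished flag: isValid() just reads it, next() becomes a no-op once finished, and an empty batch marks the end of iteration. A standalone sketch of the resulting contract (simplified and synchronous, with hypothetical names):

    #include <cstdio>
    #include <vector>

    class BatchedIterator
    {
    public:
        explicit BatchedIterator(std::vector<std::vector<int>> batches_)
            : batches(std::move(batches_)) { nextBatch(); }

        bool isValid() const { return !is_finished; }

        int current() const { return batch[pos]; }

        void next()
        {
            if (is_finished)
                return;  /// no-op once finished, mirroring the fix above
            ++pos;
            if (pos == batch.size())
                nextBatch();
        }

    private:
        void nextBatch()
        {
            pos = 0;
            while (batch_index < batches.size() && batches[batch_index].empty())
                ++batch_index;  /// skip empty batches; running out means we are done
            if (batch_index == batches.size())
            {
                batch.clear();
                is_finished = true;
                return;
            }
            batch = batches[batch_index++];
        }

        std::vector<std::vector<int>> batches;
        std::vector<int> batch;
        size_t pos = 0;
        size_t batch_index = 0;
        bool is_finished = false;
    };

    int main()
    {
        BatchedIterator it({{1, 2}, {3}});
        for (; it.isValid(); it.next())
            std::printf("%d\n", it.current());  /// prints 1, 2, 3
    }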
@@ -35,7 +35,7 @@ public:
     void deactivate();

 protected:
-
+    /// This method fetches the next batch, and returns true if there are more batches after it.
     virtual bool getBatchAndCheckNext(RelativePathsWithMetadata & batch) = 0;

     struct BatchAndHasNext
@@ -48,6 +48,7 @@ protected:

     bool is_initialized{false};
     bool is_finished{false};
+    bool has_next_batch{true};
     bool deactivated{false};

     mutable std::recursive_mutex mutex;

@@ -293,6 +293,8 @@ std::unique_ptr<WriteBufferFromFileBase> S3ObjectStorage::writeObject( /// NOLIN
 ObjectStorageIteratorPtr S3ObjectStorage::iterate(const std::string & path_prefix, size_t max_keys) const
 {
     auto settings_ptr = s3_settings.get();
+    if (!max_keys)
+        max_keys = settings_ptr->list_object_keys_size;
     return std::make_shared<S3IteratorAsync>(uri.bucket, path_prefix, client.get(), max_keys);
 }

@@ -67,7 +67,7 @@ private:
     }

 public:
-    template <class ...Args>
+    template <typename... Args>
     explicit S3ObjectStorage(std::unique_ptr<S3::Client> && client_, Args && ...args)
         : S3ObjectStorage("S3ObjectStorage", std::move(client_), std::forward<Args>(args)...)
     {

@@ -202,7 +202,7 @@ public:
             {"value", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isStringOrFixedString), nullptr, "String or FixedString"}
         };

-        validateFunctionArgumentTypes(*this, arguments, mandatory_arguments);
+        validateFunctionArguments(*this, arguments, mandatory_arguments);

         return std::make_shared<DataTypeString>();
     }
@@ -16,6 +16,7 @@ namespace ErrorCodes
 {
     extern const int ILLEGAL_COLUMN;
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+    extern const int PARAMETER_OUT_OF_BOUND;
     extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
 }

@@ -146,6 +147,9 @@ private:
             const auto pos = pos_col_const->getUInt(0);
             if (pos < 8 * sizeof(ValueType))
                 mask = mask | (ValueType(1) << pos);
+            else
+                throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND,
+                    "The bit position argument {} is out of bounds for number", static_cast<UInt64>(pos));
         }
         else
         {
@@ -186,13 +190,20 @@ private:
             for (const auto i : collections::range(0, mask.size()))
                 if (pos[i] < 8 * sizeof(ValueType))
                     mask[i] = mask[i] | (ValueType(1) << pos[i]);
+                else
+                    throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND,
+                        "The bit position argument {} is out of bounds for number", static_cast<UInt64>(pos[i]));

             return true;
         }
         else if (const auto pos_col_const = checkAndGetColumnConst<ColumnVector<PosType>>(pos_col_untyped))
         {
             const auto & pos = pos_col_const->template getValue<PosType>();
-            const auto new_mask = pos < 8 * sizeof(ValueType) ? ValueType(1) << pos : 0;
+            if (pos >= 8 * sizeof(ValueType))
+                throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND,
+                    "The bit position argument {} is out of bounds for number", static_cast<UInt64>(pos));
+
+            const auto new_mask = ValueType(1) << pos;

             for (const auto i : collections::range(0, mask.size()))
                 mask[i] = mask[i] | new_mask;
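These checks matter because shifting by a position greater than or equal to the value's bit width is undefined behavior in C++; the old ternary silently produced a zero mask in the const-position case. A standalone sketch of the guarded mask construction:

    #include <cstdint>
    #include <stdexcept>
    #include <string>

    /// Validate the bit position before shifting instead of relying on UB.
    template <typename ValueType>
    ValueType bitMaskForPosition(uint64_t pos)
    {
        if (pos >= 8 * sizeof(ValueType))
            throw std::out_of_range(
                "The bit position argument " + std::to_string(pos) + " is out of bounds for number");
        return ValueType(1) << pos;
    }

    int main()
    {
        auto mask = bitMaskForPosition<uint32_t>(5);  /// 32
        try
        {
            bitMaskForPosition<uint32_t>(40);  /// throws: 40 >= 32
        }
        catch (const std::out_of_range &)
        {
            return mask == 32 ? 0 : 1;
        }
        return 1;
    }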
@ -95,22 +95,21 @@ ColumnsWithTypeAndName createBlockWithNestedColumns(const ColumnsWithTypeAndName
|
||||
return res;
|
||||
}
|
||||
|
||||
void validateArgumentType(const IFunction & func, const DataTypes & arguments,
|
||||
size_t argument_index, bool (* validator_func)(const IDataType &),
|
||||
const char * expected_type_description)
|
||||
{
|
||||
if (arguments.size() <= argument_index)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Incorrect number of arguments of function {}",
|
||||
func.getName());
|
||||
|
||||
const auto & argument = arguments[argument_index];
|
||||
if (!validator_func(*argument))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of {} argument of function {}, expected {}",
|
||||
argument->getName(), std::to_string(argument_index), func.getName(), expected_type_description);
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
String withOrdinalEnding(size_t i)
|
||||
{
|
||||
switch (i)
|
||||
{
|
||||
case 0: return "1st";
|
||||
case 1: return "2nd";
|
||||
case 2: return "3rd";
|
||||
default: return std::to_string(i) + "th";
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void validateArgumentsImpl(const IFunction & func,
|
||||
const ColumnsWithTypeAndName & arguments,
|
||||
size_t argument_offset,
|
||||
@ -120,20 +119,18 @@ void validateArgumentsImpl(const IFunction & func,
|
||||
{
|
||||
const auto argument_index = i + argument_offset;
|
||||
if (argument_index >= arguments.size())
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
const auto & arg = arguments[i + argument_offset];
|
||||
const auto & descriptor = descriptors[i];
|
||||
if (int error_code = descriptor.isValid(arg.type, arg.column); error_code != 0)
|
||||
throw Exception(error_code,
|
||||
"Illegal type of argument #{}{} of function {}{}{}",
|
||||
argument_offset + i + 1, // +1 is for human-friendly 1-based indexing
|
||||
(descriptor.argument_name ? " '" + std::string(descriptor.argument_name) + "'" : String{}),
|
||||
"A value of illegal type was provided as {} argument '{}' to function '{}'. Expected: {}, got: {}",
|
||||
withOrdinalEnding(argument_offset + i),
|
||||
descriptor.name,
|
||||
func.getName(),
|
||||
(descriptor.expected_type_description ? String(", expected ") + descriptor.expected_type_description : String{}),
|
||||
(arg.type ? ", got " + arg.type->getName() : String{}));
|
||||
descriptor.type_name,
|
||||
arg.type ? arg.type->getName() : "<?>");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -141,52 +138,42 @@ void validateArgumentsImpl(const IFunction & func,
 
 int FunctionArgumentDescriptor::isValid(const DataTypePtr & data_type, const ColumnPtr & column) const
 {
-    if (type_validator_func && (data_type == nullptr || !type_validator_func(*data_type)))
+    if (name.empty() || type_name.empty())
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "name or type_name are not set");
+
+    if (type_validator && (data_type == nullptr || !type_validator(*data_type)))
         return ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT;
 
-    if (column_validator_func && (column == nullptr || !column_validator_func(*column)))
+    if (column_validator && (column == nullptr || !column_validator(*column)))
         return ErrorCodes::ILLEGAL_COLUMN;
 
     return 0;
 }
 
-void validateFunctionArgumentTypes(const IFunction & func,
-                                   const ColumnsWithTypeAndName & arguments,
-                                   const FunctionArgumentDescriptors & mandatory_args,
-                                   const FunctionArgumentDescriptors & optional_args)
+void validateFunctionArguments(const IFunction & func,
+                               const ColumnsWithTypeAndName & arguments,
+                               const FunctionArgumentDescriptors & mandatory_args,
+                               const FunctionArgumentDescriptors & optional_args)
 {
     if (arguments.size() < mandatory_args.size() || arguments.size() > mandatory_args.size() + optional_args.size())
     {
-        auto join_argument_types = [](const auto & args, const String sep = ", ")
-        {
-            String result;
-            for (const auto & a : args)
-            {
-                using A = std::decay_t<decltype(a)>;
-                if constexpr (std::is_same_v<A, FunctionArgumentDescriptor>)
-                {
-                    if (a.argument_name)
-                        result += "'" + std::string(a.argument_name) + "' : ";
-                    if (a.expected_type_description)
-                        result += a.expected_type_description;
-                }
-                else if constexpr (std::is_same_v<A, ColumnWithTypeAndName>)
-                    result += a.type->getName();
-
-                result += sep;
-            }
-
-            if (!args.empty())
-                result.erase(result.end() - sep.length(), result.end());
-
-            return result;
-        };
+        auto argument_singular_or_plural = [](const auto & args) -> std::string_view { return args.size() == 1 ? "argument" : "arguments"; };
+
+        String expected_args_string;
+        if (!mandatory_args.empty() && !optional_args.empty())
+            expected_args_string = fmt::format("{} mandatory {} and {} optional {}", mandatory_args.size(), argument_singular_or_plural(mandatory_args), optional_args.size(), argument_singular_or_plural(optional_args));
+        else if (!mandatory_args.empty() && optional_args.empty())
+            expected_args_string = fmt::format("{} {}", mandatory_args.size(), argument_singular_or_plural(mandatory_args)); /// intentionally not "_mandatory_ arguments"
+        else if (mandatory_args.empty() && !optional_args.empty())
+            expected_args_string = fmt::format("{} optional {}", optional_args.size(), argument_singular_or_plural(optional_args));
+        else
+            expected_args_string = "0 arguments";
 
         throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
-                        "Incorrect number of arguments for function {} provided {}{}, expected {}{} ({}{})",
-                        func.getName(), arguments.size(), (!arguments.empty() ? " (" + join_argument_types(arguments) + ")" : String{}),
-                        mandatory_args.size(), (!optional_args.empty() ? " to " + std::to_string(mandatory_args.size() + optional_args.size()) : ""),
-                        join_argument_types(mandatory_args), (!optional_args.empty() ? ", [" + join_argument_types(optional_args) + "]" : ""));
+                        "An incorrect number of arguments was specified for function '{}'. Expected {}, got {}",
+                        func.getName(),
+                        expected_args_string,
+                        fmt::format("{} {}", arguments.size(), argument_singular_or_plural(arguments)));
     }
 
     validateArgumentsImpl(func, arguments, 0, mandatory_args);
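For readers outside the codebase, the contract above is: each descriptor's `isValid()` returns `0` on success or an error code, and the caller turns a non-zero code into a descriptive exception. A minimal standalone sketch of that contract, with simplified stand-in types rather than the actual ClickHouse declarations:

```cpp
#include <cstdio>
#include <string>
#include <string_view>
#include <vector>

// Simplified stand-ins for IDataType and the error-code constants.
struct DataType { std::string name; };
constexpr int ILLEGAL_TYPE_OF_ARGUMENT = 43;

struct ArgumentDescriptor
{
    std::string_view name;                     // e.g. "haystack"
    bool (*type_validator)(const DataType &);  // may be nullptr
    std::string_view type_name;                // e.g. "String"

    // Mirrors FunctionArgumentDescriptor::isValid: 0 on success, error code otherwise.
    int isValid(const DataType * type) const
    {
        if (type_validator && (type == nullptr || !type_validator(*type)))
            return ILLEGAL_TYPE_OF_ARGUMENT;
        return 0;
    }
};

static bool isString(const DataType & t) { return t.name == "String"; }

int main()
{
    std::vector<ArgumentDescriptor> mandatory_args{{"haystack", isString, "String"}};
    DataType wrong{"UInt64"};
    // Non-zero because UInt64 does not satisfy the String validator.
    std::printf("error code: %d\n", mandatory_args[0].isValid(&wrong));
}
```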
@@ -115,77 +115,58 @@ ColumnWithTypeAndName columnGetNested(const ColumnWithTypeAndName & col);
 /// column if it is nullable.
 ColumnsWithTypeAndName createBlockWithNestedColumns(const ColumnsWithTypeAndName & columns);
 
-/// Checks argument type at specified index with predicate.
-/// throws if there is no argument at specified index or if predicate returns false.
-void validateArgumentType(const IFunction & func, const DataTypes & arguments,
-                          size_t argument_index, bool (* validator_func)(const IDataType &),
-                          const char * expected_type_description);
-
-/** Simple validator that is used in conjunction with validateFunctionArgumentTypes() to check if function arguments are as expected
- *
- * Also it is used to generate function description when arguments do not match expected ones.
- * Any field can be null:
- * `argument_name` - if not null, reported via type check errors.
- * `expected_type_description` - if not null, reported via type check errors.
- * `type_validator_func` - if not null, used to validate data type of function argument.
- * `column_validator_func` - if not null, used to validate column of function argument.
- */
+/// Expected arguments for a function. Can be used in conjunction with validateFunctionArguments() to check that the user-provided
+/// arguments match the expected arguments.
 struct FunctionArgumentDescriptor
 {
-    const char * argument_name;
+    /// The argument name, e.g. "longitude".
+    /// Should not be empty.
+    std::string_view name;
 
+    /// A function which validates the argument data type.
+    /// May be nullptr.
     using TypeValidator = bool (*)(const IDataType &);
-    TypeValidator type_validator_func;
+    TypeValidator type_validator;
 
+    /// A function which validates the argument column.
+    /// May be nullptr.
     using ColumnValidator = bool (*)(const IColumn &);
-    ColumnValidator column_validator_func;
+    ColumnValidator column_validator;
 
-    const char * expected_type_description;
+    /// The expected argument type, e.g. "const String" or "UInt64".
+    /// Should not be empty.
+    std::string_view type_name;
 
-    /** Validate argument type and column.
-     *
-     * Returns non-zero error code if:
-     * Validator != nullptr && (Value == nullptr || Validator(*Value) == false)
-     * For:
-     * Validator is either `type_validator_func` or `column_validator_func`
-     * Value is either `data_type` or `column` respectively.
-     * ILLEGAL_TYPE_OF_ARGUMENT if type validation fails
-     *
-     */
+    /// Validate argument type and column.
     int isValid(const DataTypePtr & data_type, const ColumnPtr & column) const;
 };
 
 using FunctionArgumentDescriptors = std::vector<FunctionArgumentDescriptor>;
 
-/** Validate that function arguments match specification.
- *
- * Designed to simplify argument validation for functions with variable arguments
- * (e.g. depending on result type or other trait).
- * First, checks that number of arguments is as expected (including optional arguments).
- * Second, checks that mandatory args present and have valid type.
- * Third, checks optional arguments types, skipping ones that are missing.
- *
- * Please note that if you have several optional arguments, like f([a, b, c]),
- * only these calls are considered valid:
- * f(a)
- * f(a, b)
- * f(a, b, c)
- *
- * But NOT these: f(a, c), f(b, c)
- * In other words you can't omit middle optional arguments (just like in regular C++).
- *
- * If any mandatory arg is missing, throw an exception, with explicit description of expected arguments.
- */
-void validateFunctionArgumentTypes(const IFunction & func, const ColumnsWithTypeAndName & arguments,
-                                   const FunctionArgumentDescriptors & mandatory_args,
-                                   const FunctionArgumentDescriptors & optional_args = {});
+/// Validates that the user-provided arguments match the expected arguments.
+///
+/// Checks that
+/// - the number of provided arguments matches the number of mandatory/optional arguments,
+/// - all mandatory arguments are present and have the right type,
+/// - optional arguments - if present - have the right type.
+///
+/// With multiple optional arguments, e.g. f([a, b, c]), provided arguments must match left-to-right. E.g. these calls are considered valid:
+///     f(a)
+///     f(a, b)
+///     f(a, b, c)
+/// but these are NOT:
+///     f(a, c)
+///     f(b, c)
+void validateFunctionArguments(const IFunction & func, const ColumnsWithTypeAndName & arguments,
+                               const FunctionArgumentDescriptors & mandatory_args,
+                               const FunctionArgumentDescriptors & optional_args = {});
 
 /// Checks if a list of array columns have equal offsets. Return a pair of nested columns and offsets if true, otherwise throw.
 std::pair<std::vector<const IColumn *>, const ColumnArray::Offset *>
 checkAndGetNestedArrayOffset(const IColumn ** columns, size_t num_arguments);
 
-/** Return ColumnNullable of src, with null map as OR-ed null maps of args columns.
- * Or ColumnConst(ColumnNullable) if the result is always NULL or if the result is constant and always not NULL.
- */
+/// Return ColumnNullable of src, with null map as OR-ed null maps of args columns.
+/// Or ColumnConst(ColumnNullable) if the result is always NULL or if the result is constant and always not NULL.
 ColumnPtr wrapInNullable(const ColumnPtr & src, const ColumnsWithTypeAndName & args, const DataTypePtr & result_type, size_t input_rows_count);
 
 struct NullPresence
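For the shape of a typical call site, here is a sketch assembled from the call sites updated later in this diff; the argument names and return type are illustrative, and it compiles only inside the ClickHouse tree:

```cpp
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
    /// Descriptor fields: name, type validator, column validator, type_name.
    FunctionArgumentDescriptors mandatory_args{
        {"haystack", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
    };
    FunctionArgumentDescriptors optional_args{
        {"max_substrings", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeInteger), isColumnConst, "const Number"},
    };
    /// Throws NUMBER_OF_ARGUMENTS_DOESNT_MATCH, ILLEGAL_TYPE_OF_ARGUMENT or ILLEGAL_COLUMN on mismatch.
    validateFunctionArguments(*this, arguments, mandatory_args, optional_args);
    return std::make_shared<DataTypeString>();
}
```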
@@ -5,7 +5,7 @@
 #include <Functions/IFunction.h>
 #include <Interpreters/Context.h>
 #include <Interpreters/DatabaseCatalog.h>
-#include <Interpreters/HashJoin.h>
+#include <Interpreters/HashJoin/HashJoin.h>
 #include <Storages/StorageJoin.h>
 #include <Storages/TableLockHolder.h>
@@ -40,7 +40,7 @@ public:
             {"replacement", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"}
         };
 
-        validateFunctionArgumentTypes(*this, arguments, args);
+        validateFunctionArguments(*this, arguments, args);
 
         return std::make_shared<DataTypeString>();
     }
@@ -194,7 +194,7 @@ static inline void checkArgumentsWithSeparatorAndOptionalMaxSubstrings(
         {"max_substrings", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeInteger), isColumnConst, "const Number"},
     };
 
-    validateFunctionArgumentTypes(func, arguments, mandatory_args, optional_args);
+    validateFunctionArguments(func, arguments, mandatory_args, optional_args);
 }
 
 static inline void checkArgumentsWithOptionalMaxSubstrings(const IFunction & func, const ColumnsWithTypeAndName & arguments)
@@ -207,7 +207,7 @@ static inline void checkArgumentsWithOptionalMaxSubstrings(const IFunction & fun
         {"max_substrings", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeInteger), isColumnConst, "const Number"},
     };
 
-    validateFunctionArgumentTypes(func, arguments, mandatory_args, optional_args);
+    validateFunctionArguments(func, arguments, mandatory_args, optional_args);
 }
 
 }
@@ -47,7 +47,7 @@ public:
         FunctionArgumentDescriptors args{
             {"value", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isDateTime64), nullptr, "DateTime64"}
         };
-        validateFunctionArgumentTypes(*this, arguments, args);
+        validateFunctionArguments(*this, arguments, args);
 
         return std::make_shared<DataTypeInt64>();
     }
@@ -165,7 +165,7 @@ private:
             });
         }
 
-        validateFunctionArgumentTypes(*this, arguments,
+        validateFunctionArguments(*this, arguments,
             FunctionArgumentDescriptors{
                 {"mode", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isStringOrFixedString), isColumnConst, "encryption mode string"},
                 {"input", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isStringOrFixedString), {}, "plaintext"},
@@ -438,7 +438,7 @@ private:
             });
         }
 
-        validateFunctionArgumentTypes(*this, arguments,
+        validateFunctionArguments(*this, arguments,
            FunctionArgumentDescriptors{
                {"mode", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isStringOrFixedString), isColumnConst, "decryption mode string"},
                {"input", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isStringOrFixedString), {}, "ciphertext"},
@@ -2020,7 +2020,7 @@ public:
 
     DataTypePtr getReturnTypeImplRemovedNullable(const ColumnsWithTypeAndName & arguments) const
     {
-        FunctionArgumentDescriptors mandatory_args = {{"Value", nullptr, nullptr, nullptr}};
+        FunctionArgumentDescriptors mandatory_args = {{"Value", nullptr, nullptr, "any type"}};
         FunctionArgumentDescriptors optional_args;
 
         if constexpr (to_decimal)
@@ -2049,7 +2049,7 @@ public:
             optional_args.push_back({"timezone", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"});
         }
 
-        validateFunctionArgumentTypes(*this, arguments, mandatory_args, optional_args);
+        validateFunctionArguments(*this, arguments, mandatory_args, optional_args);
 
         if constexpr (std::is_same_v<ToDataType, DataTypeInterval>)
         {
@@ -2390,7 +2390,7 @@ public:
 
         if (isDateTime64<Name, ToDataType>(arguments))
         {
-            validateFunctionArgumentTypes(*this, arguments,
+            validateFunctionArguments(*this, arguments,
                 FunctionArgumentDescriptors{{"string", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isStringOrFixedString), nullptr, "String or FixedString"}},
                 // optional
                 FunctionArgumentDescriptors{
@@ -518,66 +518,78 @@ struct Dispatcher
     template <typename ScaleType>
     static ColumnPtr apply(const IColumn * value_col, const IColumn * scale_col = nullptr)
     {
-        const auto & value_col_typed = checkAndGetColumn<ColumnVector<T>>(*value_col);
-        auto col_res = ColumnVector<T>::create();
-
-        typename ColumnVector<T>::Container & vec_res = col_res->getData();
-        vec_res.resize(value_col_typed.getData().size());
-
-        if (!vec_res.empty())
-        {
-            if (scale_col == nullptr || isColumnConst(*scale_col))
-            {
-                auto scale_arg = (scale_col == nullptr) ? 0 : getScaleArg(checkAndGetColumnConst<ColumnVector<ScaleType>>(scale_col));
-                if (scale_arg == 0)
-                {
-                    size_t scale = 1;
-                    FunctionRoundingImpl<ScaleMode::Zero>::apply(value_col_typed.getData(), scale, vec_res);
-                }
-                else if (scale_arg > 0)
-                {
-                    size_t scale = intExp10(scale_arg);
-                    FunctionRoundingImpl<ScaleMode::Positive>::apply(value_col_typed.getData(), scale, vec_res);
-                }
-                else
-                {
-                    size_t scale = intExp10(-scale_arg);
-                    FunctionRoundingImpl<ScaleMode::Negative>::apply(value_col_typed.getData(), scale, vec_res);
-                }
-            }
-            /// Non-const scale argument:
-            else if (const auto * scale_col_typed = checkAndGetColumn<ColumnVector<ScaleType>>(scale_col))
-            {
-                const auto & value_data = value_col_typed.getData();
-                const auto & scale_data = scale_col_typed->getData();
-                const size_t rows = value_data.size();
-
-                for (size_t i = 0; i < rows; ++i)
-                {
-                    Int64 scale64 = scale_data[i];
-                    validateScale(scale64);
-                    Scale raw_scale = scale64;
-
-                    if (raw_scale == 0)
-                    {
-                        size_t scale = 1;
-                        FunctionRoundingImpl<ScaleMode::Zero>::applyOne(value_data[i], scale, vec_res[i]);
-                    }
-                    else if (raw_scale > 0)
-                    {
-                        size_t scale = intExp10(raw_scale);
-                        FunctionRoundingImpl<ScaleMode::Positive>::applyOne(value_data[i], scale, vec_res[i]);
-                    }
-                    else
-                    {
-                        size_t scale = intExp10(-raw_scale);
-                        FunctionRoundingImpl<ScaleMode::Negative>::applyOne(value_data[i], scale, vec_res[i]);
-                    }
-                }
-            }
-        }
-
-        return col_res;
+        // Non-const value argument:
+        const auto * value_col_typed = checkAndGetColumn<ColumnVector<T>>(value_col);
+        if (value_col_typed)
+        {
+            auto col_res = ColumnVector<T>::create();
+
+            typename ColumnVector<T>::Container & vec_res = col_res->getData();
+            vec_res.resize(value_col_typed->getData().size());
+
+            if (!vec_res.empty())
+            {
+                // Const scale argument:
+                if (scale_col == nullptr || isColumnConst(*scale_col))
+                {
+                    auto scale_arg = (scale_col == nullptr) ? 0 : getScaleArg(checkAndGetColumnConst<ColumnVector<ScaleType>>(scale_col));
+                    if (scale_arg == 0)
+                    {
+                        size_t scale = 1;
+                        FunctionRoundingImpl<ScaleMode::Zero>::apply(value_col_typed->getData(), scale, vec_res);
+                    }
+                    else if (scale_arg > 0)
+                    {
+                        size_t scale = intExp10(scale_arg);
+                        FunctionRoundingImpl<ScaleMode::Positive>::apply(value_col_typed->getData(), scale, vec_res);
+                    }
+                    else
+                    {
+                        size_t scale = intExp10(-scale_arg);
+                        FunctionRoundingImpl<ScaleMode::Negative>::apply(value_col_typed->getData(), scale, vec_res);
+                    }
+                }
+                /// Non-const scale argument:
+                else if (const auto * scale_col_typed = checkAndGetColumn<ColumnVector<ScaleType>>(scale_col))
+                {
+                    const auto & value_data = value_col_typed->getData();
+                    const auto & scale_data = scale_col_typed->getData();
+                    const size_t rows = value_data.size();
+
+                    for (size_t i = 0; i < rows; ++i)
+                    {
+                        Int64 scale64 = scale_data[i];
+                        validateScale(scale64);
+                        Scale raw_scale = scale64;
+
+                        if (raw_scale == 0)
+                        {
+                            size_t scale = 1;
+                            FunctionRoundingImpl<ScaleMode::Zero>::applyOne(value_data[i], scale, vec_res[i]);
+                        }
+                        else if (raw_scale > 0)
+                        {
+                            size_t scale = intExp10(raw_scale);
+                            FunctionRoundingImpl<ScaleMode::Positive>::applyOne(value_data[i], scale, vec_res[i]);
+                        }
+                        else
+                        {
+                            size_t scale = intExp10(-raw_scale);
+                            FunctionRoundingImpl<ScaleMode::Negative>::applyOne(value_data[i], scale, vec_res[i]);
+                        }
+                    }
+                }
+            }
+            return col_res;
+        }
+
+        // Const value argument:
+        const auto * value_col_typed_const = checkAndGetColumnConst<ColumnVector<T>>(value_col);
+        if (value_col_typed_const)
+        {
+            auto value_col_full = value_col_typed_const->convertToFullColumn();
+            return apply<ScaleType>(value_col_full.get(), scale_col);
+        }
+        return nullptr;
     }
 };
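The restructuring above also gives `apply()` a tail case: a const value column is materialized via `convertToFullColumn()` and the method recurses into the non-const path, so only one path does the real work. A self-contained sketch of this materialize-and-recurse pattern, with simplified column types rather than ClickHouse's:

```cpp
#include <cstdio>
#include <memory>
#include <vector>

// Simplified stand-in: a "const" column is flagged; a full column holds per-row data.
struct Column
{
    bool is_const = false;
    std::vector<double> data;

    // Mirrors IColumn::convertToFullColumn(): materialize a const column.
    std::shared_ptr<Column> convertToFullColumn() const
    {
        return std::make_shared<Column>(Column{false, data});
    }
};

static std::shared_ptr<Column> apply(const Column * value_col)
{
    if (!value_col->is_const)
    {
        // Non-const value argument: the single real processing path.
        auto res = std::make_shared<Column>();
        for (double v : value_col->data)
            res->data.push_back(v * 2); // placeholder for the actual rounding
        return res;
    }
    // Const value argument: materialize, then reuse the non-const path.
    auto full = value_col->convertToFullColumn();
    return apply(full.get());
}

int main()
{
    Column c{true, {1.5, 2.5, 3.5}};
    auto res = apply(&c);
    std::printf("%zu rows, first = %g\n", res->data.size(), res->data[0]);
}
```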
@@ -589,38 +601,52 @@ public:
     template <typename ScaleType>
     static ColumnPtr apply(const IColumn * value_col, const IColumn * scale_col = nullptr)
     {
-        const auto & value_col_typed = checkAndGetColumn<ColumnDecimal<T>>(*value_col);
-        const typename ColumnDecimal<T>::Container & vec_src = value_col_typed.getData();
-
-        auto col_res = ColumnDecimal<T>::create(vec_src.size(), value_col_typed.getScale());
-        auto & vec_res = col_res->getData();
-
-        if (!vec_res.empty())
-        {
-            if (scale_col == nullptr || isColumnConst(*scale_col))
-            {
-                auto scale_arg = scale_col == nullptr ? 0 : getScaleArg(checkAndGetColumnConst<ColumnVector<ScaleType>>(scale_col));
-                DecimalRoundingImpl<T, rounding_mode, tie_breaking_mode>::apply(value_col_typed.getData(), value_col_typed.getScale(), vec_res, scale_arg);
-            }
-            /// Non-const scale argument
-            else if (const auto * scale_col_typed = checkAndGetColumn<ColumnVector<ScaleType>>(scale_col))
-            {
-                const auto & scale = scale_col_typed->getData();
-                const size_t rows = vec_src.size();
-
-                for (size_t i = 0; i < rows; ++i)
-                {
-                    Int64 scale64 = scale[i];
-                    validateScale(scale64);
-                    Scale raw_scale = scale64;
-
-                    DecimalRoundingImpl<T, rounding_mode, tie_breaking_mode>::applyOne(value_col_typed.getElement(i), value_col_typed.getScale(),
-                        reinterpret_cast<ColumnDecimal<T>::NativeT&>(col_res->getElement(i)), raw_scale);
-                }
-            }
-        }
-
-        return col_res;
+        // Non-const value argument:
+        const auto * value_col_typed = checkAndGetColumn<ColumnDecimal<T>>(value_col);
+        if (value_col_typed)
+        {
+            const typename ColumnDecimal<T>::Container & vec_src = value_col_typed->getData();
+
+            auto col_res = ColumnDecimal<T>::create(vec_src.size(), value_col_typed->getScale());
+            auto & vec_res = col_res->getData();
+            vec_res.resize(vec_src.size());
+
+            if (!vec_res.empty())
+            {
+                /// Const scale argument:
+                if (scale_col == nullptr || isColumnConst(*scale_col))
+                {
+                    auto scale_arg = scale_col == nullptr ? 0 : getScaleArg(checkAndGetColumnConst<ColumnVector<ScaleType>>(scale_col));
+                    DecimalRoundingImpl<T, rounding_mode, tie_breaking_mode>::apply(vec_src, value_col_typed->getScale(), vec_res, scale_arg);
+                }
+                /// Non-const scale argument:
+                else if (const auto * scale_col_typed = checkAndGetColumn<ColumnVector<ScaleType>>(scale_col))
+                {
+                    const auto & scale = scale_col_typed->getData();
+                    const size_t rows = vec_src.size();
+
+                    for (size_t i = 0; i < rows; ++i)
+                    {
+                        Int64 scale64 = scale[i];
+                        validateScale(scale64);
+                        Scale raw_scale = scale64;
+
+                        DecimalRoundingImpl<T, rounding_mode, tie_breaking_mode>::applyOne(value_col_typed->getElement(i), value_col_typed->getScale(),
+                            reinterpret_cast<ColumnDecimal<T>::NativeT&>(col_res->getElement(i)), raw_scale);
+                    }
+                }
+            }
+
+            return col_res;
+        }
+
+        // Const value argument:
+        const auto * value_col_typed_const = checkAndGetColumnConst<ColumnDecimal<T>>(value_col);
+        if (value_col_typed_const)
+        {
+            auto value_col_full = value_col_typed_const->convertToFullColumn();
+            return apply<ScaleType>(value_col_full.get(), scale_col);
+        }
+        return nullptr;
     }
 };
@@ -647,7 +673,7 @@ public:
         FunctionArgumentDescriptors optional_args{
             {"N", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeInteger), nullptr, "The number of decimal places to round to"},
         };
-        validateFunctionArgumentTypes(*this, arguments, mandatory_args, optional_args);
+        validateFunctionArguments(*this, arguments, mandatory_args, optional_args);
 
         return arguments[0].type;
     }
@@ -671,9 +697,6 @@ public:
             using ScaleTypes = std::decay_t<decltype(scaleTypes)>;
             using ScaleType = typename ScaleTypes::RightType;
 
-            if (isColumnConst(*value_arg.column) && !isColumnConst(*scale_column.column))
-                throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Scale column must be const for const data column");
-
             res = Dispatcher<DataType, rounding_mode, tie_breaking_mode>::template apply<ScaleType>(value_arg.column.get(), scale_column.column.get());
             return true;
         };
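The deleted guard is the user-visible part of this change: a const value column combined with a non-const scale column was previously rejected with `ILLEGAL_COLUMN` and is now handled by the rewritten `Dispatcher`. A self-contained sketch of the per-row-scale semantics that the `applyOne` loops implement (the bound in the range check is illustrative, not the one `validateScale()` uses):

```cpp
#include <cmath>
#include <cstdio>
#include <stdexcept>
#include <vector>

// Round each value to its own per-row number of decimal places.
static std::vector<double> roundWithPerRowScale(const std::vector<double> & values,
                                                const std::vector<long> & scales)
{
    std::vector<double> res(values.size());
    for (std::size_t i = 0; i < values.size(); ++i)
    {
        long s = scales[i];
        if (s < -18 || s > 18) // per-row check, analogous to validateScale()
            throw std::invalid_argument("scale out of range");
        double factor = std::pow(10.0, static_cast<double>(s)); // intExp10 analogue
        res[i] = std::round(values[i] * factor) / factor;
    }
    return res;
}

int main()
{
    auto r = roundWithPerRowScale({3.14159, 3.14159, 271.8}, {0, 2, -1});
    std::printf("%g %g %g\n", r[0], r[1], r[2]); // 3 3.14 270
}
```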
@@ -48,7 +48,7 @@ namespace
             {"json", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
         };
 
-        validateFunctionArgumentTypes(*this, arguments, args);
+        validateFunctionArguments(*this, arguments, args);
         return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeUInt64>());
     }
@@ -32,7 +32,7 @@ public:
             {"URL", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
         };
 
-        validateFunctionArgumentTypes(func, arguments, mandatory_args);
+        validateFunctionArguments(func, arguments, mandatory_args);
     }
 
     static constexpr auto strings_argument_position = 0uz;
@@ -30,7 +30,7 @@ public:
             {"URL", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
         };
 
-        validateFunctionArgumentTypes(func, arguments, mandatory_args);
+        validateFunctionArguments(func, arguments, mandatory_args);
     }
 
     static constexpr auto strings_argument_position = 0uz;
@@ -30,7 +30,7 @@ public:
             {"URL", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
         };
 
-        validateFunctionArgumentTypes(func, arguments, mandatory_args);
+        validateFunctionArguments(func, arguments, mandatory_args);
     }
 
     static constexpr auto strings_argument_position = 0uz;
@@ -31,7 +31,7 @@ public:
             {"URL", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
         };
 
-        validateFunctionArgumentTypes(func, arguments, mandatory_args);
+        validateFunctionArguments(func, arguments, mandatory_args);
     }
 
     void init(const ColumnsWithTypeAndName & /*arguments*/, bool /*max_substrings_includes_remaining_string*/) {}
@@ -51,6 +51,8 @@ public:
 
     bool isVariadic() const override { return impl.isVariadic(); }
     size_t getNumberOfArguments() const override { return impl.getNumberOfArguments(); }
+    bool useDefaultImplementationForNulls() const override { return impl.useDefaultImplementationForNulls(); }
+    bool useDefaultImplementationForLowCardinalityColumns() const override { return impl.useDefaultImplementationForLowCardinalityColumns(); }
     bool useDefaultImplementationForConstants() const override { return impl.useDefaultImplementationForConstants(); }
     bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo &) const override { return false; }
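The two added overrides matter because, without them, the adapter silently falls back to `IFunction`'s defaults instead of forwarding what the wrapped implementation declares, presumably changing NULL and LowCardinality handling. A standalone sketch of the forwarding idea, with illustrative names:

```cpp
#include <cstdio>

// The wrapped implementation declares its behavioral traits.
struct Impl
{
    bool useDefaultImplementationForNulls() const { return true; }
    bool useDefaultImplementationForLowCardinalityColumns() const { return false; }
};

// The adapter forwards each trait so the wrapped implementation keeps its semantics;
// omitting a forwarder would silently substitute the adapter's own default.
struct Adapter
{
    Impl impl;
    bool useDefaultImplementationForNulls() const { return impl.useDefaultImplementationForNulls(); }
    bool useDefaultImplementationForLowCardinalityColumns() const { return impl.useDefaultImplementationForLowCardinalityColumns(); }
};

int main()
{
    Adapter a;
    std::printf("%d %d\n", a.useDefaultImplementationForNulls(), a.useDefaultImplementationForLowCardinalityColumns());
}
```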
@@ -184,7 +186,7 @@ struct MapToNestedAdapter : public MapAdapterBase<MapToNestedAdapter<Name, retur
 template <typename Name, size_t position>
 struct MapToSubcolumnAdapter
 {
-    static_assert(position <= 1);
+    static_assert(position <= 1, "position of Map subcolumn must be 0 or 1");
 
     static void extractNestedTypes(DataTypes & types)
     {
@@ -357,7 +359,7 @@ struct NameMapValues { static constexpr auto name = "mapValues"; };
 using FunctionMapValues = FunctionMapToArrayAdapter<FunctionIdentity, MapToSubcolumnAdapter<NameMapValues, 1>, NameMapValues>;
 
 struct NameMapContains { static constexpr auto name = "mapContains"; };
-using FunctionMapContains = FunctionMapToArrayAdapter<FunctionArrayIndex<HasAction, NameMapContains>, MapToSubcolumnAdapter<NameMapKeys, 0>, NameMapContains>;
+using FunctionMapContains = FunctionMapToArrayAdapter<FunctionArrayIndex<HasAction, NameMapContains>, MapToSubcolumnAdapter<NameMapContains, 0>, NameMapContains>;
 
 struct NameMapFilter { static constexpr auto name = "mapFilter"; };
 using FunctionMapFilter = FunctionMapToArrayAdapter<FunctionArrayFilter, MapToNestedAdapter<NameMapFilter>, NameMapFilter>;
@@ -87,7 +87,7 @@ public:
             {"array_1", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isArray), nullptr, "Array"},
             {"array_2", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isArray), nullptr, "Array"},
         };
-        validateFunctionArgumentTypes(*this, arguments, args);
+        validateFunctionArguments(*this, arguments, args);
         return std::make_shared<DataTypeNumber<ResultType>>();
     }
@@ -39,7 +39,7 @@ public:
             {"array", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isArray), nullptr, "Array"},
             {"samples", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isUInt), isColumnConst, "const UInt*"},
         };
-        validateFunctionArgumentTypes(*this, arguments, args);
+        validateFunctionArguments(*this, arguments, args);
 
         // Return an array with the same nested type as the input array
         const DataTypePtr & array_type = arguments[0].type;
@@ -31,7 +31,7 @@ public:
             {"array", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isArray), nullptr, "Array"},
             {"length", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isInteger), nullptr, "Integer"}
         };
-        validateFunctionArgumentTypes(*this, arguments, args);
+        validateFunctionArguments(*this, arguments, args);
 
         const DataTypeArray * array_type = checkAndGetDataType<DataTypeArray>(arguments[0].type.get());
         return std::make_shared<DataTypeArray>(std::make_shared<DataTypeArray>(array_type->getNestedType()));
Some files were not shown because too many files have changed in this diff.