Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-21 07:01:59 +00:00

Commit 3539af07a5: Merge branch 'master' into vdimir/join-step2

.github/PULL_REQUEST_TEMPLATE.md (vendored): 2 changes

@@ -12,7 +12,7 @@
 - Backward Incompatible Change
 - Build/Testing/Packaging Improvement
 - Documentation (changelog entry is not required)
-- Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
+- Critical Bug Fix (crash, data loss, RBAC)
 - Bug Fix (user-visible misbehavior in an official stable release)
 - CI Fix or Improvement (changelog entry is not required)
 - Not for changelog (changelog entry is not required)
@@ -42,16 +42,17 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else

 Upcoming meetups

-* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
 * [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
 * [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
 * [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
 * [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3
 * [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9
 * [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9
 * [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12

 Recently completed meetups

+* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
 * [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
 * [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
 * [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
base/base/BFloat16.h (new file): 313 lines

@@ -0,0 +1,313 @@
#pragma once

#include <bit>
#include <type_traits>
#include <base/types.h>


/** BFloat16 is a 16-bit floating point type, which has the same number (8) of exponent bits as Float32.
  * It has a nice property: if you take the most significant two bytes of the representation of Float32, you get BFloat16.
  * It is different from the IEEE Float16 (half precision) data type, which has fewer exponent bits and more mantissa bits.
  *
  * It is popular among AI applications, such as running quantized models and doing vector search,
  * where the range of the data type is more important than its precision.
  *
  * It has also recently gained good hardware support in GPUs, as well as in x86-64 and AArch64 CPUs, including SIMD instructions.
  * But it is rarely utilized by compilers.
  *
  * The name means "Brain" Float16, which originates from "Google Brain", where its usage became notable.
  * It is also known under the name "bf16". You can call it either way, but it is crucial not to confuse it with Float16.
  *
  * Here is a manual implementation of this data type. Only required operations are implemented.
  * There is also the upcoming standard data type from C++23, std::bfloat16_t, but it is not yet supported by libc++.
  * There is also the compiler's builtin data type, __bf16, but clang does not compile all operations with it,
  * sometimes giving an "invalid function call" error (which means a sketchy implementation),
  * and giving errors during the "instruction select pass" during link-time optimization.
  *
  * The current approach is to use this manual implementation and to provide SIMD specializations of certain operations
  * in places where it is needed.
  */
class BFloat16
{
private:
    UInt16 x = 0;

public:
    constexpr BFloat16() = default;
    constexpr BFloat16(const BFloat16 & other) = default;
    constexpr BFloat16 & operator=(const BFloat16 & other) = default;

    explicit constexpr BFloat16(const Float32 & other)
    {
        /// Truncation: keep the most significant two bytes of the Float32 representation.
        x = static_cast<UInt16>(std::bit_cast<UInt32>(other) >> 16);
    }

    template <typename T>
    explicit constexpr BFloat16(const T & other)
        : BFloat16(Float32(other))
    {
    }

    template <typename T>
    constexpr BFloat16 & operator=(const T & other)
    {
        *this = BFloat16(other);
        return *this;
    }

    explicit constexpr operator Float32() const
    {
        /// Widening back: pad the dropped mantissa bits with zeros.
        return std::bit_cast<Float32>(static_cast<UInt32>(x) << 16);
    }

    template <typename T>
    explicit constexpr operator T() const
    {
        return T(Float32(*this));
    }

    constexpr bool isFinite() const
    {
        /// Finite unless the exponent field is all ones.
        return (x & 0b0111111110000000) != 0b0111111110000000;
    }

    constexpr bool isNaN() const
    {
        /// Non-finite with a non-zero mantissa.
        return !isFinite() && (x & 0b0000000001111111) != 0b0000000000000000;
    }

    constexpr bool signBit() const
    {
        return x & 0b1000000000000000;
    }

    constexpr BFloat16 abs() const
    {
        BFloat16 res;
        res.x = x & 0b0111111111111111;  /// Clear the sign bit.
        return res;
    }

    /// Bitwise comparison: NaN compares equal to itself, and +0 and -0 compare unequal.
    constexpr bool operator==(const BFloat16 & other) const
    {
        return x == other.x;
    }

    constexpr bool operator!=(const BFloat16 & other) const
    {
        return x != other.x;
    }

    /// Arithmetic is performed by widening to Float32 and truncating back.
    constexpr BFloat16 operator+(const BFloat16 & other) const
    {
        return BFloat16(Float32(*this) + Float32(other));
    }

    constexpr BFloat16 operator-(const BFloat16 & other) const
    {
        return BFloat16(Float32(*this) - Float32(other));
    }

    constexpr BFloat16 operator*(const BFloat16 & other) const
    {
        return BFloat16(Float32(*this) * Float32(other));
    }

    constexpr BFloat16 operator/(const BFloat16 & other) const
    {
        return BFloat16(Float32(*this) / Float32(other));
    }

    constexpr BFloat16 & operator+=(const BFloat16 & other)
    {
        *this = *this + other;
        return *this;
    }

    constexpr BFloat16 & operator-=(const BFloat16 & other)
    {
        *this = *this - other;
        return *this;
    }

    constexpr BFloat16 & operator*=(const BFloat16 & other)
    {
        *this = *this * other;
        return *this;
    }

    constexpr BFloat16 & operator/=(const BFloat16 & other)
    {
        *this = *this / other;
        return *this;
    }

    constexpr BFloat16 operator-() const
    {
        BFloat16 res;
        res.x = x ^ 0b1000000000000000;  /// Flip the sign bit.
        return res;
    }
};


/// Heterogeneous comparisons and arithmetic go through Float32.
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator==(const BFloat16 & a, const T & b)
{
    return Float32(a) == b;
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator==(const T & a, const BFloat16 & b)
{
    return a == Float32(b);
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator!=(const BFloat16 & a, const T & b)
{
    return Float32(a) != b;
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator!=(const T & a, const BFloat16 & b)
{
    return a != Float32(b);
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<(const BFloat16 & a, const T & b)
{
    return Float32(a) < b;
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<(const T & a, const BFloat16 & b)
{
    return a < Float32(b);
}

constexpr inline bool operator<(BFloat16 a, BFloat16 b)
{
    return Float32(a) < Float32(b);
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>(const BFloat16 & a, const T & b)
{
    return Float32(a) > b;
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>(const T & a, const BFloat16 & b)
{
    return a > Float32(b);
}

constexpr inline bool operator>(BFloat16 a, BFloat16 b)
{
    return Float32(a) > Float32(b);
}


template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<=(const BFloat16 & a, const T & b)
{
    return Float32(a) <= b;
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<=(const T & a, const BFloat16 & b)
{
    return a <= Float32(b);
}

constexpr inline bool operator<=(BFloat16 a, BFloat16 b)
{
    return Float32(a) <= Float32(b);
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>=(const BFloat16 & a, const T & b)
{
    return Float32(a) >= b;
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>=(const T & a, const BFloat16 & b)
{
    return a >= Float32(b);
}

constexpr inline bool operator>=(BFloat16 a, BFloat16 b)
{
    return Float32(a) >= Float32(b);
}


template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator+(T a, BFloat16 b)
{
    return a + Float32(b);
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator+(BFloat16 a, T b)
{
    return Float32(a) + b;
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator-(T a, BFloat16 b)
{
    return a - Float32(b);
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator-(BFloat16 a, T b)
{
    return Float32(a) - b;
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator*(T a, BFloat16 b)
{
    return a * Float32(b);
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator*(BFloat16 a, T b)
{
    return Float32(a) * b;
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator/(T a, BFloat16 b)
{
    return a / Float32(b);
}

template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator/(BFloat16 a, T b)
{
    return Float32(a) / b;
}
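The truncation rule from the header comment is easy to verify in isolation. Below is a minimal, self-contained sketch (standard library only; `uint16_t`/`float` stand in for the `UInt16`/`Float32` aliases that base/types.h provides):

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

int main()
{
    float f = 3.14159265f;                            // 0x40490FDB in IEEE-754 binary32
    uint32_t bits = std::bit_cast<uint32_t>(f);
    uint16_t bf = static_cast<uint16_t>(bits >> 16);  // BFloat16 payload: 0x4049

    // Widening back pads the sixteen dropped low-order bits with zeros.
    float back = std::bit_cast<float>(static_cast<uint32_t>(bf) << 16);

    std::printf("%08x -> %04x -> %g\n",
                static_cast<unsigned>(bits), static_cast<unsigned>(bf), back);
    // Prints: 40490fdb -> 4049 -> 3.14062
}
```

Note that the constructor truncates rather than rounding to nearest even, trading up to one extra bit of error for a single shift instruction.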
@@ -10,6 +10,15 @@

 template <typename T> struct FloatTraits;

+template <>
+struct FloatTraits<BFloat16>
+{
+    using UInt = uint16_t;
+    static constexpr size_t bits = 16;
+    static constexpr size_t exponent_bits = 8;
+    static constexpr size_t mantissa_bits = bits - exponent_bits - 1;
+};
+
 template <>
 struct FloatTraits<float>
 {

@@ -87,6 +96,15 @@ struct DecomposedFloat
             && ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0));
     }

+    bool isFinite() const
+    {
+        return exponent() != ((1ull << Traits::exponent_bits) - 1);
+    }
+
+    bool isNaN() const
+    {
+        return !isFinite() && (mantissa() != 0);
+    }
+
     /// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic.
     /// This function is generic, big integers (128, 256 bit) are supported as well.

@@ -212,3 +230,4 @@ struct DecomposedFloat

 using DecomposedFloat64 = DecomposedFloat<double>;
 using DecomposedFloat32 = DecomposedFloat<float>;
+using DecomposedFloat16 = DecomposedFloat<BFloat16>;
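These trait constants are what let `DecomposedFloat` slice out the sign, exponent, and mantissa fields generically by bit arithmetic. A compile-time sanity check of the values above (a sketch; the include paths are assumptions based on the file layout of this commit):

```cpp
#include <base/BFloat16.h>
#include <base/DecomposedFloat.h>  // assumed location of FloatTraits

static_assert(FloatTraits<BFloat16>::bits == 16);
static_assert(FloatTraits<BFloat16>::exponent_bits == 8);           // same width as Float32
static_assert(FloatTraits<BFloat16>::mantissa_bits == 16 - 8 - 1);  // 7 bits; 1 bit is the sign
static_assert(sizeof(FloatTraits<BFloat16>::UInt) == 2);            // uint16_t carrier type
```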
@@ -4,7 +4,7 @@

 #include <fmt/format.h>


-template <class T> concept is_enum = std::is_enum_v<T>;
+template <typename T> concept is_enum = std::is_enum_v<T>;

 namespace detail
 {
@@ -9,10 +9,11 @@ namespace DB
 {

 using TypeListNativeInt = TypeList<UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64>;
-using TypeListFloat = TypeList<Float32, Float64>;
-using TypeListNativeNumber = TypeListConcat<TypeListNativeInt, TypeListFloat>;
+using TypeListNativeFloat = TypeList<Float32, Float64>;
+using TypeListNativeNumber = TypeListConcat<TypeListNativeInt, TypeListNativeFloat>;
 using TypeListWideInt = TypeList<UInt128, Int128, UInt256, Int256>;
 using TypeListInt = TypeListConcat<TypeListNativeInt, TypeListWideInt>;
+using TypeListFloat = TypeListConcat<TypeListNativeFloat, TypeList<BFloat16>>;
 using TypeListIntAndFloat = TypeListConcat<TypeListInt, TypeListFloat>;
 using TypeListDecimal = TypeList<Decimal32, Decimal64, Decimal128, Decimal256>;
 using TypeListNumber = TypeListConcat<TypeListIntAndFloat, TypeListDecimal>;
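For readers unfamiliar with the `TypeList` machinery, here is a hypothetical standalone reimplementation of the two primitives used above (ClickHouse's actual definitions differ in details), showing how `TypeListFloat` now expands to a three-element list that includes `BFloat16`:

```cpp
#include <type_traits>

template <typename... Ts> struct TypeList {};

template <typename L1, typename L2> struct TypeListConcatImpl;

template <typename... T1, typename... T2>
struct TypeListConcatImpl<TypeList<T1...>, TypeList<T2...>>
{
    using Type = TypeList<T1..., T2...>;  // splice the two parameter packs
};

template <typename L1, typename L2>
using TypeListConcat = typename TypeListConcatImpl<L1, L2>::Type;

// Stand-ins for the real ClickHouse types:
struct Float32; struct Float64; struct BFloat16;

using TypeListNativeFloat = TypeList<Float32, Float64>;
using TypeListFloat = TypeListConcat<TypeListNativeFloat, TypeList<BFloat16>>;

static_assert(std::is_same_v<TypeListFloat, TypeList<Float32, Float64, BFloat16>>);
```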
@@ -32,6 +32,7 @@ TN_MAP(Int32)
 TN_MAP(Int64)
 TN_MAP(Int128)
 TN_MAP(Int256)
+TN_MAP(BFloat16)
 TN_MAP(Float32)
 TN_MAP(Float64)
 TN_MAP(String)
@@ -145,6 +145,7 @@
 #define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
 #define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
 #define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability
+#define TSA_RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__))) /// to return capabilities in functions

 /// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
 /// They use a lambda function to apply a function attribute to a single statement. This enables us to suppress warnings locally instead of
@@ -4,6 +4,8 @@

 #include <base/types.h>
 #include <base/wide_integer.h>
+#include <base/BFloat16.h>


 using Int128 = wide::integer<128, signed>;
 using UInt128 = wide::integer<128, unsigned>;

@@ -24,6 +26,7 @@ struct is_signed // NOLINT(readability-identifier-naming)

 template <> struct is_signed<Int128> { static constexpr bool value = true; };
 template <> struct is_signed<Int256> { static constexpr bool value = true; };
+template <> struct is_signed<BFloat16> { static constexpr bool value = true; };

 template <typename T>
 inline constexpr bool is_signed_v = is_signed<T>::value;

@@ -40,15 +43,13 @@ template <> struct is_unsigned<UInt256> { static constexpr bool value = true; };
 template <typename T>
 inline constexpr bool is_unsigned_v = is_unsigned<T>::value;

-template <class T> concept is_integer =
+template <typename T> concept is_integer =
     std::is_integral_v<T>
     || std::is_same_v<T, Int128>
     || std::is_same_v<T, UInt128>
     || std::is_same_v<T, Int256>
     || std::is_same_v<T, UInt256>;

-template <class T> concept is_floating_point = std::is_floating_point_v<T>;
-
 template <typename T>
 struct is_arithmetic // NOLINT(readability-identifier-naming)
 {

@@ -59,11 +60,16 @@ template <> struct is_arithmetic<Int128> { static constexpr bool value = true; }
 template <> struct is_arithmetic<UInt128> { static constexpr bool value = true; };
 template <> struct is_arithmetic<Int256> { static constexpr bool value = true; };
 template <> struct is_arithmetic<UInt256> { static constexpr bool value = true; };
+
+template <> struct is_arithmetic<BFloat16> { static constexpr bool value = true; };

 template <typename T>
 inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;

+template <typename T> concept is_floating_point =
+    std::is_floating_point_v<T>
+    || std::is_same_v<T, BFloat16>;
+

 #define FOR_EACH_ARITHMETIC_TYPE(M) \
     M(DataTypeDate) \
     M(DataTypeDate32) \

@@ -80,6 +86,7 @@ inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
     M(DataTypeUInt128) \
     M(DataTypeInt256) \
     M(DataTypeUInt256) \
+    M(DataTypeBFloat16) \
     M(DataTypeFloat32) \
     M(DataTypeFloat64)

@@ -99,6 +106,7 @@ inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
     M(DataTypeUInt128, X) \
     M(DataTypeInt256, X) \
     M(DataTypeUInt256, X) \
+    M(DataTypeBFloat16, X) \
     M(DataTypeFloat32, X) \
     M(DataTypeFloat64, X)
@@ -3131,3 +3131,4 @@ DistributedCachePoolBehaviourOnLimit
 SharedJoin
 ShareSet
 unacked
+BFloat
@@ -74,6 +74,7 @@ elseif (ARCH_AARCH64)
     #                  introduced as optional, either in v8.2 [7] or in v8.4 [8].
     # rcpc: Load-Acquire RCpc Register. Better support of release/acquire of atomics. Good for allocators and high contention code.
     #       Optional in v8.2, mandatory in v8.3 [9]. Supported in Graviton >=2, Azure and GCP instances.
+    # bf16: Bfloat16, a half-precision floating point format developed by Google Brain. Optional in v8.2, mandatory in v8.6.
     #
     # [1] https://github.com/aws/aws-graviton-getting-started/blob/main/c-c%2B%2B.md
     # [2] https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10

@@ -85,7 +86,7 @@ elseif (ARCH_AARCH64)
     # [8] https://developer.arm.com/documentation/102651/a/What-are-dot-product-intructions-
     # [9] https://developer.arm.com/documentation/dui0801/g/A64-Data-Transfer-Instructions/LDAPR?lang=en
     # [10] https://github.com/aws/aws-graviton-getting-started/blob/main/README.md
-    set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc")
+    set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc+bf16")
 endif ()

 # Best-effort check: The build generates and executes intermediate binaries, e.g. protoc and llvm-tablegen. If we build on ARM for ARM
@@ -3,8 +3,7 @@

 set (DEFAULT_LIBS "-nodefaultlibs")

-# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
-# See https://bugs.llvm.org/show_bug.cgi?id=16404
+# We need builtins from Clang
 execute_process (COMMAND
     ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt
     OUTPUT_VARIABLE BUILTINS_LIBRARY
contrib/SimSIMD (vendored submodule): 2 changes

@@ -1 +1 @@
-Subproject commit ee3c9c9c00b51645f62a1a9e99611b78c0052a21
+Subproject commit fa60f1b8e3582c50978f0ae86c2ebb6c9af957f3
@@ -1,21 +1,31 @@
 #!/bin/bash

 set +x
 set -eo pipefail
 shopt -s nullglob

 DO_CHOWN=1
-if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
+if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
     DO_CHOWN=0
 fi

-CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
-CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
+# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated.
+# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to choose the user to run the process as.
+# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
+if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
+    echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
+    echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
+    echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
+fi

-# support --user
-if [ "$(id -u)" = "0" ]; then
-    USER=$CLICKHOUSE_UID
-    GROUP=$CLICKHOUSE_GID
+# support `docker run --user=xxx:xxxx`
+if [[ "$(id -u)" = "0" ]]; then
+    if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
+        USER=0
+        GROUP=0
+    else
+        USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
+        GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
+    fi
     if command -v gosu &> /dev/null; then
         gosu="gosu $USER:$GROUP"
     elif command -v su-exec &> /dev/null; then

@@ -82,11 +92,11 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then

     # There is a config file. It is already tested with gosu (if it is readable by the keeper user)
     if [ -f "$KEEPER_CONFIG" ]; then
-        exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
+        exec $gosu clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
     fi

     # There is no config file. Will use the embedded one
-    exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
+    exec $gosu clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
 fi

 # Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image
@@ -88,34 +88,34 @@ RUN if [ -n "${single_binary_location_url}" ]; then \
 #docker-official-library:on

 # A fallback to installation from ClickHouse repository
-RUN if ! clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \
-    apt-get update \
-    && apt-get install --yes --no-install-recommends \
-        apt-transport-https \
-        dirmngr \
-        gnupg2 \
-    && mkdir -p /etc/apt/sources.list.d \
-    && GNUPGHOME=$(mktemp -d) \
-    && GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
-        --keyring /usr/share/keyrings/clickhouse-keyring.gpg \
-        --keyserver hkp://keyserver.ubuntu.com:80 \
-        --recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
-    && rm -rf "$GNUPGHOME" \
-    && chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
-    && echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
-    && echo "installing from repository: ${REPOSITORY}" \
-    && apt-get update \
-    && for package in ${PACKAGES}; do \
-        packages="${packages} ${package}=${VERSION}" \
-    ; done \
-    && apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \
-    && rm -rf \
-        /var/lib/apt/lists/* \
-        /var/cache/debconf \
-        /tmp/* \
-    && apt-get autoremove --purge -yq libksba8 \
-    && apt-get autoremove -yq \
-    ; fi
+# It works unless the clickhouse binary already exists
+RUN clickhouse local -q 'SELECT 1' >/dev/null 2>&1 && exit 0 || : \
+    ; apt-get update \
+    && apt-get install --yes --no-install-recommends \
+        dirmngr \
+        gnupg2 \
+    && mkdir -p /etc/apt/sources.list.d \
+    && GNUPGHOME=$(mktemp -d) \
+    && GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
+        --keyring /usr/share/keyrings/clickhouse-keyring.gpg \
+        --keyserver hkp://keyserver.ubuntu.com:80 \
+        --recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
+    && rm -rf "$GNUPGHOME" \
+    && chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
+    && echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
+    && echo "installing from repository: ${REPOSITORY}" \
+    && apt-get update \
+    && for package in ${PACKAGES}; do \
+        packages="${packages} ${package}=${VERSION}" \
+    ; done \
+    && apt-get install --yes --no-install-recommends ${packages} || exit 1 \
+    && rm -rf \
+        /var/lib/apt/lists/* \
+        /var/cache/debconf \
+        /tmp/* \
+    && apt-get autoremove --purge -yq dirmngr gnupg2 \
+    && chmod ugo+Xrw -R /etc/clickhouse-server /etc/clickhouse-client
+# The last chmod is here to make the next one a no-op in the docker official library Dockerfile

 # post install
 # we need to allow "others" access to clickhouse folder, because docker container

@@ -126,8 +126,6 @@ RUN clickhouse-local -q 'SELECT * FROM system.build_options' \

 RUN locale-gen en_US.UTF-8
 ENV LANG en_US.UTF-8
 ENV LANGUAGE en_US:en
 ENV LC_ALL en_US.UTF-8
 ENV TZ UTC

 RUN mkdir /docker-entrypoint-initdb.d
@@ -1,3 +1,11 @@
+<!---
+The README.md is generated by README.sh from the following sources:
+- README.src/content.md
+- README.src/license.md
+
+If you want to change it, edit these files
+-->
+
 # ClickHouse Server Docker Image

 ## What is ClickHouse?

@@ -8,6 +16,7 @@ ClickHouse works 100-1000x faster than traditional database management systems,

 For more information and documentation see https://clickhouse.com/.

+<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
 ## Versions

 - The `latest` tag points to the latest release of the latest stable branch.

@@ -16,11 +25,12 @@ For more information and documentation see https://clickhouse.com/.
 - The tag `head` is built from the latest commit to the default branch.
 - Each tag has an optional `-alpine` suffix to reflect that it's built on top of `alpine`.

+<!-- REMOVE UNTIL HERE -->
 ### Compatibility

 - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
 - The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples of unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
-- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as the base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run [--privileged | --security-opt seccomp=unconfined]` instead; however, that has security implications.
+- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as the base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run --security-opt seccomp=unconfined` instead; however, that has security implications.

@@ -30,7 +40,7 @@ For more information and documentation see https://clickhouse.com/.
 docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
 ```

-By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking).
+By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.

 By default, the server instance started above runs as the `default` user without a password.

@@ -47,7 +57,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in
 ### connect to it using curl

 ```bash
-echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server curlimages/curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
+echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
 ```

 More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).

@@ -70,7 +80,7 @@ echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-

 `22.6.3.35`

-or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
+Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):

 ```bash
 docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server

@@ -88,8 +98,8 @@ Typically you may want to mount the following folders inside your container to a

 ```bash
 docker run -d \
-    -v $(realpath ./ch_data):/var/lib/clickhouse/ \
-    -v $(realpath ./ch_logs):/var/log/clickhouse-server/ \
+    -v "$PWD/ch_data:/var/lib/clickhouse/" \
+    -v "$PWD/ch_logs:/var/log/clickhouse-server/" \
     --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
 ```

@@ -111,6 +121,8 @@ docker run -d \
     --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
 ```

+Read more in the [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).
+
 ## Configuration

 The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).

@@ -126,8 +138,8 @@ docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /pa
 ### Start server as custom user

 ```bash
-# $(pwd)/data/clickhouse should exist and be owned by current user
-docker run --rm --user ${UID}:${GID} --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
+# $PWD/data/clickhouse should exist and be owned by current user
+docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
 ```

 When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.

@@ -135,7 +147,7 @@ When you use the image with local directories mounted, you probably want to spec
 ### Start server from root (useful in case of enabled user namespace)

 ```bash
-docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server-userns -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
+docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
 ```

 ### How to create default database and user on starting
docker/server/README.sh (new executable file): 38 lines

@@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -ueo pipefail

# A script to generate README.md, close to how it is done in https://github.com/docker-library/docs

WORKDIR=$(dirname "$0")
SCRIPT_NAME=$(basename "$0")
CONTENT=README.src/content.md
LICENSE=README.src/license.md
cd "$WORKDIR"

R=README.md

cat > "$R" <<EOD
<!---
The $R is generated by $SCRIPT_NAME from the following sources:
- $CONTENT
- $LICENSE

If you want to change it, edit these files
-->

EOD

cat "$CONTENT" >> "$R"

cat >> "$R" <<EOD

## License

$(cat $LICENSE)
EOD

# Remove %%LOGO%% and the line following it
sed -i '/^%%LOGO%%/,+1d' "$R"

# Replace each %%IMAGE%% with our `clickhouse/clickhouse-server`
sed -i '/%%IMAGE%%/s:%%IMAGE%%:clickhouse/clickhouse-server:g' "$R"
docker/server/README.src/README-short.txt (new file): 1 line

@@ -0,0 +1 @@
ClickHouse is the fastest and most resource efficient OSS database for real-time apps and analytics.
docker/server/README.src/content.md (new file): 170 lines

@@ -0,0 +1,170 @@
# ClickHouse Server Docker Image

## What is ClickHouse?

%%LOGO%%

ClickHouse is an open-source column-oriented DBMS (columnar database management system) for online analytical processing (OLAP) that allows users to generate analytical reports using SQL queries in real-time.

ClickHouse works 100-1000x faster than traditional database management systems, and processes hundreds of millions to over a billion rows and tens of gigabytes of data per server per second. With a widespread user base around the globe, the technology has received praise for its reliability, ease of use, and fault tolerance.

For more information and documentation see https://clickhouse.com/.

<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions

- The `latest` tag points to the latest release of the latest stable branch.
- Branch tags like `22.2` point to the latest release of the corresponding branch.
- Full version tags like `22.2.3.5` point to the corresponding release.
- The tag `head` is built from the latest commit to the default branch.
- Each tag has an optional `-alpine` suffix to reflect that it's built on top of `alpine`.

<!-- REMOVE UNTIL HERE -->
### Compatibility

- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples of unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as the base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run --security-opt seccomp=unconfined` instead; however, that has security implications.

## How to use this image

### start server instance

```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```

By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.

By default, the server instance started above runs as the `default` user without a password.

### connect to it from a native client

```bash
docker run -it --rm --link some-clickhouse-server:clickhouse-server --entrypoint clickhouse-client %%IMAGE%% --host clickhouse-server
# OR
docker exec -it some-clickhouse-server clickhouse-client
```

More information about the [ClickHouse client](https://clickhouse.com/docs/en/interfaces/cli/).

### connect to it using curl

```bash
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```

More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).

### stopping / removing the container

```bash
docker stop some-clickhouse-server
docker rm some-clickhouse-server
```

### networking

You can expose your ClickHouse running in docker by [mapping a particular port](https://docs.docker.com/config/containers/container-networking/) from inside the container using host ports:

```bash
docker run -d -p 18123:8123 -p19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
```

`22.6.3.35`

Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):

```bash
docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
echo 'SELECT version()' | curl 'http://localhost:8123/' --data-binary @-
```

`22.6.3.35`

### Volumes

Typically you may want to mount the following folders inside your container to achieve persistence:

- `/var/lib/clickhouse/` - main folder where ClickHouse stores the data
- `/var/log/clickhouse-server/` - logs

```bash
docker run -d \
    -v "$PWD/ch_data:/var/lib/clickhouse/" \
    -v "$PWD/ch_logs:/var/log/clickhouse-server/" \
    --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```

You may also want to mount:

- `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
- `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
- `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).

### Linux capabilities

ClickHouse has some advanced functionality which requires enabling several [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html).

They are optional and can be enabled using the following [docker command-line arguments](https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities):

```bash
docker run -d \
    --cap-add=SYS_NICE --cap-add=NET_ADMIN --cap-add=IPC_LOCK \
    --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```

Read more in the [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).

## Configuration

The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).

ClickHouse configuration is represented with a file "config.xml" ([documentation](https://clickhouse.com/docs/en/operations/configuration_files/)).

### Start server instance with custom configuration

```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /path/to/your/config.xml:/etc/clickhouse-server/config.xml %%IMAGE%%
```

### Start server as custom user

```bash
# $PWD/data/clickhouse should exist and be owned by current user
docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%%
```

When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.

### Start server from root (useful in case of enabled user namespace)

```bash
docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%%
```

### How to create default database and user on starting

Sometimes you may want to create a user (a user named `default` is used by default) and a database on container start. You can do it using the environment variables `CLICKHOUSE_DB`, `CLICKHOUSE_USER`, `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT` and `CLICKHOUSE_PASSWORD`:

```bash
docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 -e CLICKHOUSE_PASSWORD=password -p 9000:9000/tcp %%IMAGE%%
```

## How to extend this image

To perform additional initialization in an image derived from this one, add one or more `*.sql`, `*.sql.gz`, or `*.sh` scripts under `/docker-entrypoint-initdb.d`. After the entrypoint calls `initdb`, it will run any `*.sql` files, run any executable `*.sh` scripts, and source any non-executable `*.sh` scripts found in that directory to do further initialization before starting the service.
Also, you can provide the environment variables `CLICKHOUSE_USER` & `CLICKHOUSE_PASSWORD` that will be used for clickhouse-client during initialization.

For example, to add an additional user and database, add the following to `/docker-entrypoint-initdb.d/init-db.sh`:

```bash
#!/bin/bash
set -e

clickhouse client -n <<-EOSQL
    CREATE DATABASE docker;
    CREATE TABLE docker.docker (x Int32) ENGINE = Log;
EOSQL
```
docker/server/README.src/github-repo (new file): 1 line

@@ -0,0 +1 @@
https://github.com/ClickHouse/ClickHouse
docker/server/README.src/license.md (new file): 1 line

@@ -0,0 +1 @@
View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.
docker/server/README.src/logo.svg (new file): 43 lines

@@ -0,0 +1,43 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 616 616">
  <defs>
    <style>
      .cls-1 {
        clip-path: url(#clippath);
      }

      .cls-2 {
        fill: none;
      }

      .cls-2, .cls-3, .cls-4 {
        stroke-width: 0px;
      }

      .cls-3 {
        fill: #1e1e1e;
      }

      .cls-4 {
        fill: #faff69;
      }
    </style>
    <clipPath id="clippath">
      <rect class="cls-2" x="83.23" y="71.73" width="472.55" height="472.55"/>
    </clipPath>
  </defs>
  <g id="Layer_2" data-name="Layer 2">
    <rect class="cls-4" width="616" height="616"/>
  </g>
  <g id="Layer_1" data-name="Layer 1">
    <g class="cls-1">
      <g>
        <path class="cls-3" d="m120.14,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
        <path class="cls-3" d="m208.75,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
        <path class="cls-3" d="m297.35,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
        <path class="cls-3" d="m385.94,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
        <path class="cls-3" d="m474.56,268.36c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.65,2.09,4.65,4.66v79.28c0,2.57-2.09,4.66-4.65,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66v-79.28Z"/>
      </g>
    </g>
  </g>
</svg>
docker/server/README.src/maintainer.md (new file): 1 line

@@ -0,0 +1 @@
[ClickHouse Inc.](%%GITHUB-REPO%%)
docker/server/README.src/metadata.json (new file): 7 lines

@@ -0,0 +1,7 @@
{
  "hub": {
    "categories": [
      "databases-and-storage"
    ]
  }
}
@@ -4,17 +4,28 @@ set -eo pipefail
 shopt -s nullglob

 DO_CHOWN=1
-if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
+if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
     DO_CHOWN=0
 fi

-CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
-CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
+# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated.
+# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to choose the user to run the process as.
+# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
+if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
+    echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
+    echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
+    echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
+fi

-# support --user
-if [ "$(id -u)" = "0" ]; then
-    USER=$CLICKHOUSE_UID
-    GROUP=$CLICKHOUSE_GID
+# support `docker run --user=xxx:xxxx`
+if [[ "$(id -u)" = "0" ]]; then
+    if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
+        USER=0
+        GROUP=0
+    else
+        USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
+        GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
+    fi
 else
     USER="$(id -u)"
     GROUP="$(id -g)"

@@ -55,14 +66,14 @@ function create_directory_and_do_chown() {
     [ -z "$dir" ] && return
     # ensure directories exist
     if [ "$DO_CHOWN" = "1" ]; then
-        mkdir="mkdir"
+        mkdir=( mkdir )
     else
         # if DO_CHOWN=0 it means that the system does not map root user to "admin" permissions
         # it mainly happens on NFS mounts where root==nobody for security reasons
         # thus mkdir MUST run with user id/gid and not from nobody that has zero permissions
-        mkdir="/usr/bin/clickhouse su "${USER}:${GROUP}" mkdir"
+        mkdir=( clickhouse su "${USER}:${GROUP}" mkdir )
     fi
-    if ! $mkdir -p "$dir"; then
+    if ! "${mkdir[@]}" -p "$dir"; then
         echo "Couldn't create necessary directory: $dir"
         exit 1
     fi

@@ -143,7 +154,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
     fi

     # Listen only on localhost until the initialization is done
-    /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
+    clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
     pid="$!"

     # check if clickhouse is ready to accept connections

@@ -151,7 +162,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
     tries=${CLICKHOUSE_INIT_TIMEOUT:-1000}
     while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do
         if [ "$tries" -le "0" ]; then
-            echo >&2 'ClickHouse init process failed.'
+            echo >&2 'ClickHouse init process timeout.'
             exit 1
         fi
         tries=$(( tries-1 ))

@@ -203,18 +214,8 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
     CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0}
     export CLICKHOUSE_WATCHDOG_ENABLE

-    # An option for easy restarting and replacing clickhouse-server in a container, especially in Kubernetes.
-    # For example, you can replace the clickhouse-server binary to another and restart it while keeping the container running.
-    if [[ "${CLICKHOUSE_DOCKER_RESTART_ON_EXIT:-0}" -eq "1" ]]; then
-        while true; do
-            # This runs the server as a child process of the shell script:
-            /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" ||:
-            echo >&2 'ClickHouse Server exited, and the environment variable CLICKHOUSE_DOCKER_RESTART_ON_EXIT is set to 1. Restarting the server.'
-        done
-    else
-        # This replaces the shell script with the server:
-        exec /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
-    fi
+    # This replaces the shell script with the server:
+    exec clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
 fi

 # Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image
docs/changelogs/v24.3.13.40-lts.md (new file): 31 lines

@@ -0,0 +1,31 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.3.13.40-lts (7acabd77389) FIXME as compared to v24.3.12.75-lts (7cb5dff8019)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#63976](https://github.com/ClickHouse/ClickHouse/issues/63976): Fix intersecting parts when restarting after a drop range. [#63202](https://github.com/ClickHouse/ClickHouse/pull/63202) ([Han Fei](https://github.com/hanfei1991)).
* Backported in [#71482](https://github.com/ClickHouse/ClickHouse/issues/71482): Fix `Content-Encoding` not sent in some compressed responses. [#64802](https://github.com/ClickHouse/ClickHouse/issues/64802). [#68975](https://github.com/ClickHouse/ClickHouse/pull/68975) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#70451](https://github.com/ClickHouse/ClickHouse/issues/70451): Fix crash during insertion into FixedString column in PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70619](https://github.com/ClickHouse/ClickHouse/issues/70619): Fix server segfault on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#70877](https://github.com/ClickHouse/ClickHouse/issues/70877): Fix table creation with `CREATE ... AS table_function()` with database `Replicated` and unavailable table function source on secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#70571](https://github.com/ClickHouse/ClickHouse/issues/70571): Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#71146](https://github.com/ClickHouse/ClickHouse/issues/71146): Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#70682](https://github.com/ClickHouse/ClickHouse/issues/70682): Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71113](https://github.com/ClickHouse/ClickHouse/issues/71113): Fix a crash and a leak in AggregateFunctionGroupArraySorted. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#70990](https://github.com/ClickHouse/ClickHouse/issues/70990): Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#71246](https://github.com/ClickHouse/ClickHouse/issues/71246): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)).
* Backported in [#71371](https://github.com/ClickHouse/ClickHouse/issues/71371): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)).
* Backported in [#71594](https://github.com/ClickHouse/ClickHouse/issues/71594): Prevent crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#71022](https://github.com/ClickHouse/ClickHouse/issues/71022): Fix dropping of file cache in CHECK query in case of enabled transactions. [#69256](https://github.com/ClickHouse/ClickHouse/pull/69256) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#70384](https://github.com/ClickHouse/ClickHouse/issues/70384): CI: Enable Integration Tests for backport PRs. [#70329](https://github.com/ClickHouse/ClickHouse/pull/70329) ([Max Kainov](https://github.com/maxknv)).
* Backported in [#70538](https://github.com/ClickHouse/ClickHouse/issues/70538): Remove slow poll() logs in keeper. [#70508](https://github.com/ClickHouse/ClickHouse/pull/70508) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#70971](https://github.com/ClickHouse/ClickHouse/issues/70971): Limit logging of some lines about configs. [#70879](https://github.com/ClickHouse/ClickHouse/pull/70879) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
@ -54,7 +54,7 @@ Parameters:
- `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a
line between two points in Euclidean space), or `cosineDistance` (the [cosine
distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance) - the angle between two non-zero vectors).
- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing the vector with reduced precision (optional, default: `bf16`)
- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing vectors with reduced precision (optional, default: `bf16`)
- `hnsw_max_connections_per_layer`: the number of neighbors per HNSW graph node, also known as `M` in the [HNSW
paper](https://doi.org/10.1109/TPAMI.2018.2889473) (optional, default: 32)
- `hnsw_candidate_list_size_for_construction`: the size of the dynamic candidate list when constructing the HNSW graph, also known as
@ -92,8 +92,8 @@ Vector similarity indexes currently support two distance functions:
- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors
([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)).

Vector similarity indexes allow storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16` or `i8`.
If no scalar kind was specified during index creation, `f16` is used as default.
Vector similarity indexes allow storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16`, `bf16`,
and `i8`. If no scalar kind was specified during index creation, `bf16` is used as default.

For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no
distance function was specified during index creation, `L2Distance` is used as default.
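
As an illustration of the parameters and defaults described above, here is a minimal sketch of creating a table with a vector similarity index. The table and column names are placeholders, and the exact argument order of `vector_similarity` as well as the `allow_experimental_vector_similarity_index` gate are assumptions that may differ between versions:

```sql
SET allow_experimental_vector_similarity_index = 1;

CREATE TABLE embeddings
(
    id UInt64,
    vec Array(Float32),
    -- 'hnsw' method, L2 distance, 3-dimensional vectors, bf16 quantization (the default),
    -- 32 connections per layer, candidate list size 128 for construction
    INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 3, 'bf16', 32, 128)
)
ENGINE = MergeTree
ORDER BY id;
```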
@ -33,6 +33,21 @@ Then, generate the data. Parameter `-s` specifies the scale factor. For example,
./dbgen -s 100
```

Detailed table sizes with scale factor 100:

| Table    | size (in rows) | size (compressed in ClickHouse) |
|----------|----------------|---------------------------------|
| nation   | 25             | 2 kB                            |
| region   | 5              | 1 kB                            |
| part     | 20.000.000     | 895 MB                          |
| supplier | 1.000.000      | 75 MB                           |
| partsupp | 80.000.000     | 4.37 GB                         |
| customer | 15.000.000     | 1.19 GB                         |
| orders   | 150.000.000    | 6.15 GB                         |
| lineitem | 600.000.000    | 26.69 GB                        |

(Compressed sizes in ClickHouse are taken from `system.tables.total_bytes` and based on the table definitions below.)

Now create tables in ClickHouse.

We stick as closely as possible to the rules of the TPC-H specification:
@ -151,10 +166,37 @@ clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO orders FORMAT
clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO lineitem FORMAT CSV" < lineitem.tbl
```
The queries are generated by `./qgen -s <scaling_factor>`. Example queries for `s = 100`:
:::note
Instead of using tpch-kit and generating the tables by yourself, you can alternatively import the data from a public S3 bucket. Make sure
to create empty tables first using the above `CREATE` statements.

```sql
-- Scaling factor 1
INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/nation.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/region.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/part.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/supplier.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/partsupp.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/customer.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/orders.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/lineitem.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;

-- Scaling factor 100
INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
```
:::
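
After the import, a quick sanity check against the row counts listed above can be done via `system.tables` (a minimal sketch; `total_rows` is populated for MergeTree tables):

```sql
-- Expected at scale factor 100 (see the size table above): lineitem ≈ 600 million rows.
SELECT table, total_rows
FROM system.tables
WHERE database = currentDatabase()
  AND table IN ('nation', 'region', 'part', 'supplier', 'partsupp', 'customer', 'orders', 'lineitem');
```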
## Queries

The queries are generated by `./qgen -s <scaling_factor>`. Example queries for `s = 100`:

**Correctness**

The result of the queries agrees with the official results unless mentioned otherwise. To verify, generate a TPC-H database with scale
@ -25,9 +25,10 @@ Query caches can generally be viewed as transactionally consistent or inconsiste
slowly enough that the database only needs to compute the report once (represented by the first `SELECT` query). Further queries can be
served directly from the query cache. In this example, a reasonable validity period could be 30 min.

Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result,
the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
This reduces maintenance effort and avoids redundancy.
Transactionally inconsistent caching is traditionally provided by client tools or proxy packages (e.g.
[chproxy](https://www.chproxy.org/configuration/caching/)) interacting with the database. As a result, the same caching logic and
configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side. This reduces maintenance
effort and avoids redundancy.

## Configuration Settings and Usage

@ -138,7 +139,10 @@ is only cached if the query runs longer than 5 seconds. It is also possible to s
cached - for that use setting [query_cache_min_query_runs](settings/settings.md#query-cache-min-query-runs).

Entries in the query cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a different
value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl).
value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl). The query
cache evicts entries "lazily", i.e. when an entry becomes stale, it is not immediately removed from the cache. Instead, when a new entry
is to be inserted into the query cache, the database checks whether the cache has enough free space for the new entry. If this is not the
case, the database tries to remove all stale entries. If the cache still does not have enough free space, the new entry is not inserted.

Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads
from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries).
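
For illustration, a minimal sketch of enabling the cache for a single query and giving its entry a custom TTL, using the settings discussed above (the query itself is a placeholder):

```sql
-- The first run computes and caches the result; repeated runs within the
-- 300-second TTL can be served from the query cache.
SELECT count()
FROM orders
SETTINGS use_query_cache = 1, query_cache_ttl = 300;
```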
@ -188,14 +192,9 @@ Also, results of queries with non-deterministic functions are not cached by defa
To force caching of results of queries with non-deterministic functions regardless, use setting
[query_cache_nondeterministic_function_handling](settings/settings.md#query-cache-nondeterministic-function-handling).

Results of queries that involve system tables, e.g. `system.processes` or `information_schema.tables`, are not cached by default. To force
caching of results of queries with system tables regardless, use setting
[query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling).

:::note
Prior to ClickHouse v23.11, setting 'query_cache_store_results_of_queries_with_nondeterministic_functions = 0 / 1' controlled whether
results of queries with non-deterministic results were cached. In newer ClickHouse versions, this setting is obsolete and has no effect.
:::
Results of queries that involve system tables (e.g. [system.processes](system-tables/processes.md) or
[information_schema.tables](system-tables/information_schema.md)) are not cached by default. To force caching of results of queries with
system tables regardless, use setting [query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling).

Finally, entries in the query cache are not shared between users due to security reasons. For example, user A must not be able to bypass a
row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can
@ -131,16 +131,6 @@ Type: UInt64

Default: 8

## background_pool_size

Sets the number of threads performing background merges and mutations for tables with MergeTree engines. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance.

Before changing it, please also take a look at related MergeTree settings, such as `number_of_free_entries_in_pool_to_lower_max_size_of_merge` and `number_of_free_entries_in_pool_to_execute_mutation`.

Type: UInt64

Default: 16

## background_schedule_pool_size

The maximum number of threads that will be used for constantly executing some lightweight periodic operations for replicated tables, Kafka streaming, and DNS cache updates.
@ -607,6 +597,30 @@ If number of tables is greater than this value, server will throw an exception.
<max_table_num_to_throw>400</max_table_num_to_throw>
```

## max\_replicated\_table\_num\_to\_throw {#max-replicated-table-num-to-throw}
If the number of replicated tables is greater than this value, the server will throw an exception. 0 means no limitation. Only tables in the Atomic/Ordinary/Replicated/Lazy database engines are counted.

**Example**
```xml
<max_replicated_table_num_to_throw>400</max_replicated_table_num_to_throw>
```

## max\_dictionary\_num\_to\_throw {#max-dictionary-num-to-throw}
If the number of dictionaries is greater than this value, the server will throw an exception. 0 means no limitation. Only dictionaries in the Atomic/Ordinary/Replicated/Lazy database engines are counted.

**Example**
```xml
<max_dictionary_num_to_throw>400</max_dictionary_num_to_throw>
```

## max\_view\_num\_to\_throw {#max-view-num-to-throw}
If the number of views is greater than this value, the server will throw an exception. 0 means no limitation. Only views in the Atomic/Ordinary/Replicated/Lazy database engines are counted.

**Example**
```xml
<max_view_num_to_throw>400</max_view_num_to_throw>
```

## max\_database\_num\_to\_throw {#max-database-num-to-throw}
If the number of databases is greater than this value, the server will throw an exception. 0 means no limitation.
Default value: 0
@ -7,119 +7,4 @@ toc_hidden: true

# List of Aggregate Functions

Standard aggregate functions:

- [count](../reference/count.md)
- [min](../reference/min.md)
- [max](../reference/max.md)
- [sum](../reference/sum.md)
- [avg](../reference/avg.md)
- [any](../reference/any.md)
- [stddevPop](../reference/stddevpop.md)
- [stddevPopStable](../reference/stddevpopstable.md)
- [stddevSamp](../reference/stddevsamp.md)
- [stddevSampStable](../reference/stddevsampstable.md)
- [varPop](../reference/varpop.md)
- [varSamp](../reference/varsamp.md)
- [corr](../reference/corr.md)
- [corrStable](../reference/corrstable.md)
- [corrMatrix](../reference/corrmatrix.md)
- [covarPop](../reference/covarpop.md)
- [covarPopStable](../reference/covarpopstable.md)
- [covarPopMatrix](../reference/covarpopmatrix.md)
- [covarSamp](../reference/covarsamp.md)
- [covarSampStable](../reference/covarsampstable.md)
- [covarSampMatrix](../reference/covarsampmatrix.md)
- [entropy](../reference/entropy.md)
- [exponentialMovingAverage](../reference/exponentialmovingaverage.md)
- [intervalLengthSum](../reference/intervalLengthSum.md)
- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md)
- [mannwhitneyutest](../reference/mannwhitneyutest.md)
- [median](../reference/median.md)
- [rankCorr](../reference/rankCorr.md)
- [sumKahan](../reference/sumkahan.md)
- [studentTTest](../reference/studentttest.md)
- [welchTTest](../reference/welchttest.md)

ClickHouse-specific aggregate functions:

- [aggThrow](../reference/aggthrow.md)
- [analysisOfVariance](../reference/analysis_of_variance.md)
- [any](../reference/any.md)
- [anyHeavy](../reference/anyheavy.md)
- [anyLast](../reference/anylast.md)
- [boundingRatio](../reference/boundrat.md)
- [first_value](../reference/first_value.md)
- [last_value](../reference/last_value.md)
- [argMin](../reference/argmin.md)
- [argMax](../reference/argmax.md)
- [avgWeighted](../reference/avgweighted.md)
- [topK](../reference/topk.md)
- [topKWeighted](../reference/topkweighted.md)
- [deltaSum](../reference/deltasum.md)
- [deltaSumTimestamp](../reference/deltasumtimestamp.md)
- [flameGraph](../reference/flame_graph.md)
- [groupArray](../reference/grouparray.md)
- [groupArrayLast](../reference/grouparraylast.md)
- [groupUniqArray](../reference/groupuniqarray.md)
- [groupArrayInsertAt](../reference/grouparrayinsertat.md)
- [groupArrayMovingAvg](../reference/grouparraymovingavg.md)
- [groupArrayMovingSum](../reference/grouparraymovingsum.md)
- [groupArraySample](../reference/grouparraysample.md)
- [groupArraySorted](../reference/grouparraysorted.md)
- [groupArrayIntersect](../reference/grouparrayintersect.md)
- [groupBitAnd](../reference/groupbitand.md)
- [groupBitOr](../reference/groupbitor.md)
- [groupBitXor](../reference/groupbitxor.md)
- [groupBitmap](../reference/groupbitmap.md)
- [groupBitmapAnd](../reference/groupbitmapand.md)
- [groupBitmapOr](../reference/groupbitmapor.md)
- [groupBitmapXor](../reference/groupbitmapxor.md)
- [sumWithOverflow](../reference/sumwithoverflow.md)
- [sumMap](../reference/summap.md)
- [sumMapWithOverflow](../reference/summapwithoverflow.md)
- [sumMapFiltered](../parametric-functions.md/#summapfiltered)
- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow)
- [minMap](../reference/minmap.md)
- [maxMap](../reference/maxmap.md)
- [skewSamp](../reference/skewsamp.md)
- [skewPop](../reference/skewpop.md)
- [kurtSamp](../reference/kurtsamp.md)
- [kurtPop](../reference/kurtpop.md)
- [uniq](../reference/uniq.md)
- [uniqExact](../reference/uniqexact.md)
- [uniqCombined](../reference/uniqcombined.md)
- [uniqCombined64](../reference/uniqcombined64.md)
- [uniqHLL12](../reference/uniqhll12.md)
- [uniqTheta](../reference/uniqthetasketch.md)
- [quantile](../reference/quantile.md)
- [quantiles](../reference/quantiles.md)
- [quantileExact](../reference/quantileexact.md)
- [quantileExactLow](../reference/quantileexact.md#quantileexactlow)
- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh)
- [quantileExactWeighted](../reference/quantileexactweighted.md)
- [quantileTiming](../reference/quantiletiming.md)
- [quantileTimingWeighted](../reference/quantiletimingweighted.md)
- [quantileDeterministic](../reference/quantiledeterministic.md)
- [quantileTDigest](../reference/quantiletdigest.md)
- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md)
- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16)
- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted)
- [quantileDD](../reference/quantileddsketch.md#quantileddsketch)
- [simpleLinearRegression](../reference/simplelinearregression.md)
- [singleValueOrNull](../reference/singlevalueornull.md)
- [stochasticLinearRegression](../reference/stochasticlinearregression.md)
- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md)
- [categoricalInformationValue](../reference/categoricalinformationvalue.md)
- [contingency](../reference/contingency.md)
- [cramersV](../reference/cramersv.md)
- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md)
- [theilsU](../reference/theilsu.md)
- [maxIntersections](../reference/maxintersections.md)
- [maxIntersectionsPosition](../reference/maxintersectionsposition.md)
- [meanZTest](../reference/meanztest.md)
- [quantileGK](../reference/quantileGK.md)
- [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md)
- [sparkBar](../reference/sparkbar.md)
- [sumCount](../reference/sumcount.md)
- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md)
ClickHouse supports all standard SQL aggregate functions ([sum](../reference/sum.md), [avg](../reference/avg.md), [min](../reference/min.md), [max](../reference/max.md), [count](../reference/count.md)), as well as a wide range of other aggregate functions.
@ -6,7 +6,9 @@ sidebar_label: AggregateFunction

# AggregateFunction

Aggregate functions can have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.
Aggregate functions have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md).
The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix.
To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.

`AggregateFunction(name, types_of_arguments...)` — parametric data type.
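
As an illustration of the `-State`/`-Merge` pair described above, a minimal sketch (hypothetical table and column names):

```sql
CREATE TABLE agg_states
(
    hour DateTime,
    avg_state AggregateFunction(avg, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY hour;

-- Store the intermediate state with -State ...
INSERT INTO agg_states
SELECT toStartOfHour(now()), avgState(number)
FROM numbers(1000);

-- ... and finalize it later with -Merge.
SELECT hour, avgMerge(avg_state) FROM agg_states GROUP BY hour;
```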
@ -512,6 +512,8 @@ The result of operator `<` for values `d1` with underlying type `T1` and `d2` wi
- If `T1 = T2 = T`, the result will be `d1.T < d2.T` (underlying values will be compared).
- If `T1 != T2`, the result will be `T1 < T2` (type names will be compared).

By default, the `Dynamic` type is not allowed in `GROUP BY`/`ORDER BY` keys. If you want to use it, consider its special comparison rule and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.

Examples:
```sql
CREATE TABLE test (d Dynamic) ENGINE=Memory;
@ -535,7 +537,7 @@ SELECT d, dynamicType(d) FROM test;
```

```sql
SELECT d, dynamicType(d) FROM test ORDER BY d;
SELECT d, dynamicType(d) FROM test ORDER BY d SETTINGS allow_suspicious_types_in_order_by=1;
```

```sql
@ -557,7 +559,7 @@ Example:
```sql
CREATE TABLE test (d Dynamic) ENGINE=Memory;
INSERT INTO test VALUES (1::UInt32), (1::Int64), (100::UInt32), (100::Int64);
SELECT d, dynamicType(d) FROM test ORDER by d;
SELECT d, dynamicType(d) FROM test ORDER BY d SETTINGS allow_suspicious_types_in_order_by=1;
```

```text
@ -570,7 +572,7 @@ SELECT d, dynamicType(d) FROM test ORDER by d;
```

```sql
SELECT d, dynamicType(d) FROM test GROUP by d;
SELECT d, dynamicType(d) FROM test GROUP by d SETTINGS allow_suspicious_types_in_group_by=1;
```

```text
@ -582,7 +584,7 @@ SELECT d, dynamicType(d) FROM test GROUP by d;
└─────┴────────────────┘
```

**Note**: the described comparison rule is not applied during execution of comparison functions like `<`/`>`/`=` and others because of [special work](#using-dynamic-type-in-functions) of functions with `Dynamic` type
**Note:** the described comparison rule is not applied during execution of comparison functions like `<`/`>`/`=` and others because of [special work](#using-dynamic-type-in-functions) of functions with `Dynamic` type

## Reaching the limit in number of different data types stored inside Dynamic
@ -1,10 +1,10 @@
---
slug: /en/sql-reference/data-types/float
sidebar_position: 4
sidebar_label: Float32, Float64
sidebar_label: Float32, Float64, BFloat16
---

# Float32, Float64
# Float32, Float64, BFloat16

:::note
If you need accurate calculations, in particular if you work with financial or business data requiring a high precision, you should consider using [Decimal](../data-types/decimal.md) instead.
@ -117,3 +117,11 @@ SELECT 0 / 0
```

See the rules for `NaN` sorting in the section [ORDER BY clause](../../sql-reference/statements/select/order-by.md).

## BFloat16

`BFloat16` is a 16-bit floating point data type with an 8-bit exponent, a sign bit, and a 7-bit mantissa.

It is useful for machine learning and AI applications.

ClickHouse supports conversions between `Float32` and `BFloat16`. Most other operations are not supported.
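
A minimal sketch of the `Float32`-to-`BFloat16` conversion mentioned above (the `allow_experimental_bfloat16_type` setting name is an assumption; the type may be gated behind an experimental setting in some versions):

```sql
SET allow_experimental_bfloat16_type = 1;

-- Converting to BFloat16 keeps the Float32 exponent range but drops mantissa precision.
SELECT CAST(1.2345678::Float32 AS BFloat16) AS bf, toTypeName(bf);
```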
@ -6,29 +6,8 @@ sidebar_position: 1

# Data Types in ClickHouse

ClickHouse can store various kinds of data in table cells. This section describes the supported data types and special considerations for using and/or implementing them if any.
This section describes the data types supported by ClickHouse, for example [integers](int-uint.md), [floats](float.md) and [strings](string.md).

:::note
You can check whether a data type name is case-sensitive in the [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) table.
:::

ClickHouse data types include:

- **Integer types**: [signed and unsigned integers](./int-uint.md) (`UInt8`, `UInt16`, `UInt32`, `UInt64`, `UInt128`, `UInt256`, `Int8`, `Int16`, `Int32`, `Int64`, `Int128`, `Int256`)
- **Floating-point numbers**: [floats](./float.md) (`Float32` and `Float64`) and [`Decimal` values](./decimal.md)
- **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md)
- **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md)
- **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time
- **Object**: the [`Object`](./json.md) stores a JSON document in a single column (deprecated)
- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column
- **UUID**: a performant option for storing [`UUID` values](./uuid.md)
- **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column
- **Arrays**: any column can be defined as an [`Array` of values](./array.md)
- **Maps**: use [`Map`](./map.md) for storing key/value pairs
- **Aggregation function types**: use [`SimpleAggregateFunction`](./simpleaggregatefunction.md) and [`AggregateFunction`](./aggregatefunction.md) for storing the intermediate status of aggregate function results
- **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell
- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type.
- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column setting its default value for the data type)
- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses
- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md)
System table [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) provides an
overview of all available data types.
It also shows whether a data type is an alias to another data type and whether its name is case-sensitive (e.g. `bool` vs. `BOOL`).
@ -7,7 +7,7 @@ keywords: [object, data type]

# Object Data Type (deprecated)

**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
**This feature is not production-ready and deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).

<hr />
@ -670,6 +670,28 @@ SELECT arrayJoin(distinctJSONPathsAndTypes(json)) FROM s3('s3://clickhouse-publi
└─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┘
```

## ALTER MODIFY COLUMN to JSON type

It's possible to alter an existing table and change the type of the column to the new `JSON` type. Right now only alter from `String` type is supported.

**Example**

```sql
CREATE TABLE test (json String) ENGINE=MergeTree ORDER BY tuple();
INSERT INTO test VALUES ('{"a" : 42}'), ('{"a" : 43, "b" : "Hello"}'), ('{"a" : 44, "b" : [1, 2, 3]}'), ('{"c" : "2020-01-01"}');
ALTER TABLE test MODIFY COLUMN json JSON;
SELECT json, json.a, json.b, json.c FROM test;
```

```text
┌─json─────────────────────────┬─json.a─┬─json.b──┬─json.c─────┐
│ {"a":"42"}                   │ 42     │ ᴺᵁᴸᴸ    │ ᴺᵁᴸᴸ       │
│ {"a":"43","b":"Hello"}       │ 43     │ Hello   │ ᴺᵁᴸᴸ       │
│ {"a":"44","b":["1","2","3"]} │ 44     │ [1,2,3] │ ᴺᵁᴸᴸ       │
│ {"c":"2020-01-01"}           │ ᴺᵁᴸᴸ   │ ᴺᵁᴸᴸ    │ 2020-01-01 │
└──────────────────────────────┴────────┴─────────┴────────────┘
```

## Tips for better usage of the JSON type

Before creating `JSON` column and loading data into it, consider the following tips:
@ -5,7 +5,9 @@ sidebar_label: SimpleAggregateFunction
---
# SimpleAggregateFunction

`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data.
`SimpleAggregateFunction(name, types_of_arguments...)` data type stores the current value (intermediate state) of the aggregate function, but not its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does.
This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`.
This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data.

The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix.
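
To make this concrete, a minimal sketch combining `-SimpleState` with `AggregatingMergeTree` (hypothetical table and column names):

```sql
CREATE TABLE totals
(
    key UInt64,
    total SimpleAggregateFunction(sum, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY key;

-- sumSimpleState produces a plain value; partial sums merge correctly when parts merge.
INSERT INTO totals
SELECT number % 10 AS key, sumSimpleState(number) AS total
FROM numbers(100)
GROUP BY key;

SELECT key, sum(total) FROM totals GROUP BY key ORDER BY key;
```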
@ -441,6 +441,8 @@ SELECT v, variantType(v) FROM test ORDER by v;
└─────┴────────────────┘
```

**Note:** by default the `Variant` type is not allowed in `GROUP BY`/`ORDER BY` keys. If you want to use it, consider its special comparison rule and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.

## JSONExtract functions with Variant

All `JSONExtract*` functions support `Variant` type:
@ -4489,9 +4489,9 @@ Using replacement fields, you can define a pattern for the resulting string.
| k  | clockhour of day (1~24)                  | number    | 24                                  |
| m  | minute of hour                           | number    | 30                                  |
| s  | second of minute                         | number    | 55                                  |
| S  | fraction of second (not supported yet)   | number    | 978                                 |
| z  | time zone (short name not supported yet) | text      | Pacific Standard Time; PST          |
| Z  | time zone offset/id (not supported yet)  | zone      | -0800; -08:00; America/Los_Angeles  |
| S  | fraction of second                       | number    | 978                                 |
| z  | time zone                                | text      | Eastern Standard Time; EST          |
| Z  | time zone offset                         | zone      | -0800; -0812                        |
| '  | escape for text                          | delimiter |                                     |
| '' | single quote                             | literal   | '                                   |
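
For instance, the now-supported `S` and `z` fields from the table above can be combined in one pattern (a sketch; the optional time-zone argument mirrors `formatDateTime` and is an assumption here):

```sql
SELECT formatDateTimeInJodaSyntax(now64(3), 'yyyy-MM-dd HH:mm:ss.SSS z', 'America/New_York');
```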
@ -4773,7 +4773,7 @@ Result:

## toUTCTimestamp

Convert DateTime/DateTime64 type value from other time zone to UTC timezone timestamp
Convert DateTime/DateTime64 type value from other time zone to UTC timezone timestamp. This function is mainly included for compatibility with Apache Spark and similar frameworks.

**Syntax**

@ -4799,14 +4799,14 @@ SELECT toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai');
Result:

``` text
┌─toUTCTimestamp(toDateTime('2023-03-16'),'Asia/Shanghai')┐
┌─toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai')┐
│                                     2023-03-15 16:00:00 │
└─────────────────────────────────────────────────────────┘
```

## fromUTCTimestamp

Convert DateTime/DateTime64 type value from UTC timezone to other time zone timestamp
Convert DateTime/DateTime64 type value from UTC timezone to other time zone timestamp. This function is mainly included for compatibility with Apache Spark and similar frameworks.

**Syntax**

@ -4832,7 +4832,7 @@ SELECT fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00', 3), 'Asia/Shanghai')
Result:

``` text
┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3),'Asia/Shanghai')─┐
┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3), 'Asia/Shanghai')─┐
│                                                 2023-03-16 18:00:00.000 │
└─────────────────────────────────────────────────────────────────────────┘
```
@ -5,70 +5,4 @@ sidebar_position: 62
title: "Geo Functions"
---


## Geographical Coordinates Functions

- [greatCircleDistance](./coordinates.md#greatcircledistance)
- [geoDistance](./coordinates.md#geodistance)
- [greatCircleAngle](./coordinates.md#greatcircleangle)
- [pointInEllipses](./coordinates.md#pointinellipses)
- [pointInPolygon](./coordinates.md#pointinpolygon)

## Geohash Functions
- [geohashEncode](./geohash.md#geohashencode)
- [geohashDecode](./geohash.md#geohashdecode)
- [geohashesInBox](./geohash.md#geohashesinbox)

## H3 Indexes Functions

- [h3IsValid](./h3.md#h3isvalid)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3EdgeAngle](./h3.md#h3edgeangle)
- [h3EdgeLengthM](./h3.md#h3edgelengthm)
- [h3EdgeLengthKm](./h3.md#h3edgelengthkm)
- [geoToH3](./h3.md#geotoh3)
- [h3ToGeo](./h3.md#h3togeo)
- [h3ToGeoBoundary](./h3.md#h3togeoboundary)
- [h3kRing](./h3.md#h3kring)
- [h3GetBaseCell](./h3.md#h3getbasecell)
- [h3HexAreaM2](./h3.md#h3hexaream2)
- [h3HexAreaKm2](./h3.md#h3hexareakm2)
- [h3IndexesAreNeighbors](./h3.md#h3indexesareneighbors)
- [h3ToChildren](./h3.md#h3tochildren)
- [h3ToParent](./h3.md#h3toparent)
- [h3ToString](./h3.md#h3tostring)
- [stringToH3](./h3.md#stringtoh3)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3IsResClassIII](./h3.md#h3isresclassiii)
- [h3IsPentagon](./h3.md#h3ispentagon)
- [h3GetFaces](./h3.md#h3getfaces)
- [h3CellAreaM2](./h3.md#h3cellaream2)
- [h3CellAreaRads2](./h3.md#h3cellarearads2)
- [h3ToCenterChild](./h3.md#h3tocenterchild)
- [h3ExactEdgeLengthM](./h3.md#h3exactedgelengthm)
- [h3ExactEdgeLengthKm](./h3.md#h3exactedgelengthkm)
- [h3ExactEdgeLengthRads](./h3.md#h3exactedgelengthrads)
- [h3NumHexagons](./h3.md#h3numhexagons)
- [h3Line](./h3.md#h3line)
- [h3Distance](./h3.md#h3distance)
- [h3HexRing](./h3.md#h3hexring)
- [h3GetUnidirectionalEdge](./h3.md#h3getunidirectionaledge)
- [h3UnidirectionalEdgeIsValid](./h3.md#h3unidirectionaledgeisvalid)
- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3getoriginindexfromunidirectionaledge)
- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3getdestinationindexfromunidirectionaledge)
- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3getindexesfromunidirectionaledge)
- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3getunidirectionaledgesfromhexagon)
- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3getunidirectionaledgeboundary)

## S2 Index Functions

- [geoToS2](./s2.md#geotos2)
- [s2ToGeo](./s2.md#s2togeo)
- [s2GetNeighbors](./s2.md#s2getneighbors)
- [s2CellsIntersect](./s2.md#s2cellsintersect)
- [s2CapContains](./s2.md#s2capcontains)
- [s2CapUnion](./s2.md#s2capunion)
- [s2RectAdd](./s2.md#s2rectadd)
- [s2RectContains](./s2.md#s2rectcontains)
- [s2RectUnion](./s2.md#s2rectunion)
- [s2RectIntersection](./s2.md#s2rectintersection)
Functions for working with geometric objects, for example [to calculate distances between points on a sphere](./coordinates.md), [compute geohashes](./geohash.md), and work with [h3 indexes](./h3.md).
@ -24,7 +24,7 @@ All expressions in a query that have the same AST (the same record or same resul

## Types of Results

All functions return a single return as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.
All functions return a single value as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.

## Constants
@ -6867,9 +6867,53 @@ Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that

Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that it returns `NULL` when it encounters a date format that cannot be processed.

## parseDateTime64

Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [MySQL format string](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format).

**Syntax**

``` sql
parseDateTime64(str[, format[, timezone]])
```

**Arguments**

- `str` — The String to be parsed.
- `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s.%f` if not specified.
- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional.

**Returned value(s)**

Returns a [DateTime64](../data-types/datetime64.md) value parsed from the input string according to a MySQL-style format string.
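
For example, parsing a sub-second timestamp with the default format string documented above (a minimal sketch):

```sql
SELECT parseDateTime64('2024-03-16 10:30:45.123', '%Y-%m-%d %H:%i:%s.%f') AS dt, toTypeName(dt);
```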
## parseDateTime64OrZero

Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed.

## parseDateTime64OrNull

Same as for [parseDateTime64](#parsedatetime64) except that it returns `NULL` when it encounters a date format that cannot be processed.

## parseDateTime64InJodaSyntax

Similar to [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax). Differently, it returns a value of type [DateTime64](../data-types/datetime64.md).
Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [Joda format string](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html).

**Syntax**

``` sql
parseDateTime64InJodaSyntax(str[, format[, timezone]])
```

**Arguments**

- `str` — The String to be parsed.
- `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified.
- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional.

**Returned value(s)**

Returns a [DateTime64](../data-types/datetime64.md) value parsed from the input string according to a Joda-style format string.

## parseDateTime64InJodaSyntaxOrZero
@ -279,7 +279,7 @@ For columns with a new or updated `MATERIALIZED` value expression, all existing

For columns with a new or updated `DEFAULT` value expression, the behavior depends on the ClickHouse version:
- In ClickHouse < v24.2, all existing rows are rewritten.
- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression.
- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression.

Syntax:
@ -6,16 +6,4 @@ sidebar_label: CREATE

# CREATE Queries

Create queries make a new entity of one of the following kinds:

- [DATABASE](/docs/en/sql-reference/statements/create/database.md)
- [TABLE](/docs/en/sql-reference/statements/create/table.md)
- [VIEW](/docs/en/sql-reference/statements/create/view.md)
- [DICTIONARY](/docs/en/sql-reference/statements/create/dictionary.md)
- [FUNCTION](/docs/en/sql-reference/statements/create/function.md)
- [USER](/docs/en/sql-reference/statements/create/user.md)
- [ROLE](/docs/en/sql-reference/statements/create/role.md)
- [ROW POLICY](/docs/en/sql-reference/statements/create/row-policy.md)
- [QUOTA](/docs/en/sql-reference/statements/create/quota.md)
- [SETTINGS PROFILE](/docs/en/sql-reference/statements/create/settings-profile.md)
- [NAMED COLLECTION](/docs/en/sql-reference/statements/create/named-collection.md)
CREATE queries create (for example) new [databases](/docs/en/sql-reference/statements/create/database.md), [tables](/docs/en/sql-reference/statements/create/table.md) and [views](/docs/en/sql-reference/statements/create/view.md).
@ -161,6 +161,8 @@ Settings:

- `actions` — Prints detailed information about step actions. Default: 0.
- `json` — Prints query plan steps as a row in [JSON](../../interfaces/formats.md#json) format. Default: 0. It is recommended to use [TSVRaw](../../interfaces/formats.md#tabseparatedraw) format to avoid unnecessary escaping.

When `json=1`, step names will contain an additional suffix with a unique step identifier.

Example:

```sql
@ -194,30 +196,25 @@ EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw;
{
"Plan": {
"Node Type": "Union",
"Node Id": "Union_10",
"Plans": [
{
"Node Type": "Expression",
"Node Id": "Expression_13",
"Plans": [
{
"Node Type": "SettingQuotaAndLimits",
"Plans": [
{
"Node Type": "ReadFromStorage"
}
]
"Node Type": "ReadFromStorage",
"Node Id": "ReadFromStorage_0"
}
]
},
{
"Node Type": "Expression",
"Node Id": "Expression_16",
"Plans": [
{
"Node Type": "SettingQuotaAndLimits",
"Plans": [
{
"Node Type": "ReadFromStorage"
}
]
"Node Type": "ReadFromStorage",
"Node Id": "ReadFromStorage_4"
}
]
}
@ -249,6 +246,7 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
{
"Plan": {
"Node Type": "Expression",
"Node Id": "Expression_5",
"Header": [
{
"Name": "1",
@ -261,23 +259,13 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
],
"Plans": [
{
"Node Type": "SettingQuotaAndLimits",
"Node Type": "ReadFromStorage",
"Node Id": "ReadFromStorage_0",
"Header": [
{
"Name": "dummy",
"Type": "UInt8"
}
],
"Plans": [
{
"Node Type": "ReadFromStorage",
"Header": [
{
"Name": "dummy",
"Type": "UInt8"
}
]
}
]
}
]
@ -351,17 +339,31 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw;
{
"Plan": {
"Node Type": "Expression",
"Node Id": "Expression_5",
"Expression": {
"Inputs": [],
"Inputs": [
{
"Name": "dummy",
"Type": "UInt8"
}
],
"Actions": [
{
"Node Type": "Column",
"Node Type": "INPUT",
"Result Type": "UInt8",
"Result Type": "Column",
"Result Name": "dummy",
"Arguments": [0],
"Removed Arguments": [0],
"Result": 0
},
{
"Node Type": "COLUMN",
"Result Type": "UInt8",
"Result Name": "1",
"Column": "Const(UInt8)",
"Arguments": [],
"Removed Arguments": [],
"Result": 0
"Result": 1
}
],
"Outputs": [
@ -370,17 +372,12 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw;
"Type": "UInt8"
}
],
"Positions": [0],
"Project Input": true
"Positions": [1]
},
"Plans": [
{
"Node Type": "SettingQuotaAndLimits",
"Plans": [
{
"Node Type": "ReadFromStorage"
}
]
"Node Type": "ReadFromStorage",
"Node Id": "ReadFromStorage_0"
}
]
}
@ -396,6 +393,8 @@ Settings:

- `graph` — Prints a graph described in the [DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) graph description language. Default: 0.
- `compact` — Prints graph in compact mode if `graph` setting is enabled. Default: 1.

When `compact=0` and `graph=1`, processor names will contain an additional suffix with a unique processor identifier.

Example:

```sql
@ -6,27 +6,4 @@ sidebar_label: List of statements

# ClickHouse SQL Statements

Statements represent various kinds of action you can perform using SQL queries. Each kind of statement has its own syntax and usage details that are described separately:

- [SELECT](/docs/en/sql-reference/statements/select/index.md)
- [INSERT INTO](/docs/en/sql-reference/statements/insert-into.md)
- [CREATE](/docs/en/sql-reference/statements/create/index.md)
- [ALTER](/docs/en/sql-reference/statements/alter/index.md)
- [SYSTEM](/docs/en/sql-reference/statements/system.md)
- [SHOW](/docs/en/sql-reference/statements/show.md)
- [GRANT](/docs/en/sql-reference/statements/grant.md)
- [REVOKE](/docs/en/sql-reference/statements/revoke.md)
- [ATTACH](/docs/en/sql-reference/statements/attach.md)
- [CHECK TABLE](/docs/en/sql-reference/statements/check-table.md)
- [DESCRIBE TABLE](/docs/en/sql-reference/statements/describe-table.md)
- [DETACH](/docs/en/sql-reference/statements/detach.md)
- [DROP](/docs/en/sql-reference/statements/drop.md)
- [EXISTS](/docs/en/sql-reference/statements/exists.md)
- [KILL](/docs/en/sql-reference/statements/kill.md)
- [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md)
- [RENAME](/docs/en/sql-reference/statements/rename.md)
- [SET](/docs/en/sql-reference/statements/set.md)
- [SET ROLE](/docs/en/sql-reference/statements/set-role.md)
- [TRUNCATE](/docs/en/sql-reference/statements/truncate.md)
- [USE](/docs/en/sql-reference/statements/use.md)
- [EXPLAIN](/docs/en/sql-reference/statements/explain.md)
Users interact with ClickHouse using SQL statements. ClickHouse supports common SQL statements like [SELECT](select/index.md) and [CREATE](create/index.md), but it also provides specialized statements like [KILL](kill.md) and [OPTIMIZE](optimize.md).
@ -136,7 +136,7 @@ ClickHouse применяет настройку в тех случаях, ко
- 0 — disabled.
- 1 — enabled.

Default value: 0.
Default value: 1.

## http_zlib_compression_level {#settings-http_zlib_compression_level}
@ -97,7 +97,7 @@ ClickHouse从表的过时副本中选择最相关的副本。
- 0 — Disabled.
- 1 — Enabled.

Default value: 0.
Default value: 1.

## http_zlib_compression_level {#settings-http_zlib_compression_level}
@ -7,7 +7,6 @@
|
||||
#include <random>
|
||||
#include <string_view>
|
||||
#include <pcg_random.hpp>
|
||||
#include <Poco/UUID.h>
|
||||
#include <Poco/UUIDGenerator.h>
|
||||
#include <Poco/Util/Application.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
@ -152,8 +151,6 @@ public:
|
||||
global_context->setClientName(std::string(DEFAULT_CLIENT_NAME));
|
||||
global_context->setQueryKindInitial();
|
||||
|
||||
std::cerr << std::fixed << std::setprecision(3);
|
||||
|
||||
/// This is needed to receive blocks with columns of AggregateFunction data type
|
||||
/// (example: when using stage = 'with_mergeable_state')
|
||||
registerAggregateFunctions();
|
||||
@ -226,6 +223,8 @@ private:
|
||||
ContextMutablePtr global_context;
|
||||
QueryProcessingStage::Enum query_processing_stage;
|
||||
|
||||
WriteBufferFromFileDescriptor log{STDERR_FILENO};
|
||||
|
||||
std::atomic<size_t> consecutive_errors{0};
|
||||
|
||||
/// Don't execute new queries after timelimit or SIGINT or exception
|
||||
@ -303,16 +302,16 @@ private:
|
||||
}
|
||||
|
||||
|
||||
std::cerr << "Loaded " << queries.size() << " queries.\n";
|
||||
log << "Loaded " << queries.size() << " queries.\n" << flush;
|
||||
}
|
||||
|
||||
|
||||
void printNumberOfQueriesExecuted(size_t num)
|
||||
{
|
||||
std::cerr << "\nQueries executed: " << num;
|
||||
log << "\nQueries executed: " << num;
|
||||
if (queries.size() > 1)
|
||||
std::cerr << " (" << (num * 100.0 / queries.size()) << "%)";
|
||||
std::cerr << ".\n";
|
||||
log << " (" << (num * 100.0 / queries.size()) << "%)";
|
||||
log << ".\n" << flush;
|
||||
}
|
||||
|
||||
/// Try push new query and check cancellation conditions
|
||||
@ -339,19 +338,19 @@ private:
|
||||
|
||||
if (interrupt_listener.check())
|
||||
{
|
||||
std::cout << "Stopping launch of queries. SIGINT received." << std::endl;
|
||||
std::cout << "Stopping launch of queries. SIGINT received.\n";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
double seconds = delay_watch.elapsedSeconds();
|
||||
if (delay > 0 && seconds > delay)
|
||||
{
|
||||
            printNumberOfQueriesExecuted(queries_executed);
            cumulative
                ? report(comparison_info_total, total_watch.elapsedSeconds())
                : report(comparison_info_per_interval, seconds);
            delay_watch.restart();
        }

        double seconds = delay_watch.elapsedSeconds();
        if (delay > 0 && seconds > delay)
        {
            printNumberOfQueriesExecuted(queries_executed);
            cumulative
                ? report(comparison_info_total, total_watch.elapsedSeconds())
                : report(comparison_info_per_interval, seconds);
            delay_watch.restart();
        }

        return true;

@ -438,16 +437,16 @@ private:
        catch (...)
        {
            std::lock_guard lock(mutex);
            std::cerr << "An error occurred while processing the query " << "'" << query << "'"
                << ": " << getCurrentExceptionMessage(false) << std::endl;
            log << "An error occurred while processing the query " << "'" << query << "'"
                << ": " << getCurrentExceptionMessage(false) << '\n';
            if (!(continue_on_errors || max_consecutive_errors > ++consecutive_errors))
            {
                shutdown = true;
                throw;
            }

            std::cerr << getCurrentExceptionMessage(print_stacktrace,
                true /*check embedded stack trace*/) << std::endl;
            log << getCurrentExceptionMessage(print_stacktrace,
                true /*check embedded stack trace*/) << '\n' << flush;

            size_t info_index = round_robin ? 0 : connection_index;
            ++comparison_info_per_interval[info_index]->errors;
@ -504,7 +503,7 @@ private:
        {
            std::lock_guard lock(mutex);

            std::cerr << "\n";
            log << "\n";
            for (size_t i = 0; i < infos.size(); ++i)
            {
                const auto & info = infos[i];
@ -524,31 +523,31 @@ private:
                    connection_description += conn->getDescription();
                }
            }
            std::cerr
                << connection_description << ", "
                << "queries: " << info->queries << ", ";
            log
                << connection_description << ", "
                << "queries: " << info->queries.load() << ", ";
            if (info->errors)
            {
                std::cerr << "errors: " << info->errors << ", ";
                log << "errors: " << info->errors << ", ";
            }
            std::cerr
                << "QPS: " << (info->queries / seconds) << ", "
                << "RPS: " << (info->read_rows / seconds) << ", "
                << "MiB/s: " << (info->read_bytes / seconds / 1048576) << ", "
                << "result RPS: " << (info->result_rows / seconds) << ", "
                << "result MiB/s: " << (info->result_bytes / seconds / 1048576) << "."
                << "\n";
            log
                << "QPS: " << fmt::format("{:.3f}", info->queries / seconds) << ", "
                << "RPS: " << fmt::format("{:.3f}", info->read_rows / seconds) << ", "
                << "MiB/s: " << fmt::format("{:.3f}", info->read_bytes / seconds / 1048576) << ", "
                << "result RPS: " << fmt::format("{:.3f}", info->result_rows / seconds) << ", "
                << "result MiB/s: " << fmt::format("{:.3f}", info->result_bytes / seconds / 1048576) << "."
                << "\n";
        }
        std::cerr << "\n";
        log << "\n";

        auto print_percentile = [&](double percent)
        {
            std::cerr << percent << "%\t\t";
            log << percent << "%\t\t";
            for (const auto & info : infos)
            {
                std::cerr << info->sampler.quantileNearest(percent / 100.0) << " sec.\t";
                log << fmt::format("{:.3f}", info->sampler.quantileNearest(percent / 100.0)) << " sec.\t";
            }
            std::cerr << "\n";
            log << "\n";
        };

        for (int percent = 0; percent <= 90; percent += 10)
@ -559,13 +558,15 @@ private:
        print_percentile(99.9);
        print_percentile(99.99);

        std::cerr << "\n" << t_test.compareAndReport(confidence).second << "\n";
        log << "\n" << t_test.compareAndReport(confidence).second << "\n";

        if (!cumulative)
        {
            for (auto & info : infos)
                info->clear();
        }

        log.next();
    }

public:
@ -741,7 +742,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
    }
    catch (...)
    {
        std::cerr << getCurrentExceptionMessage(print_stacktrace, true) << std::endl;
        std::cerr << getCurrentExceptionMessage(print_stacktrace, true) << '\n';
        return getCurrentExceptionCode();
    }
}

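The report above switches from raw stream insertion to fmt::format with a fixed precision, so throughput numbers print with a stable number of fractional digits regardless of magnitude. A minimal standalone sketch of the difference (assumes only the fmt library):

    #include <fmt/format.h>
    #include <iostream>

    int main()
    {
        double qps = 123.456789;
        std::cout << "QPS: " << qps << "\n";                          /// stream default: 6 significant digits
        std::cout << "QPS: " << fmt::format("{:.3f}", qps) << "\n";   /// always 3 digits after the point: "123.457"
        std::cout << fmt::format("{:.3f}", 0.00012345) << "\n";       /// "0.000": stable width for small values too
        return 0;
    }
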
@ -431,7 +431,7 @@ catch (const Exception & e)
    bool need_print_stack_trace = config().getBool("stacktrace", false) && e.code() != ErrorCodes::NETWORK_ERROR;
    std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl << std::endl;
    /// If exception code isn't zero, we should return non-zero return code anyway.
    return e.code() ? e.code() : -1;
    return static_cast<UInt8>(e.code()) ? e.code() : -1;
}
catch (...)
{
@ -1390,7 +1390,8 @@ int mainEntryClickHouseClient(int argc, char ** argv)
    catch (const DB::Exception & e)
    {
        std::cerr << DB::getExceptionMessage(e, false) << std::endl;
        return 1;
        auto code = DB::getCurrentExceptionCode();
        return static_cast<UInt8>(code) ? code : 1;
    }
    catch (const boost::program_options::error & e)
    {
@ -1399,7 +1400,8 @@ int mainEntryClickHouseClient(int argc, char ** argv)
    }
    catch (...)
    {
        std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
        return 1;
        std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
        auto code = DB::getCurrentExceptionCode();
        return static_cast<UInt8>(code) ? code : 1;
    }
}

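The recurring `return static_cast<UInt8>(code) ? code : 1;` pattern in these hunks guards against 8-bit truncation of process exit statuses: on POSIX systems the parent only observes the low byte, so a nonzero error code that happens to be a multiple of 256 would otherwise be reported as success. A small standalone illustration (the value 512 is just an example):

    #include <cstdio>

    int main()
    {
        int code = 512;  /// a hypothetical nonzero internal error code
        /// What a parent process would observe after exit(code): only the low 8 bits.
        unsigned char observed = static_cast<unsigned char>(code);
        std::printf("exit(%d) is observed as %u\n", code, observed);  /// prints "exit(512) is observed as 0"
        /// The pattern substitutes a generic failure code when truncation would yield 0.
        return static_cast<unsigned char>(code) ? code : 1;
    }
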
@ -9,14 +9,20 @@
#include <IO/WriteBufferFromFile.h>
#include <IO/ReadBufferFromFile.h>
#include <Compression/CompressedWriteBuffer.h>
#include <Compression/ParallelCompressedWriteBuffer.h>
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedReadBufferFromFile.h>
#include <Compression/getCompressionCodecForFile.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <IO/copyData.h>
#include <Parsers/parseQuery.h>
#include <Parsers/queryToString.h>
#include <Parsers/ExpressionElementParsers.h>
#include <Compression/CompressionFactory.h>
#include <Common/TerminalSize.h>
#include <Common/ThreadPool.h>
#include <Common/CurrentMetrics.h>
#include <Core/Defines.h>


@ -29,33 +35,35 @@ namespace DB
    }
}

namespace CurrentMetrics
{
    extern const Metric LocalThread;
    extern const Metric LocalThreadActive;
    extern const Metric LocalThreadScheduled;
}


namespace
{

/// Outputs sizes of uncompressed and compressed blocks for compressed file.
/// Outputs method, sizes of uncompressed and compressed blocks for compressed file.
void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out)
{
    while (!in.eof())
    {
        in.ignore(16); /// checksum

        char header[COMPRESSED_BLOCK_HEADER_SIZE];
        in.readStrict(header, COMPRESSED_BLOCK_HEADER_SIZE);

        UInt32 size_compressed = unalignedLoad<UInt32>(&header[1]);
        UInt32 size_compressed;
        UInt32 size_decompressed;
        auto codec = DB::getCompressionCodecForFile(in, size_compressed, size_decompressed, true /* skip_to_next_block */);

        if (size_compressed > DBMS_MAX_COMPRESSED_SIZE)
            throw DB::Exception(DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED, "Too large size_compressed. Most likely corrupted data.");

        UInt32 size_decompressed = unalignedLoad<UInt32>(&header[5]);

        DB::writeText(queryToString(codec->getFullCodecDesc()), out);
        DB::writeChar('\t', out);
        DB::writeText(size_decompressed, out);
        DB::writeChar('\t', out);
        DB::writeText(size_compressed, out);
        DB::writeChar('\n', out);

        in.ignore(size_compressed - COMPRESSED_BLOCK_HEADER_SIZE);
    }
}

@ -77,11 +85,12 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
        ("decompress,d", "decompress")
        ("offset-in-compressed-file", po::value<size_t>()->default_value(0ULL), "offset to the compressed block (i.e. physical file offset)")
        ("offset-in-decompressed-block", po::value<size_t>()->default_value(0ULL), "offset to the decompressed block (i.e. virtual offset)")
        ("block-size,b", po::value<unsigned>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
        ("block-size,b", po::value<size_t>()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size")
        ("hc", "use LZ4HC instead of LZ4")
        ("zstd", "use ZSTD instead of LZ4")
        ("codec", po::value<std::vector<std::string>>()->multitoken(), "use codecs combination instead of LZ4")
        ("level", po::value<int>(), "compression level for codecs specified via flags")
        ("threads", po::value<size_t>()->default_value(1), "number of threads for parallel compression")
        ("none", "use no compression instead of LZ4")
        ("stat", "print block statistics of compressed data")
        ("stacktrace", "print stacktrace of exception")
@ -109,7 +118,8 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
        bool stat_mode = options.count("stat");
        bool use_none = options.count("none");
        print_stacktrace = options.count("stacktrace");
        unsigned block_size = options["block-size"].as<unsigned>();
        size_t block_size = options["block-size"].as<size_t>();
        size_t num_threads = options["threads"].as<size_t>();
        std::vector<std::string> codecs;
        if (options.count("codec"))
            codecs = options["codec"].as<std::vector<std::string>>();
@ -117,6 +127,12 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
        if ((use_lz4hc || use_zstd || use_none) && !codecs.empty())
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, codec flags like --zstd and --codec options are mutually exclusive");

        if (num_threads < 1)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid value of `threads` parameter");

        if (num_threads > 1 && decompress)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parallel mode is only implemented for compression (not for decompression)");

        if (!codecs.empty() && options.count("level"))
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, --level is not compatible with --codec list");

@ -145,7 +161,6 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
        else
            codec = CompressionCodecFactory::instance().get(method_family, level);


        std::unique_ptr<ReadBufferFromFileBase> rb;
        std::unique_ptr<WriteBufferFromFileBase> wb;

@ -186,9 +201,20 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
        else
        {
            /// Compression
            CompressedWriteBuffer to(*wb, codec, block_size);
            copyData(*rb, to);
            to.finalize();

            if (num_threads == 1)
            {
                CompressedWriteBuffer to(*wb, codec, block_size);
                copyData(*rb, to);
                to.finalize();
            }
            else
            {
                ThreadPool pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads);
                ParallelCompressedWriteBuffer to(*wb, codec, block_size, num_threads, pool);
                copyData(*rb, to);
                to.finalize();
            }
        }
    }
    catch (...)

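ParallelCompressedWriteBuffer is ClickHouse-internal, but the idea it implements can be sketched independently: cut the input into fixed-size blocks, compress blocks concurrently, and concatenate the results in input order, so the output is byte-identical to single-threaded compression. A simplified sketch under those assumptions (the trivial `compressBlock` stand-in is hypothetical, not the real codec):

    #include <future>
    #include <string>
    #include <vector>

    /// Trivial stand-in codec for the sketch: prefix each block with its size.
    std::string compressBlock(const std::string & block)
    {
        return std::to_string(block.size()) + ":" + block;
    }

    std::string compressParallel(const std::string & input, size_t block_size)
    {
        std::vector<std::future<std::string>> futures;
        for (size_t offset = 0; offset < input.size(); offset += block_size)
            futures.push_back(std::async(std::launch::async,
                [&input, offset, block_size] { return compressBlock(input.substr(offset, block_size)); }));

        std::string result;
        for (auto & f : futures)  /// joining in submission order keeps the output deterministic
            result += f.get();
        return result;
    }

    int main()
    {
        std::string data(1 << 20, 'x');
        std::string out = compressParallel(data, 64 * 1024);
        return out.empty() ? 1 : 0;
    }
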
@ -546,16 +546,18 @@ int mainEntryClickHouseDisks(int argc, char ** argv)
    catch (const DB::Exception & e)
    {
        std::cerr << DB::getExceptionMessage(e, false) << std::endl;
        return 0;
        auto code = DB::getCurrentExceptionCode();
        return static_cast<UInt8>(code) ? code : 1;
    }
    catch (const boost::program_options::error & e)
    {
        std::cerr << "Bad arguments: " << e.what() << std::endl;
        return 0;
        return DB::ErrorCodes::BAD_ARGUMENTS;
    }
    catch (...)
    {
        std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
        return 0;
        auto code = DB::getCurrentExceptionCode();
        return static_cast<UInt8>(code) ? code : 1;
    }
}

@ -448,7 +448,8 @@ int mainEntryClickHouseKeeperClient(int argc, char ** argv)
    catch (const DB::Exception & e)
    {
        std::cerr << DB::getExceptionMessage(e, false) << std::endl;
        return 1;
        auto code = DB::getCurrentExceptionCode();
        return static_cast<UInt8>(code) ? code : 1;
    }
    catch (const boost::program_options::error & e)
    {
@ -458,6 +459,7 @@ int mainEntryClickHouseKeeperClient(int argc, char ** argv)
    catch (...)
    {
        std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
        return 1;
        auto code = DB::getCurrentExceptionCode();
        return static_cast<UInt8>(code) ? code : 1;
    }
}

@ -81,7 +81,7 @@ int mainEntryClickHouseKeeper(int argc, char ** argv)
    {
        std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
        auto code = DB::getCurrentExceptionCode();
        return code ? code : 1;
        return static_cast<UInt8>(code) ? code : 1;
    }
}

@ -672,7 +672,7 @@ catch (...)
    /// Poco does not provide stacktrace.
    tryLogCurrentException("Application");
    auto code = getCurrentExceptionCode();
    return code ? code : -1;
    return static_cast<UInt8>(code) ? code : -1;
}

@ -13,7 +13,7 @@ int mainEntryClickHouseLibraryBridge(int argc, char ** argv)
    {
        std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
        auto code = DB::getCurrentExceptionCode();
        return code ? code : 1;
        return static_cast<UInt8>(code) ? code : 1;
    }
}

@ -14,6 +14,7 @@
#include <Databases/registerDatabases.h>
#include <Databases/DatabaseFilesystem.h>
#include <Databases/DatabaseMemory.h>
#include <Databases/DatabaseAtomic.h>
#include <Databases/DatabasesOverlay.h>
#include <Storages/System/attachSystemTables.h>
#include <Storages/System/attachInformationSchemaTables.h>
@ -22,7 +23,6 @@
#include <Interpreters/ProcessList.h>
#include <Interpreters/loadMetadata.h>
#include <Interpreters/registerInterpreters.h>
#include <base/getFQDNOrHostName.h>
#include <Access/AccessControl.h>
#include <Common/PoolId.h>
#include <Common/Exception.h>
@ -31,7 +31,6 @@
#include <Common/ThreadStatus.h>
#include <Common/TLDListsHolder.h>
#include <Common/quoteString.h>
#include <Common/randomSeed.h>
#include <Common/ThreadPool.h>
#include <Common/CurrentMetrics.h>
#include <Loggers/OwnFormattingChannel.h>
@ -50,7 +49,6 @@
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Formats/registerFormats.h>
#include <boost/algorithm/string/replace.hpp>
#include <boost/program_options/options_description.hpp>
#include <base/argsToConfig.h>
#include <filesystem>
@ -71,9 +69,11 @@ namespace CurrentMetrics

namespace DB
{

namespace Setting
{
    extern const SettingsBool allow_introspection_functions;
    extern const SettingsBool implicit_select;
    extern const SettingsLocalFSReadMethod storage_file_read_method;
}

@ -126,6 +126,7 @@ void applySettingsOverridesForLocal(ContextMutablePtr context)

    settings[Setting::allow_introspection_functions] = true;
    settings[Setting::storage_file_read_method] = LocalFSReadMethod::mmap;
    settings[Setting::implicit_select] = true;

    context->setSettings(settings);
}
@ -257,12 +258,12 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
    return system_database;
}

static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context)
{
    auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
    return databaseCombiner;
    auto overlay = std::make_shared<DatabasesOverlay>(name_, context);
    overlay->registerNextDatabase(std::make_shared<DatabaseAtomic>(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context));
    overlay->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context));
    return overlay;
}

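The overlay change above puts a DatabaseAtomic (with a real on-disk path and a UUID) in front of the filesystem database, so clickhouse-local can create persistent tables while still falling back to path-based table resolution. The lookup discipline of such an overlay can be sketched as a simple first-match chain (hypothetical minimal interfaces, not the real ones):

    #include <memory>
    #include <string>
    #include <vector>

    struct Table {};  /// stand-in for a real table object

    struct IDatabase
    {
        virtual ~IDatabase() = default;
        virtual std::shared_ptr<Table> tryGetTable(const std::string & name) const = 0;
    };

    /// First registered database wins; later ones are consulted only on a miss.
    struct DatabasesOverlaySketch : IDatabase
    {
        std::vector<std::shared_ptr<IDatabase>> databases;

        void registerNextDatabase(std::shared_ptr<IDatabase> db) { databases.push_back(std::move(db)); }

        std::shared_ptr<Table> tryGetTable(const std::string & name) const override
        {
            for (const auto & db : databases)
                if (auto table = db->tryGetTable(name))
                    return table;
            return nullptr;
        }
    };
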
/// If path is specified and not empty, will try to setup server environment and load existing metadata
@ -615,12 +616,14 @@ catch (const DB::Exception & e)
{
    bool need_print_stack_trace = getClientConfiguration().getBool("stacktrace", false);
    std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl;
    return e.code() ? e.code() : -1;
    auto code = DB::getCurrentExceptionCode();
    return static_cast<UInt8>(code) ? code : 1;
}
catch (...)
{
    std::cerr << getCurrentExceptionMessage(false) << std::endl;
    return getCurrentExceptionCode();
    std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
    auto code = DB::getCurrentExceptionCode();
    return static_cast<UInt8>(code) ? code : 1;
}

void LocalServer::updateLoggerLevel(const String & logs_level)
@ -809,7 +812,12 @@ void LocalServer::processConfig()
    DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase();

    std::string default_database = server_settings[ServerSetting::default_database];
    DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
    {
        DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context);
        if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil)
            DatabaseCatalog::instance().addUUIDMapping(uuid);
        DatabaseCatalog::instance().attachDatabase(default_database, database);
    }
    global_context->setCurrentDatabase(default_database);

    if (getClientConfiguration().has("path"))
@ -1029,7 +1037,7 @@ int mainEntryClickHouseLocal(int argc, char ** argv)
    {
        std::cerr << DB::getExceptionMessage(e, false) << std::endl;
        auto code = DB::getCurrentExceptionCode();
        return code ? code : 1;
        return static_cast<UInt8>(code) ? code : 1;
    }
    catch (const boost::program_options::error & e)
    {
@ -1040,6 +1048,6 @@ int mainEntryClickHouseLocal(int argc, char ** argv)
    {
        std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
        auto code = DB::getCurrentExceptionCode();
        return code ? code : 1;
        return static_cast<UInt8>(code) ? code : 1;
    }
}

@ -1,27 +1,22 @@
#include <unistd.h>
#include <fcntl.h>
#include <base/phdr_cache.h>
#include <Common/EnvironmentChecks.h>
#include <Common/StringUtils.h>
#include <Common/getHashOfLoadedBinary.h>

#include <new>
#include <iostream>
#include <vector>
#include <string>
#include <string_view>
#include <utility> /// pair

#include <fmt/format.h>
#if defined(SANITIZE_COVERAGE)
#    include <Common/Coverage.h>
#endif

#include "config.h"
#include "config_tools.h"

#include <Common/EnvironmentChecks.h>
#include <Common/Coverage.h>
#include <Common/StringUtils.h>
#include <Common/getHashOfLoadedBinary.h>
#include <Common/IO.h>

#include <base/phdr_cache.h>
#include <base/coverage.h>

#include <filesystem>
#include <iostream>
#include <new>
#include <string>
#include <string_view>
#include <utility> /// pair
#include <vector>

/// Universal executable for various clickhouse applications
int mainEntryClickHouseServer(int argc, char ** argv);
@ -238,9 +233,12 @@ int main(int argc_, char ** argv_)
    /// clickhouse               # spawn local
    /// clickhouse local         # spawn local
    /// clickhouse "select ..."  # spawn local
    /// clickhouse /tmp/repro --enable-analyzer
    ///
    if (main_func == printHelp && !argv.empty() && (argv.size() == 1 || argv[1][0] == '-'
        || std::string_view(argv[1]).contains(' ')))
    std::error_code ec;
    if (main_func == printHelp && !argv.empty()
        && (argv.size() == 1 || argv[1][0] == '-' || std::string_view(argv[1]).contains(' ')
            || std::filesystem::is_regular_file(std::filesystem::path{argv[1]}, ec)))
    {
        main_func = mainEntryClickHouseLocal;
    }

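The new dispatch rule treats `clickhouse <path>` as an implicit `clickhouse local` when the first argument names an existing regular file (for example a saved query or repro script). Using the `std::error_code` overload matters here: it cannot throw on unusual paths, it just leaves `ec` set and returns false. A standalone sketch of the check:

    #include <filesystem>
    #include <iostream>

    int main(int argc, char ** argv)
    {
        if (argc < 2)
            return 0;
        std::error_code ec;  /// non-throwing overload: permission errors etc. simply yield false
        if (std::filesystem::is_regular_file(std::filesystem::path{argv[1]}, ec))
            std::cout << argv[1] << " looks like a script/query file -> would dispatch to 'local'\n";
        else
            std::cout << argv[1] << " is not a regular file -> would print help or dispatch elsewhere\n";
        return 0;
    }
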
@ -1480,5 +1480,5 @@ catch (...)
{
    std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
    auto code = DB::getCurrentExceptionCode();
    return code ? code : 1;
    return static_cast<UInt8>(code) ? code : 1;
}

@ -13,7 +13,7 @@ int mainEntryClickHouseODBCBridge(int argc, char ** argv)
    {
        std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
        auto code = DB::getCurrentExceptionCode();
        return code ? code : 1;
        return static_cast<UInt8>(code) ? code : 1;
    }
}

@ -343,7 +343,7 @@ int mainEntryClickHouseServer(int argc, char ** argv)
    {
        std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
        auto code = DB::getCurrentExceptionCode();
        return code ? code : 1;
        return static_cast<UInt8>(code) ? code : 1;
    }
}

@ -2537,7 +2537,7 @@ catch (...)
    /// Poco does not provide stacktrace.
    tryLogCurrentException("Application");
    auto code = getCurrentExceptionCode();
    return code ? code : -1;
    return static_cast<UInt8>(code) ? code : -1;
}

std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(

@ -59,7 +59,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
        throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid);

    if (!result)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
    {
        if (0 != getgrgid_r(gid, &entry, buf.get(), buf_size, &result))
            throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrgid_r' to obtain group name from gid ({})", arg_gid);

        if (!result)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid);
    }

    gid = entry.gr_gid;
}
@ -84,7 +90,13 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
        throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid);

    if (!result)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
    {
        if (0 != getpwuid_r(uid, &entry, buf.get(), buf_size, &result))
            throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwuid_r' to obtain user name from uid ({})", uid);

        if (!result)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid);
    }

    uid = entry.pw_uid;
}

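The added branches appear to make a numeric id acceptable where a name was expected: if the name lookup finds nothing, the lookup is retried by id via getgrgid_r/getpwuid_r (the `gid`/`uid` values come from context not shown here), and only then is the argument rejected. Note that these functions signal "not found" by returning 0 with a null result pointer, so both checks are needed. A condensed, hedged sketch of the group half (POSIX only; `std::stoul` here stands in for whatever numeric parsing the real code does earlier):

    #include <grp.h>
    #include <stdexcept>
    #include <string>
    #include <vector>

    gid_t resolveGroup(const std::string & arg)
    {
        std::vector<char> buf(16384);
        group entry{};
        group * result = nullptr;

        if (0 != getgrnam_r(arg.c_str(), &entry, buf.data(), buf.size(), &result))
            throw std::runtime_error("getgrnam_r failed for " + arg);

        if (!result)  /// not a known name: retry the string as a numeric gid
        {
            gid_t gid = static_cast<gid_t>(std::stoul(arg));  /// throws if arg is not numeric
            if (0 != getgrgid_r(gid, &entry, buf.data(), buf.size(), &result))
                throw std::runtime_error("getgrgid_r failed for " + arg);
            if (!result)
                throw std::runtime_error("group " + arg + " not found");
        }
        return entry.gr_gid;
    }
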
@ -231,7 +231,7 @@ public:

    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final
    {
        increment(place, static_cast<const ColVecType &>(*columns[0]).getData()[row_num]);
        increment(place, Numerator(static_cast<const ColVecType &>(*columns[0]).getData()[row_num]));
        ++this->data(place).denominator;
    }

@ -27,9 +27,9 @@ namespace
template <typename T>
struct AggregationFunctionDeltaSumData
{
    T sum = 0;
    T last = 0;
    T first = 0;
    T sum{};
    T last{};
    T first{};
    bool seen = false;
};

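Replacing `T sum = 0;` with `T sum{};` swaps copy-initialization from an int literal for value-initialization, which also works for class-type numeric wrappers (such as a BFloat16-style type) that deliberately avoid implicit construction from int. A minimal illustration with a hypothetical wrapper:

    struct Wrapped16  /// hypothetical numeric wrapper without an implicit int constructor
    {
        unsigned short bits = 0;
        Wrapped16() = default;
        explicit Wrapped16(float) {}
    };

    template <typename T>
    struct Accumulator
    {
        // T sum = 0;   /// would fail to compile for Wrapped16: no conversion from int
        T sum{};        /// value-initialization: zero for builtins, default-construction for classes
    };

    int main()
    {
        Accumulator<double> a;     /// a.sum == 0.0
        Accumulator<Wrapped16> b;  /// compiles; b.sum is default-constructed
        (void)a; (void)b;
        return 0;
    }
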
@ -22,14 +22,21 @@ namespace ErrorCodes
namespace
{

/** Due to a lack of proper code review, this code was contributed with a multiplication of template instantiations
  * over all pairs of data types, and we deeply regret that.
  *
  * We cannot remove all combinations, because the binary representation of serialized data has to remain the same,
  * but we can partially heal the wound by treating unsigned and signed data types in the same way.
  */

template <typename ValueType, typename TimestampType>
struct AggregationFunctionDeltaSumTimestampData
{
    ValueType sum = 0;
    ValueType first = 0;
    ValueType last = 0;
    TimestampType first_ts = 0;
    TimestampType last_ts = 0;
    ValueType sum{};
    ValueType first{};
    ValueType last{};
    TimestampType first_ts{};
    TimestampType last_ts{};
    bool seen = false;
};

@ -37,23 +44,22 @@ template <typename ValueType, typename TimestampType>
class AggregationFunctionDeltaSumTimestamp final
    : public IAggregateFunctionDataHelper<
        AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
        AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
    >
        AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>
{
public:
    AggregationFunctionDeltaSumTimestamp(const DataTypes & arguments, const Array & params)
        : IAggregateFunctionDataHelper<
            AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
        >{arguments, params, createResultType()}
    {}
            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{arguments, params, createResultType()}
    {
    }

    AggregationFunctionDeltaSumTimestamp()
        : IAggregateFunctionDataHelper<
            AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
        >{}
    {}
            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>>{}
    {
    }

    bool allocatesMemoryInArena() const override { return false; }

@ -63,8 +69,8 @@ public:

    void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
    {
        auto value = assert_cast<const ColumnVector<ValueType> &>(*columns[0]).getData()[row_num];
        auto ts = assert_cast<const ColumnVector<TimestampType> &>(*columns[1]).getData()[row_num];
        auto value = unalignedLoad<ValueType>(columns[0]->getRawData().data() + row_num * sizeof(ValueType));
        auto ts = unalignedLoad<TimestampType>(columns[1]->getRawData().data() + row_num * sizeof(TimestampType));

        auto & data = this->data(place);

@ -172,10 +178,48 @@ public:

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
        assert_cast<ColumnVector<ValueType> &>(to).getData().push_back(this->data(place).sum);
        static_cast<ColumnFixedSizeHelper &>(to).template insertRawData<sizeof(ValueType)>(
            reinterpret_cast<const char *>(&this->data(place).sum));
    }
};


template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
IAggregateFunction * createWithTwoTypesSecond(const IDataType & second_type, TArgs && ... args)
{
    WhichDataType which(second_type);

    if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
    if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate<FirstType, UInt64>(args...);
    if (which.idx == TypeIndex::Int32) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
    if (which.idx == TypeIndex::Int64) return new AggregateFunctionTemplate<FirstType, UInt64>(args...);
    if (which.idx == TypeIndex::Float32) return new AggregateFunctionTemplate<FirstType, Float32>(args...);
    if (which.idx == TypeIndex::Float64) return new AggregateFunctionTemplate<FirstType, Float64>(args...);
    if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(args...);
    if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);

    return nullptr;
}

template <template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
IAggregateFunction * createWithTwoTypes(const IDataType & first_type, const IDataType & second_type, TArgs && ... args)
{
    WhichDataType which(first_type);

    if (which.idx == TypeIndex::UInt8) return createWithTwoTypesSecond<UInt8, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::UInt16) return createWithTwoTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::UInt32) return createWithTwoTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::UInt64) return createWithTwoTypesSecond<UInt64, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::Int8) return createWithTwoTypesSecond<UInt8, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::Int16) return createWithTwoTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::Int32) return createWithTwoTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::Int64) return createWithTwoTypesSecond<UInt64, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::Float32) return createWithTwoTypesSecond<Float32, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::Float64) return createWithTwoTypesSecond<Float64, AggregateFunctionTemplate>(second_type, args...);

    return nullptr;
}

AggregateFunctionPtr createAggregateFunctionDeltaSumTimestamp(
    const String & name,
    const DataTypes & arguments,
@ -193,8 +237,14 @@ AggregateFunctionPtr createAggregateFunctionDeltaSumTimestamp(
        throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}, "
                        "must be Int, Float, Date, DateTime", arguments[1]->getName(), name);

    return AggregateFunctionPtr(createWithTwoNumericOrDateTypes<AggregationFunctionDeltaSumTimestamp>(
    auto res = AggregateFunctionPtr(createWithTwoTypes<AggregationFunctionDeltaSumTimestamp>(
        *arguments[0], *arguments[1], arguments, params));

    if (!res)
        throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}, "
                        "this type is not supported", arguments[0]->getName(), name);

    return res;
}
}

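Mapping Int32 to UInt32, Int64 to UInt64 and so on in createWithTwoTypesSecond halves the number of instantiations without changing results: delta sums only use addition and subtraction, and in two's complement those operations produce bit-identical results whether the operands are interpreted as signed or unsigned, so the serialized state stays compatible. A small check of that property:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        int32_t a = -5, b = 7;
        int32_t signed_sum = a + b;  /// 2

        /// The same bytes, summed as unsigned, wrap to the same bit pattern.
        uint32_t ua = static_cast<uint32_t>(a);
        uint32_t ub = static_cast<uint32_t>(b);
        uint32_t unsigned_sum = ua + ub;

        std::printf("%d vs %d\n", signed_sum, static_cast<int32_t>(unsigned_sum));  /// prints "2 vs 2"
        return 0;
    }
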
@ -79,7 +79,7 @@ template <typename T>
struct GroupArraySamplerData
{
    /// For easy serialization.
    static_assert(std::has_unique_object_representations_v<T> || std::is_floating_point_v<T>);
    static_assert(std::has_unique_object_representations_v<T> || is_floating_point<T>);

    // Switch to ordinary Allocator after 4096 bytes to avoid fragmentation and trash in Arena
    using Allocator = MixedAlignedArenaAllocator<alignof(T), 4096>;
@ -120,7 +120,7 @@ template <typename T>
struct GroupArrayNumericData<T, false>
{
    /// For easy serialization.
    static_assert(std::has_unique_object_representations_v<T> || std::is_floating_point_v<T>);
    static_assert(std::has_unique_object_representations_v<T> || is_floating_point<T>);

    // Switch to ordinary Allocator after 4096 bytes to avoid fragmentation and trash in Arena
    using Allocator = MixedAlignedArenaAllocator<alignof(T), 4096>;

@ -38,7 +38,7 @@ template <typename T>
struct MovingData
{
    /// For easy serialization.
    static_assert(std::has_unique_object_representations_v<T> || std::is_floating_point_v<T>);
    static_assert(std::has_unique_object_representations_v<T> || is_floating_point<T>);

    using Accumulator = T;

@ -187,7 +187,7 @@ public:

    static DataTypePtr createResultType()
    {
        if constexpr (std::is_floating_point_v<T>)
        if constexpr (is_floating_point<T>)
            return std::make_shared<DataTypeFloat64>();
        return std::make_shared<DataTypeUInt64>();
    }
@ -227,7 +227,7 @@ public:

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
        if constexpr (std::is_floating_point_v<T>)
        if constexpr (is_floating_point<T>)
            assert_cast<ColumnFloat64 &>(to).getData().push_back(getIntervalLengthSum<Float64>(this->data(place)));
        else
            assert_cast<ColumnUInt64 &>(to).getData().push_back(getIntervalLengthSum<UInt64>(this->data(place)));

@ -155,9 +155,9 @@ public:

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
        Int64 current_intersections = 0;
        Int64 max_intersections = 0;
        PointType position_of_max_intersections = 0;
        Int64 current_intersections{};
        Int64 max_intersections{};
        PointType position_of_max_intersections{};

        /// const_cast because we will sort the array
        auto & array = this->data(place).value;

@ -387,7 +387,7 @@ template <typename Value, bool return_float, bool interpolated>
using FuncQuantileExactWeighted = AggregateFunctionQuantile<
    Value,
    QuantileExactWeighted<Value, interpolated>,
    NameQuantileExactWeighted,
    std::conditional_t<interpolated, NameQuantileExactWeightedInterpolated, NameQuantileExactWeighted>,
    true,
    std::conditional_t<return_float, Float64, void>,
    false,
@ -396,7 +396,7 @@ template <typename Value, bool return_float, bool interpolated>
using FuncQuantilesExactWeighted = AggregateFunctionQuantile<
    Value,
    QuantileExactWeighted<Value, interpolated>,
    NameQuantilesExactWeighted,
    std::conditional_t<interpolated, NameQuantilesExactWeightedInterpolated, NameQuantilesExactWeighted>,
    true,
    std::conditional_t<return_float, Float64, void>,
    true,

@ -45,12 +45,12 @@ struct AggregateFunctionSparkbarData
    Y insert(const X & x, const Y & y)
    {
        if (isNaN(y) || y <= 0)
            return 0;
            return {};

        auto [it, inserted] = points.insert({x, y});
        if (!inserted)
        {
            if constexpr (std::is_floating_point_v<Y>)
            if constexpr (is_floating_point<Y>)
            {
                it->getMapped() += y;
                return it->getMapped();
@ -173,13 +173,13 @@ private:

        if (from_x >= to_x)
        {
            size_t sz = updateFrame(values, 8);
            size_t sz = updateFrame(values, Y{8});
            values.push_back('\0');
            offsets.push_back(offsets.empty() ? sz + 1 : offsets.back() + sz + 1);
            return;
        }

        PaddedPODArray<Y> histogram(width, 0);
        PaddedPODArray<Y> histogram(width, Y{0});
        PaddedPODArray<UInt64> count_histogram(width, 0); /// The number of points in each bucket

        for (const auto & point : data.points)
@ -197,7 +197,7 @@ private:

            Y res;
            bool has_overflow = false;
            if constexpr (std::is_floating_point_v<Y>)
            if constexpr (is_floating_point<Y>)
                res = histogram[index] + point.getMapped();
            else
                has_overflow = common::addOverflow(histogram[index], point.getMapped(), res);
@ -218,10 +218,10 @@ private:
        for (size_t i = 0; i < histogram.size(); ++i)
        {
            if (count_histogram[i] > 0)
                histogram[i] /= count_histogram[i];
                histogram[i] = histogram[i] / count_histogram[i];
        }

        Y y_max = 0;
        Y y_max{};
        for (auto & y : histogram)
        {
            if (isNaN(y) || y <= 0)
@ -245,8 +245,8 @@ private:
                continue;
            }

            constexpr auto levels_num = static_cast<Y>(BAR_LEVELS - 1);
            if constexpr (std::is_floating_point_v<Y>)
            constexpr auto levels_num = Y{BAR_LEVELS - 1};
            if constexpr (is_floating_point<Y>)
            {
                y = y / (y_max / levels_num) + 1;
            }

@ -69,7 +69,7 @@ struct AggregateFunctionSumData
        size_t count = end - start;
        const auto * end_ptr = ptr + count;

        if constexpr (std::is_floating_point_v<T>)
        if constexpr (is_floating_point<T>)
        {
            /// Compiler cannot unroll this loop, do it manually.
            /// (at least for floats, most likely due to the lack of -fassociative-math)
@ -83,7 +83,7 @@ struct AggregateFunctionSumData
            while (ptr < unrolled_end)
            {
                for (size_t i = 0; i < unroll_count; ++i)
                    Impl::add(partial_sums[i], ptr[i]);
                    Impl::add(partial_sums[i], T(ptr[i]));
                ptr += unroll_count;
            }

@ -95,7 +95,7 @@ struct AggregateFunctionSumData
        T local_sum{};
        while (ptr < end_ptr)
        {
            Impl::add(local_sum, *ptr);
            Impl::add(local_sum, T(*ptr));
            ++ptr;
        }
        Impl::add(sum, local_sum);
@ -193,12 +193,11 @@ struct AggregateFunctionSumData
            Impl::add(sum, local_sum);
            return;
        }
        else if constexpr (std::is_floating_point_v<T>)
        else if constexpr (is_floating_point<T> && (sizeof(Value) == 4 || sizeof(Value) == 8))
        {
            /// For floating point we use a similar trick as above, except that now we reinterpret the floating point number as an unsigned
            /// integer of the same size and use a mask instead (0 to discard, 0xFF..FF to keep)
            static_assert(sizeof(Value) == 4 || sizeof(Value) == 8);
            using equivalent_integer = typename std::conditional_t<sizeof(Value) == 4, UInt32, UInt64>;
            using EquivalentInteger = typename std::conditional_t<sizeof(Value) == 4, UInt32, UInt64>;

            constexpr size_t unroll_count = 128 / sizeof(T);
            T partial_sums[unroll_count]{};
@ -209,11 +208,11 @@ struct AggregateFunctionSumData
            {
                for (size_t i = 0; i < unroll_count; ++i)
                {
                    equivalent_integer value;
                    std::memcpy(&value, &ptr[i], sizeof(Value));
                    EquivalentInteger value;
                    memcpy(&value, &ptr[i], sizeof(Value));
                    value &= (!condition_map[i] != add_if_zero) - 1;
                    Value d;
                    std::memcpy(&d, &value, sizeof(Value));
                    memcpy(&d, &value, sizeof(Value));
                    Impl::add(partial_sums[i], d);
                }
                ptr += unroll_count;
@ -228,7 +227,7 @@ struct AggregateFunctionSumData
        while (ptr < end_ptr)
        {
            if (!*condition_map == add_if_zero)
                Impl::add(local_sum, *ptr);
                Impl::add(local_sum, T(*ptr));
            ++ptr;
            ++condition_map;
        }
@ -306,7 +305,7 @@ struct AggregateFunctionSumData
template <typename T>
struct AggregateFunctionSumKahanData
{
    static_assert(std::is_floating_point_v<T>,
    static_assert(is_floating_point<T>,
        "It doesn't make sense to use Kahan Summation algorithm for non floating point types");

    T sum{};
@ -489,10 +488,7 @@ public:
    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
    {
        const auto & column = assert_cast<const ColVecType &>(*columns[0]);
        if constexpr (is_big_int_v<T>)
            this->data(place).add(static_cast<TResult>(column.getData()[row_num]));
        else
            this->data(place).add(column.getData()[row_num]);
        this->data(place).add(static_cast<TResult>(column.getData()[row_num]));
    }

    void addBatchSinglePlace(

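For reference, the compensated ("Kahan") summation that AggregateFunctionSumKahanData implements keeps a running correction term so that small addends are not swallowed by a large partial sum; it is only meaningful for floating-point types, which is what the tightened static_assert enforces. A standalone sketch of the algorithm:

    #include <cstdio>

    int main()
    {
        const int n = 10'000'000;
        float naive = 1.0f, sum = 1.0f, compensation = 0.0f;

        for (int i = 0; i < n; ++i)
        {
            naive += 1e-8f;  /// addend is below float's resolution near 1.0, so it is lost

            /// Kahan step: add the addend corrected by the error of the previous step.
            float y = 1e-8f - compensation;
            float t = sum + y;
            compensation = (t - sum) - y;  /// what was lost in this addition
            sum = t;
        }
        std::printf("naive = %f, kahan = %f\n", naive, sum);  /// naive stays at 1.0; kahan ends near 1.1
        return 0;
    }
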
@ -257,7 +257,7 @@ template <typename T> struct AggregateFunctionUniqTraits
{
    static UInt64 hash(T x)
    {
        if constexpr (std::is_same_v<T, Float32> || std::is_same_v<T, Float64>)
        if constexpr (is_floating_point<T>)
        {
            return bit_cast<UInt64>(x);
        }

@ -111,7 +111,7 @@ public:
        /// Initially UInt128 was introduced only for UUID, and then the other big-integer types were added.
        hash = static_cast<HashValueType>(sipHash64(value));
    }
    else if constexpr (std::is_floating_point_v<T>)
    else if constexpr (is_floating_point<T>)
    {
        hash = static_cast<HashValueType>(intHash64(bit_cast<UInt64>(value)));
    }

@ -184,36 +184,8 @@ static IAggregateFunction * createWithDecimalType(const IDataType & argument_typ
}

/** For template with two arguments.
  * This is extremely dangerous for code bloat - do not use.
  */
template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithTwoNumericTypesSecond(const IDataType & second_type, TArgs && ... args)
{
    WhichDataType which(second_type);
#define DISPATCH(TYPE) \
    if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(args...);
    FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
    if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<FirstType, Int8>(args...);
    if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<FirstType, Int16>(args...);
    return nullptr;
}

template <template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithTwoNumericTypes(const IDataType & first_type, const IDataType & second_type, TArgs && ... args)
{
    WhichDataType which(first_type);
#define DISPATCH(TYPE) \
    if (which.idx == TypeIndex::TYPE) \
        return createWithTwoNumericTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, args...);
    FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
    if (which.idx == TypeIndex::Enum8)
        return createWithTwoNumericTypesSecond<Int8, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::Enum16)
        return createWithTwoNumericTypesSecond<Int16, AggregateFunctionTemplate>(second_type, args...);
    return nullptr;
}

template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithTwoBasicNumericTypesSecond(const IDataType & second_type, TArgs && ... args)
{
@ -237,46 +209,6 @@ static IAggregateFunction * createWithTwoBasicNumericTypes(const IDataType & fir
    return nullptr;
}

template <typename FirstType, template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithTwoNumericOrDateTypesSecond(const IDataType & second_type, TArgs && ... args)
{
    WhichDataType which(second_type);
#define DISPATCH(TYPE) \
    if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(args...);
    FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
    if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<FirstType, Int8>(args...);
    if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<FirstType, Int16>(args...);

    /// expects that DataTypeDate based on UInt16, DataTypeDateTime based on UInt32
    if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(args...);
    if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);

    return nullptr;
}

template <template <typename, typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithTwoNumericOrDateTypes(const IDataType & first_type, const IDataType & second_type, TArgs && ... args)
{
    WhichDataType which(first_type);
#define DISPATCH(TYPE) \
    if (which.idx == TypeIndex::TYPE) \
        return createWithTwoNumericOrDateTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, args...);
    FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
    if (which.idx == TypeIndex::Enum8)
        return createWithTwoNumericOrDateTypesSecond<Int8, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::Enum16)
        return createWithTwoNumericOrDateTypesSecond<Int16, AggregateFunctionTemplate>(second_type, args...);

    /// expects that DataTypeDate based on UInt16, DataTypeDateTime based on UInt32
    if (which.idx == TypeIndex::Date)
        return createWithTwoNumericOrDateTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, args...);
    if (which.idx == TypeIndex::DateTime)
        return createWithTwoNumericOrDateTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, args...);
    return nullptr;
}

template <template <typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithStringType(const IDataType & argument_type, TArgs && ... args)
{

@ -391,7 +391,7 @@ public:
    ResultType getImpl(Float64 level)
    {
        if (centroids.empty())
            return std::is_floating_point_v<ResultType> ? std::numeric_limits<ResultType>::quiet_NaN() : 0;
            return is_floating_point<ResultType> ? std::numeric_limits<ResultType>::quiet_NaN() : 0;

        compress();

@ -276,6 +276,6 @@ private:
    {
        if (OnEmpty == ReservoirSamplerOnEmpty::THROW)
            throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSampler");
        return NanLikeValueConstructor<ResultType, std::is_floating_point_v<ResultType>>::getValue();
        return NanLikeValueConstructor<ResultType, is_floating_point<ResultType>>::getValue();
    }
};

@ -271,7 +271,7 @@ private:
    {
        if (OnEmpty == ReservoirSamplerDeterministicOnEmpty::THROW)
            throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Quantile of empty ReservoirSamplerDeterministic");
        return NanLikeValueConstructor<ResultType, std::is_floating_point_v<ResultType>>::getValue();
        return NanLikeValueConstructor<ResultType, is_floating_point<ResultType>>::getValue();
    }
};

@ -88,6 +88,7 @@ void FunctionNode::resolveAsFunction(FunctionBasePtr function_value)
    function_name = function_value->getName();
    function = std::move(function_value);
    kind = FunctionKind::ORDINARY;
    nulls_action = NullsAction::EMPTY;
}

void FunctionNode::resolveAsAggregateFunction(AggregateFunctionPtr aggregate_function_value)
@ -95,6 +96,12 @@ void FunctionNode::resolveAsAggregateFunction(AggregateFunctionPtr aggregate_fun
    function_name = aggregate_function_value->getName();
    function = std::move(aggregate_function_value);
    kind = FunctionKind::AGGREGATE;
    /** When the function is resolved, we do not need the nulls action anymore.
      * The only thing that the nulls action does is map from one function to another.
      * Thus, the nulls action is encoded in the function name and does not make sense anymore.
      * Keeping the nulls action may lead to incorrect comparison of functions, e.g., count() and count() IGNORE NULLS are the same function.
      */
    nulls_action = NullsAction::EMPTY;
}

void FunctionNode::resolveAsWindowFunction(AggregateFunctionPtr window_function_value)

@ -48,9 +48,15 @@ ASTPtr JoinNode::toASTTableJoin() const
        auto join_expression_ast = children[join_expression_child_index]->toAST();

        if (is_using_join_expression)
            join_ast->using_expression_list = std::move(join_expression_ast);
        {
            join_ast->using_expression_list = join_expression_ast;
            join_ast->children.push_back(join_ast->using_expression_list);
        }
        else
            join_ast->on_expression = std::move(join_expression_ast);
        {
            join_ast->on_expression = join_expression_ast;
            join_ast->children.push_back(join_ast->on_expression);
        }
    }

    return join_ast;

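The fix registers the using/on expression in the generic `children` vector as well as in the named slot; in ClickHouse-style AST nodes the `children` list is what cloning and generic tree traversal walk, so a pointer stored only in a named field would silently be skipped. A sketch of the invariant, with a hypothetical minimal AST type:

    #include <memory>
    #include <vector>

    struct ASTSketch
    {
        std::vector<std::shared_ptr<ASTSketch>> children;  /// what generic walkers/cloners see
        std::shared_ptr<ASTSketch> on_expression;          /// convenient named alias into children

        void setOnExpression(std::shared_ptr<ASTSketch> expr)
        {
            on_expression = expr;       /// keep the named handle...
            children.push_back(expr);   /// ...and make it reachable for traversal and cloning
        }
    };
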
@ -85,10 +85,9 @@ QueryTreeNodePtr createResolvedFunction(const ContextPtr & context, const String
}

FunctionNodePtr createResolvedAggregateFunction(
    const String & name, const QueryTreeNodePtr & argument, const Array & parameters = {}, NullsAction action = NullsAction::EMPTY)
    const String & name, const QueryTreeNodePtr & argument, const Array & parameters = {})
{
    auto function_node = std::make_shared<FunctionNode>(name);
    function_node->setNullsAction(action);

    if (!parameters.empty())
    {
@ -100,7 +99,7 @@ FunctionNodePtr createResolvedAggregateFunction(
    function_node->getArguments().getNodes() = { argument };

    AggregateFunctionProperties properties;
    auto aggregate_function = AggregateFunctionFactory::instance().get(name, action, {argument->getResultType()}, parameters, properties);
    auto aggregate_function = AggregateFunctionFactory::instance().get(name, NullsAction::EMPTY, {argument->getResultType()}, parameters, properties);
    function_node->resolveAsAggregateFunction(std::move(aggregate_function));

    return function_node;

@ -602,9 +602,21 @@ public:
        return projection_columns;
    }

    /// Returns true if query node is resolved, false otherwise
    bool isResolved() const
    {
        return !projection_columns.empty();
    }

    /// Resolve query node projection columns
    void resolveProjectionColumns(NamesAndTypes projection_columns_value);

    /// Clear query node projection columns
    void clearProjectionColumns()
    {
        projection_columns.clear();
    }

    /// Remove unused projection columns
    void removeUnusedProjectionColumns(const std::unordered_set<std::string> & used_projection_columns);

@ -3,7 +3,6 @@
#include <memory>

#include <Common/Exception.h>
#include "Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.h"

#include <IO/WriteHelpers.h>
#include <IO/Operators.h>
@ -16,39 +15,39 @@
#include <Analyzer/ColumnNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/Utils.h>
#include <Analyzer/Passes/AggregateFunctionOfGroupByKeysPass.h>
#include <Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.h>
#include <Analyzer/Passes/ArrayExistsToHasPass.h>
#include <Analyzer/Passes/AutoFinalOnQueryPass.h>
#include <Analyzer/Passes/ComparisonTupleEliminationPass.h>
#include <Analyzer/Passes/ConvertOrLikeChainPass.h>
#include <Analyzer/Passes/ConvertQueryToCNFPass.h>
#include <Analyzer/Passes/CountDistinctPass.h>
#include <Analyzer/Passes/CrossToInnerJoinPass.h>
#include <Analyzer/Passes/FunctionToSubcolumnsPass.h>
#include <Analyzer/Passes/FuseFunctionsPass.h>
#include <Analyzer/Passes/GroupingFunctionsResolvePass.h>
#include <Analyzer/Passes/IfChainToMultiIfPass.h>
#include <Analyzer/Passes/IfConstantConditionPass.h>
#include <Analyzer/Passes/IfTransformStringsToEnumPass.h>
#include <Analyzer/Passes/LogicalExpressionOptimizerPass.h>
#include <Analyzer/Passes/MultiIfToIfPass.h>
#include <Analyzer/Passes/NormalizeCountVariantsPass.h>
#include <Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.h>
#include <Analyzer/Passes/OptimizeGroupByFunctionKeysPass.h>
#include <Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.h>
#include <Analyzer/Passes/OptimizeRedundantFunctionsInOrderByPass.h>
#include <Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.h>
#include <Analyzer/Passes/OrderByTupleEliminationPass.h>
#include <Analyzer/Passes/QueryAnalysisPass.h>
#include <Analyzer/Passes/RemoveUnusedProjectionColumnsPass.h>
#include <Analyzer/Passes/RewriteSumFunctionWithSumAndCountPass.h>
#include <Analyzer/Passes/CountDistinctPass.h>
#include <Analyzer/Passes/UniqToCountPass.h>
#include <Analyzer/Passes/FunctionToSubcolumnsPass.h>
#include <Analyzer/Passes/RewriteAggregateFunctionWithIfPass.h>
#include <Analyzer/Passes/SumIfToCountIfPass.h>
#include <Analyzer/Passes/MultiIfToIfPass.h>
#include <Analyzer/Passes/IfConstantConditionPass.h>
#include <Analyzer/Passes/IfChainToMultiIfPass.h>
#include <Analyzer/Passes/OrderByTupleEliminationPass.h>
#include <Analyzer/Passes/NormalizeCountVariantsPass.h>
#include <Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.h>
#include <Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.h>
#include <Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.h>
#include <Analyzer/Passes/FuseFunctionsPass.h>
#include <Analyzer/Passes/OptimizeGroupByFunctionKeysPass.h>
#include <Analyzer/Passes/IfTransformStringsToEnumPass.h>
#include <Analyzer/Passes/ConvertOrLikeChainPass.h>
#include <Analyzer/Passes/OptimizeRedundantFunctionsInOrderByPass.h>
#include <Analyzer/Passes/GroupingFunctionsResolvePass.h>
#include <Analyzer/Passes/AutoFinalOnQueryPass.h>
#include <Analyzer/Passes/ArrayExistsToHasPass.h>
#include <Analyzer/Passes/ComparisonTupleEliminationPass.h>
#include <Analyzer/Passes/LogicalExpressionOptimizerPass.h>
#include <Analyzer/Passes/CrossToInnerJoinPass.h>
#include <Analyzer/Passes/RewriteSumFunctionWithSumAndCountPass.h>
#include <Analyzer/Passes/ShardNumColumnToFunctionPass.h>
#include <Analyzer/Passes/ConvertQueryToCNFPass.h>
#include <Analyzer/Passes/AggregateFunctionOfGroupByKeysPass.h>
#include <Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.h>

#include <Analyzer/Passes/SumIfToCountIfPass.h>
#include <Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.h>
#include <Analyzer/Passes/UniqToCountPass.h>
#include <Analyzer/Utils.h>

namespace DB
{

@ -1,3 +1,4 @@
#include <Interpreters/ProcessorsProfileLog.h>
#include <Common/FieldVisitorToString.h>

#include <DataTypes/DataTypesNumber.h>
@ -51,7 +52,6 @@
#include <Analyzer/ArrayJoinNode.h>
#include <Analyzer/JoinNode.h>
#include <Analyzer/UnionNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryTreeBuilder.h>
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/Identifier.h>
@ -103,6 +103,8 @@ namespace Setting
    extern const SettingsBool single_join_prefer_left_table;
    extern const SettingsBool transform_null_in;
    extern const SettingsUInt64 use_structure_from_insertion_table_in_table_functions;
    extern const SettingsBool allow_suspicious_types_in_group_by;
    extern const SettingsBool allow_suspicious_types_in_order_by;
    extern const SettingsBool use_concurrency_control;
}

@ -675,6 +677,8 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
                    "tuple"});
            }
        }

        logProcessorProfile(context, io.pipeline.getProcessors());
    }

    scalars_cache.emplace(node_with_hash, scalar_block);
@ -2966,27 +2970,29 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
        /// Replace storage with values storage of insertion block
        if (StoragePtr storage = scope.context->getViewSource())
        {
            QueryTreeNodePtr table_expression;
            /// Process possibly nested sub-selects
            for (auto * query_node = in_second_argument->as<QueryNode>(); query_node; query_node = table_expression->as<QueryNode>())
                table_expression = extractLeftTableExpression(query_node->getJoinTree());
            QueryTreeNodePtr table_expression = in_second_argument;

            if (table_expression)
            /// Process possibly nested sub-selects
            while (table_expression)
            {
                if (auto * query_table_node = table_expression->as<TableNode>())
                {
                    if (query_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted())
                    {
                        auto replacement_table_expression = std::make_shared<TableNode>(storage, scope.context);
                        if (std::optional<TableExpressionModifiers> table_expression_modifiers = query_table_node->getTableExpressionModifiers())
                            replacement_table_expression->setTableExpressionModifiers(*table_expression_modifiers);
                        in_second_argument = in_second_argument->cloneAndReplace(table_expression, std::move(replacement_table_expression));
                    }
                }
                if (auto * query_node = table_expression->as<QueryNode>())
                    table_expression = extractLeftTableExpression(query_node->getJoinTree());
                else if (auto * union_node = table_expression->as<UnionNode>())
                    table_expression = union_node->getQueries().getNodes().at(0);
                else
                    break;
            }

            TableNode * table_expression_table_node = table_expression ? table_expression->as<TableNode>() : nullptr;

            if (table_expression_table_node &&
                table_expression_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted())
            {
                auto replacement_table_expression_table_node = table_expression_table_node->clone();
                replacement_table_expression_table_node->as<TableNode &>().updateStorage(storage, scope.context);
                in_second_argument = in_second_argument->cloneAndReplace(table_expression, std::move(replacement_table_expression_table_node));
            }
        }

        resolveExpressionNode(in_second_argument, scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/);
    }

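The rewritten loop generalizes the old QueryNode-only descent: starting from the IN operand it repeatedly steps to the leftmost table expression of a query, or to the first select of a union, until it bottoms out at a leaf, which is then compared against the view source. The shape of the walk, reduced to its control flow (hypothetical node types, not the real query tree classes):

    #include <memory>
    #include <vector>

    struct Node
    {
        virtual ~Node() = default;
    };
    struct TableN : Node {};
    struct QueryN : Node { std::shared_ptr<Node> left_table; };
    struct UnionN : Node { std::vector<std::shared_ptr<Node>> queries; };

    /// Descend to the leftmost leaf through arbitrarily nested queries and unions.
    std::shared_ptr<Node> leftmostLeaf(std::shared_ptr<Node> node)
    {
        while (node)
        {
            if (auto * q = dynamic_cast<QueryN *>(node.get()))
                node = q->left_table;
            else if (auto * u = dynamic_cast<UnionN *>(node.get()))
                node = u->queries.at(0);  /// first select of the union; assumed non-empty here
            else
                break;  /// a table, table function, etc.: stop at the leaf
        }
        return node;
    }
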
/// Edge case when the first argument of IN is scalar subquery.
|
||||
@ -3019,9 +3025,10 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
|
||||
argument_column.name = arguments_projection_names[function_argument_index];
|
||||
|
||||
/** If function argument is lambda, save lambda argument index and initialize argument type as DataTypeFunction
|
||||
* where function argument types are initialized with empty array of lambda arguments size.
|
||||
* where function argument types are initialized with empty arrays of lambda arguments size.
|
||||
*/
|
||||
if (const auto * lambda_node = function_argument->as<const LambdaNode>())
|
||||
const auto * lambda_node = function_argument->as<const LambdaNode>();
|
||||
if (lambda_node)
|
||||
{
|
||||
size_t lambda_arguments_size = lambda_node->getArguments().getNodes().size();
|
||||
argument_column.type = std::make_shared<DataTypeFunction>(DataTypes(lambda_arguments_size, nullptr), nullptr);
|
||||
@ -3493,15 +3500,11 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
else
function_base = function->build(argument_columns);

/// Do not constant fold get scalar functions
// bool disable_constant_folding = function_name == "__getScalar" || function_name == "shardNum" ||
//     function_name == "shardCount" || function_name == "hostName" || function_name == "tcpPort";

/** If function is suitable for constant folding try to convert it to constant.
 * Example: SELECT plus(1, 1);
 * Result: SELECT 2;
 */
if (function_base->isSuitableForConstantFolding()) // && !disable_constant_folding)
if (function_base->isSuitableForConstantFolding())
{
auto result_type = function_base->getResultType();
auto executable_function = function_base->prepare(argument_columns);
@ -3510,7 +3513,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi

if (all_arguments_constants)
{
size_t num_rows = function_arguments.empty() ? 0 : argument_columns.front().column->size();
size_t num_rows = 0;
if (!argument_columns.empty())
num_rows = argument_columns.front().column->size();
column = executable_function->execute(argument_columns, result_type, num_rows, true);
}
else
@ -4028,6 +4033,8 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
sort_node.getExpression() = sort_column_list_node->getNodes().front();
}

validateSortingKeyType(sort_node.getExpression()->getResultType(), scope);

size_t sort_expression_projection_names_size = sort_expression_projection_names.size();
if (sort_expression_projection_names_size != 1)
throw Exception(ErrorCodes::LOGICAL_ERROR,
@ -4142,6 +4149,26 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
return result_projection_names;
}

void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const
{
if (scope.context->getSettingsRef()[Setting::allow_suspicious_types_in_order_by])
return;

auto check = [](const IDataType & type)
{
if (isDynamic(type) || isVariant(type))
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. "
"Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if "
"it's a JSON path subcolumn) or casting this column to a specific data type. "
"Set setting allow_suspicious_types_in_order_by = 1 in order to allow it");
};

check(*sorting_key_type);
sorting_key_type->forEachChild(check);
}
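The validator above rejects a suspicious type wherever it occurs: the check lambda runs on the key type itself and is then applied to every nested type via forEachChild, so a key like Array(Variant(...)) is caught as well. A minimal self-contained sketch of that recursive-check pattern, with a hypothetical SimpleType standing in for IDataType:

#include <functional>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-in for IDataType: a name plus nested types, e.g. Array(Variant(...)).
struct SimpleType
{
    std::string name;
    std::vector<SimpleType> children;

    // Mirrors the forEachChild idea: apply the callback to every nested type, recursively.
    void forEachChild(const std::function<void(const SimpleType &)> & callback) const
    {
        for (const auto & child : children)
        {
            callback(child);
            child.forEachChild(callback);
        }
    }
};

void validateKeyType(const SimpleType & key_type)
{
    auto check = [](const SimpleType & type)
    {
        if (type.name == "Variant" || type.name == "Dynamic")
            throw std::runtime_error("Type " + type.name + " is not allowed in sorting/grouping keys");
    };

    check(key_type);              // the type itself...
    key_type.forEachChild(check); // ...and everything nested inside it
}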

namespace
{

@ -4181,11 +4208,12 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR
expandTuplesInList(group_by_list);
}

if (scope.group_by_use_nulls)
for (const auto & grouping_set : query_node_typed.getGroupBy().getNodes())
{
for (const auto & grouping_set : query_node_typed.getGroupBy().getNodes())
for (const auto & group_by_elem : grouping_set->as<ListNode>()->getNodes())
{
for (const auto & group_by_elem : grouping_set->as<ListNode>()->getNodes())
validateGroupByKeyType(group_by_elem->getResultType(), scope);
if (scope.group_by_use_nulls)
scope.nullable_group_by_keys.insert(group_by_elem);
}
}
@ -4201,14 +4229,37 @@
auto & group_by_list = query_node_typed.getGroupBy().getNodes();
expandTuplesInList(group_by_list);

if (scope.group_by_use_nulls)
for (const auto & group_by_elem : query_node_typed.getGroupBy().getNodes())
{
for (const auto & group_by_elem : query_node_typed.getGroupBy().getNodes())
validateGroupByKeyType(group_by_elem->getResultType(), scope);
if (scope.group_by_use_nulls)
scope.nullable_group_by_keys.insert(group_by_elem);
}
}
}

/** Validate data types of GROUP BY key.
 */
void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const
{
if (scope.context->getSettingsRef()[Setting::allow_suspicious_types_in_group_by])
return;

auto check = [](const IDataType & type)
{
if (isDynamic(type) || isVariant(type))
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. "
"Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if "
"it's a JSON path subcolumn) or casting this column to a specific data type. "
"Set setting allow_suspicious_types_in_group_by = 1 in order to allow it");
};

check(*group_by_key_type);
group_by_key_type->forEachChild(check);
}

/** Resolve interpolate columns nodes list.
 */
void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope)
@ -5347,6 +5398,16 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier

auto & query_node_typed = query_node->as<QueryNode &>();

/** It is unsafe to call resolveQuery on already resolved query node, because during identifier resolution process
 * we replace identifiers with expressions without aliases, also at the end of resolveQuery all aliases from all nodes will be removed.
 * For subsequent resolveQuery executions it is possible to have wrong projection header, because for nodes
 * with aliases projection name is alias.
 *
 * If for client it is necessary to resolve query node after clone, client must clear projection columns from query node before resolve.
 */
if (query_node_typed.isResolved())
return;

if (query_node_typed.isCTE())
ctes_in_resolve_process.insert(query_node_typed.getCTEName());

@ -5485,16 +5546,13 @@
 */
scope.use_identifier_lookup_to_result_cache = false;

if (query_node_typed.getJoinTree())
{
TableExpressionsAliasVisitor table_expressions_visitor(scope);
table_expressions_visitor.visit(query_node_typed.getJoinTree());
TableExpressionsAliasVisitor table_expressions_visitor(scope);
table_expressions_visitor.visit(query_node_typed.getJoinTree());

initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope);
scope.aliases.alias_name_to_table_expression_node.clear();
initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope);
scope.aliases.alias_name_to_table_expression_node.clear();

resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor);
}
resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor);

if (!scope.group_by_use_nulls)
scope.use_identifier_lookup_to_result_cache = true;
@ -5712,6 +5770,9 @@ void QueryAnalyzer::resolveUnion(const QueryTreeNodePtr & union_node, Identifier
{
auto & union_node_typed = union_node->as<UnionNode &>();

if (union_node_typed.isResolved())
return;

if (union_node_typed.isCTE())
ctes_in_resolve_process.insert(union_node_typed.getCTEName());
@ -220,8 +220,12 @@ private:

ProjectionNames resolveSortNodeList(QueryTreeNodePtr & sort_node_list, IdentifierResolveScope & scope);

void validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const;

void resolveGroupByNode(QueryNode & query_node_typed, IdentifierResolveScope & scope);

void validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const;

void resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope);

void resolveWindowNodeList(QueryTreeNodePtr & window_node_list, IdentifierResolveScope & scope);
@ -35,6 +35,7 @@ namespace ErrorCodes
{
extern const int TYPE_MISMATCH;
extern const int BAD_ARGUMENTS;
extern const int LOGICAL_ERROR;
}

UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_)
@ -50,6 +51,26 @@ UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_)
children[queries_child_index] = std::make_shared<ListNode>();
}

bool UnionNode::isResolved() const
{
for (const auto & query_node : getQueries().getNodes())
{
bool is_resolved = false;

if (auto * query_node_typed = query_node->as<QueryNode>())
is_resolved = query_node_typed->isResolved();
else if (auto * union_node_typed = query_node->as<UnionNode>())
is_resolved = union_node_typed->isResolved();
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected query tree node type in UNION node");

if (!is_resolved)
return false;
}

return true;
}
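resolveQuery() and resolveUnion() use isResolved() as an idempotency guard: the first resolution pass replaces identifiers and strips aliases, so running it again on the same node would corrupt the projection header. A schematic sketch of that guard under the same assumption (the Node type below is hypothetical; the real QueryNode/UnionNode derive this state from their projection columns):

// Hypothetical node with an explicit flag.
struct Node
{
    bool resolved = false;
};

// The destructive resolution pass: it strips aliases, so it must run exactly once.
void doResolve(Node & node)
{
    node.resolved = true;
}

// Idempotency guard, mirroring the isResolved() checks in resolveQuery()/resolveUnion().
void resolveOnce(Node & node)
{
    if (node.resolved)
        return; // already resolved: re-resolving would be destructive, so do nothing
    doResolve(node);
}

int main()
{
    Node n;
    resolveOnce(n);
    resolveOnce(n); // no-op on the second call
}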

NamesAndTypes UnionNode::computeProjectionColumns() const
{
if (recursive_cte_table)

@ -163,6 +163,9 @@ public:
return children[queries_child_index];
}

/// Returns true if union node is resolved, false otherwise
bool isResolved() const;

/// Compute union node projection columns
NamesAndTypes computeProjectionColumns() const;
@ -14,12 +14,12 @@ namespace ErrorCodes

BackupConcurrencyCheck::BackupConcurrencyCheck(
const UUID & backup_or_restore_uuid_,
bool is_restore_,
bool on_cluster_,
const String & zookeeper_path_,
bool allow_concurrency_,
BackupConcurrencyCounters & counters_)
: is_restore(is_restore_), backup_or_restore_uuid(backup_or_restore_uuid_), on_cluster(on_cluster_), counters(counters_)
: is_restore(is_restore_), on_cluster(on_cluster_), zookeeper_path(zookeeper_path_), counters(counters_)
{
std::lock_guard lock{counters.mutex};

@ -32,7 +32,7 @@ BackupConcurrencyCheck::BackupConcurrencyCheck(
size_t num_on_cluster_restores = counters.on_cluster_restores.size();
if (on_cluster)
{
if (!counters.on_cluster_restores.contains(backup_or_restore_uuid))
if (!counters.on_cluster_restores.contains(zookeeper_path))
++num_on_cluster_restores;
}
else
@ -47,7 +47,7 @@ BackupConcurrencyCheck::BackupConcurrencyCheck(
size_t num_on_cluster_backups = counters.on_cluster_backups.size();
if (on_cluster)
{
if (!counters.on_cluster_backups.contains(backup_or_restore_uuid))
if (!counters.on_cluster_backups.contains(zookeeper_path))
++num_on_cluster_backups;
}
else
@ -64,9 +64,9 @@ BackupConcurrencyCheck::BackupConcurrencyCheck(
if (on_cluster)
{
if (is_restore)
++counters.on_cluster_restores[backup_or_restore_uuid];
++counters.on_cluster_restores[zookeeper_path];
else
++counters.on_cluster_backups[backup_or_restore_uuid];
++counters.on_cluster_backups[zookeeper_path];
}
else
{
@ -86,7 +86,7 @@ BackupConcurrencyCheck::~BackupConcurrencyCheck()
{
if (is_restore)
{
auto it = counters.on_cluster_restores.find(backup_or_restore_uuid);
auto it = counters.on_cluster_restores.find(zookeeper_path);
if (it != counters.on_cluster_restores.end())
{
if (!--it->second)
@ -95,7 +95,7 @@ BackupConcurrencyCheck::~BackupConcurrencyCheck()
}
else
{
auto it = counters.on_cluster_backups.find(backup_or_restore_uuid);
auto it = counters.on_cluster_backups.find(zookeeper_path);
if (it != counters.on_cluster_backups.end())
{
if (!--it->second)
@ -1,7 +1,8 @@
#pragma once

#include <Core/UUID.h>
#include <base/defines.h>
#include <base/scope_guard.h>
#include <base/types.h>
#include <mutex>
#include <unordered_map>

@ -19,9 +20,9 @@ public:
/// Checks concurrency of a BACKUP operation or a RESTORE operation.
/// Keep a constructed instance of BackupConcurrencyCheck until the operation is done.
BackupConcurrencyCheck(
const UUID & backup_or_restore_uuid_,
bool is_restore_,
bool on_cluster_,
const String & zookeeper_path_,
bool allow_concurrency_,
BackupConcurrencyCounters & counters_);

@ -31,8 +32,8 @@ public:

private:
const bool is_restore;
const UUID backup_or_restore_uuid;
const bool on_cluster;
const String zookeeper_path;
BackupConcurrencyCounters & counters;
};

@ -47,8 +48,8 @@ private:
friend class BackupConcurrencyCheck;
size_t local_backups TSA_GUARDED_BY(mutex) = 0;
size_t local_restores TSA_GUARDED_BY(mutex) = 0;
std::unordered_map<UUID /* backup_uuid */, size_t /* num_refs */> on_cluster_backups TSA_GUARDED_BY(mutex);
std::unordered_map<UUID /* restore_uuid */, size_t /* num_refs */> on_cluster_restores TSA_GUARDED_BY(mutex);
std::unordered_map<String /* zookeeper_path */, size_t /* num_refs */> on_cluster_backups TSA_GUARDED_BY(mutex);
std::unordered_map<String /* zookeeper_path */, size_t /* num_refs */> on_cluster_restores TSA_GUARDED_BY(mutex);
std::mutex mutex;
};
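Keying the maps by the coordination path instead of by UUID means every participant of the same ON CLUSTER operation on a host shares one refcounted entry, and a genuinely concurrent operation shows up as a second, distinct key. A reduced sketch of that refcounting scheme (the class and error text are illustrative, not the real API):

#include <cassert>
#include <cstddef>
#include <stdexcept>
#include <string>
#include <unordered_map>

// Sketch of the registry above: one refcounted entry per ON CLUSTER operation,
// keyed by its coordination path in ZooKeeper rather than by UUID.
class OnClusterRegistry
{
public:
    void acquire(const std::string & zookeeper_path, bool allow_concurrency)
    {
        // A different key already present means a different operation is running.
        if (!allow_concurrency && !operations.contains(zookeeper_path) && !operations.empty())
            throw std::runtime_error("Concurrent backups/restores are not allowed");
        ++operations[zookeeper_path];
    }

    void release(const std::string & zookeeper_path)
    {
        auto it = operations.find(zookeeper_path);
        assert(it != operations.end());
        if (!--it->second)
            operations.erase(it); // last participant removes the entry
    }

private:
    std::unordered_map<std::string /* zookeeper_path */, std::size_t /* num_refs */> operations;
};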

@ -4,31 +4,29 @@
namespace DB
{

BackupCoordinationCleaner::BackupCoordinationCleaner(const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_)
: zookeeper_path(zookeeper_path_), with_retries(with_retries_), log(log_)
BackupCoordinationCleaner::BackupCoordinationCleaner(bool is_restore_, const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_)
: is_restore(is_restore_), zookeeper_path(zookeeper_path_), with_retries(with_retries_), log(log_)
{
}

void BackupCoordinationCleaner::cleanup()
bool BackupCoordinationCleaner::cleanup(bool throw_if_error)
{
tryRemoveAllNodes(/* throw_if_error = */ true, /* retries_kind = */ WithRetries::kNormal);
WithRetries::Kind retries_kind = throw_if_error ? WithRetries::kNormal : WithRetries::kErrorHandling;
return cleanupImpl(throw_if_error, retries_kind);
}

bool BackupCoordinationCleaner::tryCleanupAfterError() noexcept
{
return tryRemoveAllNodes(/* throw_if_error = */ false, /* retries_kind = */ WithRetries::kNormal);
}

bool BackupCoordinationCleaner::tryRemoveAllNodes(bool throw_if_error, WithRetries::Kind retries_kind)
bool BackupCoordinationCleaner::cleanupImpl(bool throw_if_error, WithRetries::Kind retries_kind)
{
{
std::lock_guard lock{mutex};
if (cleanup_result.succeeded)
return true;
if (cleanup_result.exception)
if (succeeded)
{
if (throw_if_error)
std::rethrow_exception(cleanup_result.exception);
LOG_TRACE(log, "Nodes from ZooKeeper are already removed");
return true;
}
if (tried)
{
LOG_INFO(log, "Skipped removing nodes from ZooKeeper because earlier we failed to do that");
return false;
}
}
@ -44,16 +42,18 @@ bool BackupCoordinationCleaner::tryRemoveAllNodes(bool throw_if_error, WithRetri
});

std::lock_guard lock{mutex};
cleanup_result.succeeded = true;
tried = true;
succeeded = true;
return true;
}
catch (...)
{
LOG_TRACE(log, "Caught exception while removing nodes from ZooKeeper for this restore: {}",
LOG_TRACE(log, "Caught exception while removing nodes from ZooKeeper for this {}: {}",
is_restore ? "restore" : "backup",
getCurrentExceptionMessage(/* with_stacktrace= */ false, /* check_embedded_stacktrace= */ true));

std::lock_guard lock{mutex};
cleanup_result.exception = std::current_exception();
tried = true;

if (throw_if_error)
throw;
@ -12,14 +12,14 @@ namespace DB
class BackupCoordinationCleaner
{
public:
BackupCoordinationCleaner(const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_);
BackupCoordinationCleaner(bool is_restore_, const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_);

void cleanup();
bool tryCleanupAfterError() noexcept;
bool cleanup(bool throw_if_error);

private:
bool tryRemoveAllNodes(bool throw_if_error, WithRetries::Kind retries_kind);
bool cleanupImpl(bool throw_if_error, WithRetries::Kind retries_kind);

const bool is_restore;
const String zookeeper_path;

/// A reference to a field of the parent object which is either BackupCoordinationOnCluster or RestoreCoordinationOnCluster.
@ -27,13 +27,8 @@ private:

const LoggerPtr log;

struct CleanupResult
{
bool succeeded = false;
std::exception_ptr exception;
};
CleanupResult cleanup_result TSA_GUARDED_BY(mutex);

bool tried TSA_GUARDED_BY(mutex) = false;
bool succeeded TSA_GUARDED_BY(mutex) = false;
std::mutex mutex;
};
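The two flags replace the old CleanupResult: cleanup is attempted at most once, a repeated call returns the cached outcome, and a failed attempt is never silently retried. A compact sketch of that run-once state machine (removeNodes() is a hypothetical stand-in for the retried ZooKeeper removal):

#include <mutex>

// Sketch of the cleanup state machine above: at most one real attempt;
// later calls return the cached outcome instead of touching ZooKeeper again.
class CleanupOnce
{
public:
    bool cleanup()
    {
        {
            std::lock_guard lock{mutex};
            if (succeeded)
                return true;   // nodes already removed
            if (tried)
                return false;  // an earlier attempt failed; don't retry here
        }

        bool ok = removeNodes(); // hypothetical: the real code removes ZooKeeper nodes with retries

        std::lock_guard lock{mutex};
        tried = true;
        succeeded = ok;
        return ok;
    }

private:
    bool removeNodes() { return true; } // stand-in for the actual removal
    bool tried = false;
    bool succeeded = false;
    std::mutex mutex;
};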

@ -11,12 +11,11 @@ namespace DB
{

BackupCoordinationLocal::BackupCoordinationLocal(
const UUID & backup_uuid_,
bool is_plain_backup_,
bool allow_concurrent_backup_,
BackupConcurrencyCounters & concurrency_counters_)
: log(getLogger("BackupCoordinationLocal"))
, concurrency_check(backup_uuid_, /* is_restore = */ false, /* on_cluster = */ false, allow_concurrent_backup_, concurrency_counters_)
, concurrency_check(/* is_restore = */ false, /* on_cluster = */ false, /* zookeeper_path = */ "", allow_concurrent_backup_, concurrency_counters_)
, file_infos(is_plain_backup_)
{
}

@ -23,20 +23,19 @@ class BackupCoordinationLocal : public IBackupCoordination
{
public:
explicit BackupCoordinationLocal(
const UUID & backup_uuid_,
bool is_plain_backup_,
bool allow_concurrent_backup_,
BackupConcurrencyCounters & concurrency_counters_);

~BackupCoordinationLocal() override;

void setBackupQueryIsSentToOtherHosts() override {}
bool isBackupQuerySentToOtherHosts() const override { return false; }
Strings setStage(const String &, const String &, bool) override { return {}; }
void setBackupQueryWasSentToOtherHosts() override {}
bool trySetError(std::exception_ptr) override { return true; }
void finish() override {}
bool tryFinishAfterError() noexcept override { return true; }
void waitForOtherHostsToFinish() override {}
bool tryWaitForOtherHostsToFinishAfterError() noexcept override { return true; }
bool setError(std::exception_ptr, bool) override { return true; }
bool waitOtherHostsFinish(bool) const override { return true; }
bool finish(bool) override { return true; }
bool cleanup(bool) override { return true; }

void addReplicatedPartNames(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name,
const std::vector<PartNameAndChecksum> & part_names_and_checksums) override;
@ -184,17 +184,21 @@ BackupCoordinationOnCluster::BackupCoordinationOnCluster(
, plain_backup(is_plain_backup_)
, log(getLogger("BackupCoordinationOnCluster"))
, with_retries(log, get_zookeeper_, keeper_settings, process_list_element_, [root_zookeeper_path_](Coordination::ZooKeeperWithFaultInjection::Ptr zk) { zk->sync(root_zookeeper_path_); })
, concurrency_check(backup_uuid_, /* is_restore = */ false, /* on_cluster = */ true, allow_concurrent_backup_, concurrency_counters_)
, stage_sync(/* is_restore = */ false, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_backup_, with_retries, schedule_, process_list_element_, log)
, cleaner(zookeeper_path, with_retries, log)
, cleaner(/* is_restore = */ false, zookeeper_path, with_retries, log)
, stage_sync(/* is_restore = */ false, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_backup_, concurrency_counters_, with_retries, schedule_, process_list_element_, log)
{
createRootNodes();
try
{
createRootNodes();
}
catch (...)
{
stage_sync.setError(std::current_exception(), /* throw_if_error = */ false);
throw;
}
}

BackupCoordinationOnCluster::~BackupCoordinationOnCluster()
{
tryFinishImpl();
}
BackupCoordinationOnCluster::~BackupCoordinationOnCluster() = default;

void BackupCoordinationOnCluster::createRootNodes()
{
@ -217,69 +221,52 @@ void BackupCoordinationOnCluster::createRootNodes()
});
}

void BackupCoordinationOnCluster::setBackupQueryIsSentToOtherHosts()
{
stage_sync.setQueryIsSentToOtherHosts();
}

bool BackupCoordinationOnCluster::isBackupQuerySentToOtherHosts() const
{
return stage_sync.isQuerySentToOtherHosts();
}

Strings BackupCoordinationOnCluster::setStage(const String & new_stage, const String & message, bool sync)
{
stage_sync.setStage(new_stage, message);

if (!sync)
return {};

return stage_sync.waitForHostsToReachStage(new_stage, all_hosts_without_initiator);
if (sync)
return stage_sync.waitHostsReachStage(all_hosts_without_initiator, new_stage);
return {};
}

void BackupCoordinationOnCluster::setBackupQueryWasSentToOtherHosts()
bool BackupCoordinationOnCluster::setError(std::exception_ptr exception, bool throw_if_error)
{
backup_query_was_sent_to_other_hosts = true;
return stage_sync.setError(exception, throw_if_error);
}

bool BackupCoordinationOnCluster::trySetError(std::exception_ptr exception)
bool BackupCoordinationOnCluster::waitOtherHostsFinish(bool throw_if_error) const
{
return stage_sync.trySetError(exception);
return stage_sync.waitOtherHostsFinish(throw_if_error);
}

void BackupCoordinationOnCluster::finish()
bool BackupCoordinationOnCluster::finish(bool throw_if_error)
{
bool other_hosts_also_finished = false;
stage_sync.finish(other_hosts_also_finished);

if ((current_host == kInitiator) && (other_hosts_also_finished || !backup_query_was_sent_to_other_hosts))
cleaner.cleanup();
return stage_sync.finish(throw_if_error);
}

bool BackupCoordinationOnCluster::tryFinishAfterError() noexcept
bool BackupCoordinationOnCluster::cleanup(bool throw_if_error)
{
return tryFinishImpl();
}

bool BackupCoordinationOnCluster::tryFinishImpl() noexcept
{
bool other_hosts_also_finished = false;
if (!stage_sync.tryFinishAfterError(other_hosts_also_finished))
return false;

if ((current_host == kInitiator) && (other_hosts_also_finished || !backup_query_was_sent_to_other_hosts))
/// All the hosts must finish before we remove the coordination nodes.
bool expect_other_hosts_finished = stage_sync.isQuerySentToOtherHosts() || !stage_sync.isErrorSet();
bool all_hosts_finished = stage_sync.finished() && (stage_sync.otherHostsFinished() || !expect_other_hosts_finished);
if (!all_hosts_finished)
{
if (!cleaner.tryCleanupAfterError())
return false;
}

return true;
}

void BackupCoordinationOnCluster::waitForOtherHostsToFinish()
{
if ((current_host != kInitiator) || !backup_query_was_sent_to_other_hosts)
return;
stage_sync.waitForOtherHostsToFinish();
}

bool BackupCoordinationOnCluster::tryWaitForOtherHostsToFinishAfterError() noexcept
{
if (current_host != kInitiator)
auto unfinished_hosts = expect_other_hosts_finished ? stage_sync.getUnfinishedHosts() : Strings{current_host};
LOG_INFO(log, "Skipping removing nodes from ZooKeeper because hosts {} didn't finish",
BackupCoordinationStageSync::getHostsDesc(unfinished_hosts));
return false;
if (!backup_query_was_sent_to_other_hosts)
return true;
return stage_sync.tryWaitForOtherHostsToFinishAfterError();
}
return cleaner.cleanup(throw_if_error);
}

ZooKeeperRetriesInfo BackupCoordinationOnCluster::getOnClusterInitializationKeeperRetriesInfo() const
@ -1,7 +1,6 @@
#pragma once

#include <Backups/IBackupCoordination.h>
#include <Backups/BackupConcurrencyCheck.h>
#include <Backups/BackupCoordinationCleaner.h>
#include <Backups/BackupCoordinationFileInfos.h>
#include <Backups/BackupCoordinationReplicatedAccess.h>
@ -20,7 +19,7 @@ class BackupCoordinationOnCluster : public IBackupCoordination
{
public:
/// Empty string as the current host is used to mark the initiator of a BACKUP ON CLUSTER query.
static const constexpr std::string_view kInitiator;
static const constexpr std::string_view kInitiator = BackupCoordinationStageSync::kInitiator;

BackupCoordinationOnCluster(
const UUID & backup_uuid_,
@ -37,13 +36,13 @@ public:

~BackupCoordinationOnCluster() override;

void setBackupQueryIsSentToOtherHosts() override;
bool isBackupQuerySentToOtherHosts() const override;
Strings setStage(const String & new_stage, const String & message, bool sync) override;
void setBackupQueryWasSentToOtherHosts() override;
bool trySetError(std::exception_ptr exception) override;
void finish() override;
bool tryFinishAfterError() noexcept override;
void waitForOtherHostsToFinish() override;
bool tryWaitForOtherHostsToFinishAfterError() noexcept override;
bool setError(std::exception_ptr exception, bool throw_if_error) override;
bool waitOtherHostsFinish(bool throw_if_error) const override;
bool finish(bool throw_if_error) override;
bool cleanup(bool throw_if_error) override;

void addReplicatedPartNames(
const String & table_zk_path,
@ -110,11 +109,10 @@ private:
const bool plain_backup;
LoggerPtr const log;

/// The order is important: `stage_sync` must be initialized after `with_retries` and `cleaner`.
const WithRetries with_retries;
BackupConcurrencyCheck concurrency_check;
BackupCoordinationStageSync stage_sync;
BackupCoordinationCleaner cleaner;
std::atomic<bool> backup_query_was_sent_to_other_hosts = false;
BackupCoordinationStageSync stage_sync;

mutable std::optional<BackupCoordinationReplicatedTables> replicated_tables TSA_GUARDED_BY(replicated_tables_mutex);
mutable std::optional<BackupCoordinationReplicatedAccess> replicated_access TSA_GUARDED_BY(replicated_access_mutex);
File diff suppressed because it is too large
@ -1,7 +1,9 @@
#pragma once

#include <Backups/BackupConcurrencyCheck.h>
#include <Backups/WithRetries.h>


namespace DB
{

@ -9,12 +11,16 @@ namespace DB
class BackupCoordinationStageSync
{
public:
/// Empty string as the current host is used to mark the initiator of a BACKUP ON CLUSTER or RESTORE ON CLUSTER query.
static const constexpr std::string_view kInitiator;

BackupCoordinationStageSync(
bool is_restore_, /// true if this is a RESTORE ON CLUSTER command, false if this is a BACKUP ON CLUSTER command
const String & zookeeper_path_, /// path to the "stage" folder in ZooKeeper
const String & current_host_, /// the current host, or an empty string if it's the initiator of the BACKUP/RESTORE ON CLUSTER command
const Strings & all_hosts_, /// all the hosts (including the initiator and the current host) performing the BACKUP/RESTORE ON CLUSTER command
bool allow_concurrency_, /// whether it's allowed to have concurrent backups or restores.
BackupConcurrencyCounters & concurrency_counters_,
const WithRetries & with_retries_,
ThreadPoolCallbackRunnerUnsafe<void> schedule_,
QueryStatusPtr process_list_element_,
@ -22,30 +28,37 @@ public:

~BackupCoordinationStageSync();

/// Sets that the BACKUP or RESTORE query was sent to other hosts.
void setQueryIsSentToOtherHosts();
bool isQuerySentToOtherHosts() const;

/// Sets the stage of the current host and signals other hosts if there were other hosts waiting for that.
void setStage(const String & stage, const String & stage_result = {});

/// Waits until all the specified hosts come to the specified stage.
/// The function returns the results which specified hosts set when they came to the required stage.
/// If it doesn't happen before the timeout then the function will stop waiting and throw an exception.
Strings waitForHostsToReachStage(const String & stage_to_wait, const Strings & hosts, std::optional<std::chrono::milliseconds> timeout = {}) const;

/// Waits until all the other hosts finish their work.
/// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled.
void waitForOtherHostsToFinish() const;

/// Lets other hosts know that the current host has finished its work.
void finish(bool & other_hosts_also_finished);
/// Waits until specified hosts come to the specified stage.
/// The function returns the results which the specified hosts set when they came to the required stage.
Strings waitHostsReachStage(const Strings & hosts, const String & stage_to_wait) const;

/// Lets other hosts know that the current host has encountered an error.
bool trySetError(std::exception_ptr exception) noexcept;
/// The function returns true if it successfully created the error node or if the error node was found to already exist.
bool setError(std::exception_ptr exception, bool throw_if_error);
bool isErrorSet() const;

/// Waits until all the other hosts finish their work (as a part of error-handling process).
/// Doesn't stop waiting if some host encounters an error or gets cancelled.
bool tryWaitForOtherHostsToFinishAfterError() const noexcept;
/// Waits until the hosts other than the current host finish their work. Must be called before finish().
/// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled.
bool waitOtherHostsFinish(bool throw_if_error) const;
bool otherHostsFinished() const;

/// Lets other hosts know that the current host has finished its work (as a part of error-handling process).
bool tryFinishAfterError(bool & other_hosts_also_finished) noexcept;
/// Lets other hosts know that the current host has finished its work.
bool finish(bool throw_if_error);
bool finished() const;

/// Returns true if all the hosts have finished.
bool allHostsFinished() const { return finished() && otherHostsFinished(); }

/// Returns a list of the hosts which haven't finished yet.
Strings getUnfinishedHosts() const;
Strings getUnfinishedOtherHosts() const;

/// Returns a printable name of a specific host. For empty host the function returns "initiator".
static String getHostDesc(const String & host);
@ -59,8 +72,8 @@ private:
void createRootNodes();

/// Atomically creates both 'start' and 'alive' nodes and also checks that there is no concurrent backup or restore if `allow_concurrency` is false.
void createStartAndAliveNodes();
void createStartAndAliveNodes(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper);
void createStartAndAliveNodesAndCheckConcurrency(BackupConcurrencyCounters & concurrency_counters_);
void createStartAndAliveNodesAndCheckConcurrency(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper);

/// Deserialize the version of a node stored in the 'start' node.
int parseStartNode(const String & start_node_contents, const String & host) const;
@ -78,14 +91,17 @@ private:

/// Reads the current state from ZooKeeper without throwing exceptions.
void readCurrentState(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper);

/// Creates a stage node to let other hosts know we've reached the specified stage.
void createStageNode(const String & stage, const String & stage_result, Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper);
String getStageNodePath(const String & stage) const;

/// Lets other hosts know that the current host has encountered an error.
bool trySetError(const Exception & exception);
void setError(const Exception & exception);
bool setError(const Exception & exception, bool throw_if_error);
void createErrorNode(const Exception & exception, Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper);

/// Deserializes an error stored in the error node.
static std::pair<std::exception_ptr, String> parseErrorNode(const String & error_node_contents);
std::pair<std::exception_ptr, String> parseErrorNode(const String & error_node_contents) const;

/// Reset the `connected` flag for each host.
void resetConnectedFlag();
@ -102,19 +118,27 @@ private:
void cancelQueryIfDisconnectedTooLong();

/// Used by waitForHostsToReachStage() to check if everything is ready to return.
bool checkIfHostsReachStage(const Strings & hosts, const String & stage_to_wait, bool time_is_out, std::optional<std::chrono::milliseconds> timeout, Strings & results) const TSA_REQUIRES(mutex);
bool checkIfHostsReachStage(const Strings & hosts, const String & stage_to_wait, Strings & results) const TSA_REQUIRES(mutex);

/// Creates the 'finish' node.
bool tryFinishImpl();
bool tryFinishImpl(bool & other_hosts_also_finished, bool throw_if_error, WithRetries::Kind retries_kind);
void createFinishNodeAndRemoveAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper);
bool finishImpl(bool throw_if_error, WithRetries::Kind retries_kind);
void createFinishNodeAndRemoveAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper, bool throw_if_error);

/// Returns the version used by the initiator.
int getInitiatorVersion() const;

/// Waits until all the other hosts finish their work.
bool tryWaitForOtherHostsToFinishImpl(const String & reason, bool throw_if_error, std::optional<std::chrono::seconds> timeout) const;
bool checkIfOtherHostsFinish(const String & reason, bool throw_if_error, bool time_is_out, std::optional<std::chrono::milliseconds> timeout) const TSA_REQUIRES(mutex);
bool waitOtherHostsFinishImpl(const String & reason, std::optional<std::chrono::seconds> timeout, bool throw_if_error) const;
bool checkIfOtherHostsFinish(const String & reason, std::optional<std::chrono::milliseconds> timeout, bool time_is_out, bool & result, bool throw_if_error) const TSA_REQUIRES(mutex);

/// Returns true if all the hosts have finished.
bool allHostsFinishedNoLock() const TSA_REQUIRES(mutex);
bool finishedNoLock() const TSA_REQUIRES(mutex);
bool otherHostsFinishedNoLock() const TSA_REQUIRES(mutex);

/// Returns a list of the hosts which haven't finished yet.
Strings getUnfinishedHostsNoLock() const TSA_REQUIRES(mutex);
Strings getUnfinishedOtherHostsNoLock() const TSA_REQUIRES(mutex);

const bool is_restore;
const String operation_name;
@ -138,15 +162,16 @@ private:
/// Paths in ZooKeeper.
const std::filesystem::path zookeeper_path;
const String root_zookeeper_path;
const String operation_node_path;
const String operation_zookeeper_path;
const String operation_node_name;
const String stage_node_path;
const String start_node_path;
const String finish_node_path;
const String num_hosts_node_path;
const String error_node_path;
const String alive_node_path;
const String alive_tracker_node_path;
const String error_node_path;

std::optional<BackupConcurrencyCheck> local_concurrency_check;

std::shared_ptr<Poco::Event> zk_nodes_changed;

@ -176,25 +201,21 @@ private:
{
std::map<String /* host */, HostInfo> hosts; /// std::map because we need to compare states
std::optional<String> host_with_error;
bool cancelled = false;

bool operator ==(const State & other) const;
bool operator !=(const State & other) const;
void merge(const State & other);
};

State state TSA_GUARDED_BY(mutex);
mutable std::condition_variable state_changed;

std::future<void> watching_thread_future;
std::atomic<bool> should_stop_watching_thread = false;
bool should_stop_watching_thread TSA_GUARDED_BY(mutex) = false;

struct FinishResult
{
bool succeeded = false;
std::exception_ptr exception;
bool other_hosts_also_finished = false;
};
FinishResult finish_result TSA_GUARDED_BY(mutex);
bool query_is_sent_to_other_hosts TSA_GUARDED_BY(mutex) = false;
bool tried_to_finish TSA_GUARDED_BY(mutex) = false;
bool tried_to_set_error TSA_GUARDED_BY(mutex) = false;

mutable std::mutex mutex;
};
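At its core the stage sync is a shared state guarded by a mutex plus a condition variable (state_changed) that is signalled whenever the watcher merges in a new state read from ZooKeeper. A reduced sketch of the wait loop, with hypothetical types and none of the real error/disconnect/cancellation handling:

#include <condition_variable>
#include <map>
#include <mutex>
#include <set>
#include <string>

// Reduced model of the stage sync: hosts report stages; waiters block on a
// condition variable until every requested host has reached the wanted stage.
class StageBoard
{
public:
    void setStage(const std::string & host, const std::string & stage)
    {
        std::lock_guard lock{mutex};
        stages[host].insert(stage);
        state_changed.notify_all(); // wake up anyone blocked in waitHostsReachStage()
    }

    void waitHostsReachStage(const std::set<std::string> & hosts, const std::string & stage) const
    {
        std::unique_lock lock{mutex};
        state_changed.wait(lock, [&]
        {
            for (const auto & host : hosts)
            {
                auto it = stages.find(host);
                if (it == stages.end() || !it->second.contains(stage))
                    return false; // this host hasn't reached the stage yet
            }
            return true;
        });
    }

private:
    std::map<std::string, std::set<std::string>> stages;
    mutable std::condition_variable state_changed;
    mutable std::mutex mutex;
};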

@ -329,6 +329,7 @@ std::pair<OperationID, BackupStatus> BackupsWorker::start(const ASTPtr & backup_
struct BackupsWorker::BackupStarter
{
BackupsWorker & backups_worker;
LoggerPtr log;
std::shared_ptr<ASTBackupQuery> backup_query;
ContextPtr query_context; /// We have to keep `query_context` until the end of the operation because a pointer to it is stored inside the ThreadGroup we're using.
ContextMutablePtr backup_context;
@ -345,6 +346,7 @@ struct BackupsWorker::BackupStarter

BackupStarter(BackupsWorker & backups_worker_, const ASTPtr & query_, const ContextPtr & context_)
: backups_worker(backups_worker_)
, log(backups_worker.log)
, backup_query(std::static_pointer_cast<ASTBackupQuery>(query_->clone()))
, query_context(context_)
, backup_context(Context::createCopy(query_context))
@ -399,9 +401,20 @@ struct BackupsWorker::BackupStarter
chassert(!backup);
backup = backups_worker.openBackupForWriting(backup_info, backup_settings, backup_coordination, backup_context);

backups_worker.doBackup(
backup, backup_query, backup_id, backup_name_for_logging, backup_settings, backup_coordination, backup_context,
on_cluster, cluster);
backups_worker.doBackup(backup, backup_query, backup_id, backup_settings, backup_coordination, backup_context,
on_cluster, cluster);

backup_coordination->finish(/* throw_if_error = */ true);
backup.reset();

/// The backup coordination is not needed anymore.
if (!is_internal_backup)
backup_coordination->cleanup(/* throw_if_error = */ true);
backup_coordination.reset();

/// NOTE: setStatus is called after setNumFilesAndSize in order to have actual information in a backup log record
LOG_INFO(log, "{} {} was created successfully", (is_internal_backup ? "Internal backup" : "Backup"), backup_name_for_logging);
backups_worker.setStatus(backup_id, BackupStatus::BACKUP_CREATED);
}

void onException()
@ -416,16 +429,29 @@ struct BackupsWorker::BackupStarter
if (backup && !backup->setIsCorrupted())
should_remove_files_in_backup = false;

if (backup_coordination && backup_coordination->trySetError(std::current_exception()))
bool all_hosts_finished = false;

if (backup_coordination && backup_coordination->setError(std::current_exception(), /* throw_if_error = */ false))
{
bool other_hosts_finished = backup_coordination->tryWaitForOtherHostsToFinishAfterError();
bool other_hosts_finished = !is_internal_backup
&& (!backup_coordination->isBackupQuerySentToOtherHosts() || backup_coordination->waitOtherHostsFinish(/* throw_if_error = */ false));

if (should_remove_files_in_backup && other_hosts_finished)
backup->tryRemoveAllFiles();

backup_coordination->tryFinishAfterError();
all_hosts_finished = backup_coordination->finish(/* throw_if_error = */ false) && other_hosts_finished;
}

if (!all_hosts_finished)
should_remove_files_in_backup = false;

if (backup && should_remove_files_in_backup)
backup->tryRemoveAllFiles();

backup.reset();

if (backup_coordination && all_hosts_finished)
backup_coordination->cleanup(/* throw_if_error = */ false);

backup_coordination.reset();

backups_worker.setStatusSafe(backup_id, getBackupStatusFromCurrentException());
}
};
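With the reworked interface, both the happy path and the error path drive the coordination object explicitly instead of relying on the old try*AfterError helpers, and cleanup of the ZooKeeper nodes happens only once every host is known to have finished. A condensed sketch of that call sequence, with a hypothetical Coordination interface mirroring the calls above:

#include <exception>

// Condensed model of the coordination calls made by BackupStarter above.
struct Coordination
{
    virtual ~Coordination() = default;
    virtual bool setError(std::exception_ptr e, bool throw_if_error) = 0;
    virtual bool isQuerySentToOtherHosts() const = 0;
    virtual bool waitOtherHostsFinish(bool throw_if_error) const = 0;
    virtual bool finish(bool throw_if_error) = 0;
    virtual bool cleanup(bool throw_if_error) = 0;
};

void onSuccess(Coordination & coordination, bool is_internal)
{
    coordination.finish(/* throw_if_error = */ true);
    if (!is_internal)
        coordination.cleanup(/* throw_if_error = */ true); // safe: every host has finished
}

void onException(Coordination & coordination, bool is_internal)
{
    // std::current_exception() is meaningful only inside a catch block; shown here for shape.
    bool all_hosts_finished = false;
    if (coordination.setError(std::current_exception(), /* throw_if_error = */ false))
    {
        bool other_hosts_finished = !is_internal
            && (!coordination.isQuerySentToOtherHosts() || coordination.waitOtherHostsFinish(/* throw_if_error = */ false));
        all_hosts_finished = coordination.finish(/* throw_if_error = */ false) && other_hosts_finished;
    }
    if (all_hosts_finished)
        coordination.cleanup(/* throw_if_error = */ false); // remove ZooKeeper nodes only when nobody needs them
}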

@ -497,7 +523,6 @@ void BackupsWorker::doBackup(
BackupMutablePtr backup,
const std::shared_ptr<ASTBackupQuery> & backup_query,
const OperationID & backup_id,
const String & backup_name_for_logging,
const BackupSettings & backup_settings,
std::shared_ptr<IBackupCoordination> backup_coordination,
ContextMutablePtr context,
@ -521,10 +546,10 @@
backup_settings.copySettingsToQuery(*backup_query);
sendQueryToOtherHosts(*backup_query, cluster, backup_settings.shard_num, backup_settings.replica_num,
context, required_access, backup_coordination->getOnClusterInitializationKeeperRetriesInfo());
backup_coordination->setBackupQueryWasSentToOtherHosts();
backup_coordination->setBackupQueryIsSentToOtherHosts();

/// Wait until all the hosts have written their backup entries.
backup_coordination->waitForOtherHostsToFinish();
backup_coordination->waitOtherHostsFinish(/* throw_if_error = */ true);
}
else
{
@ -569,18 +594,8 @@
compressed_size = backup->getCompressedSize();
}

/// Close the backup.
backup.reset();

/// The backup coordination is not needed anymore.
backup_coordination->finish();

/// NOTE: we need to update metadata again after backup->finalizeWriting(), because backup metadata is written there.
setNumFilesAndSize(backup_id, num_files, total_size, num_entries, uncompressed_size, compressed_size, 0, 0);

/// NOTE: setStatus is called after setNumFilesAndSize in order to have actual information in a backup log record
LOG_INFO(log, "{} {} was created successfully", (is_internal_backup ? "Internal backup" : "Backup"), backup_name_for_logging);
setStatus(backup_id, BackupStatus::BACKUP_CREATED);
}

@ -687,6 +702,7 @@ void BackupsWorker::writeBackupEntries(
struct BackupsWorker::RestoreStarter
{
BackupsWorker & backups_worker;
LoggerPtr log;
std::shared_ptr<ASTBackupQuery> restore_query;
ContextPtr query_context; /// We have to keep `query_context` until the end of the operation because a pointer to it is stored inside the ThreadGroup we're using.
ContextMutablePtr restore_context;
@ -702,6 +718,7 @@ struct BackupsWorker::RestoreStarter

RestoreStarter(BackupsWorker & backups_worker_, const ASTPtr & query_, const ContextPtr & context_)
: backups_worker(backups_worker_)
, log(backups_worker.log)
, restore_query(std::static_pointer_cast<ASTBackupQuery>(query_->clone()))
, query_context(context_)
, restore_context(Context::createCopy(query_context))
@ -753,16 +770,17 @@ struct BackupsWorker::RestoreStarter
}
restore_coordination = backups_worker.makeRestoreCoordination(on_cluster, restore_settings, restore_context);

backups_worker.doRestore(
restore_query,
restore_id,
backup_name_for_logging,
backup_info,
restore_settings,
restore_coordination,
restore_context,
on_cluster,
cluster);
backups_worker.doRestore(restore_query, restore_id, backup_info, restore_settings, restore_coordination, restore_context,
on_cluster, cluster);

/// The restore coordination is not needed anymore.
restore_coordination->finish(/* throw_if_error = */ true);
if (!is_internal_restore)
restore_coordination->cleanup(/* throw_if_error = */ true);
restore_coordination.reset();

LOG_INFO(log, "Restored from {} {} successfully", (is_internal_restore ? "internal backup" : "backup"), backup_name_for_logging);
backups_worker.setStatus(restore_id, BackupStatus::RESTORED);
}

void onException()
@ -770,12 +788,16 @@ struct BackupsWorker::RestoreStarter
/// Something bad happened, some data were not restored.
tryLogCurrentException(backups_worker.log, fmt::format("Failed to restore from {} {}", (is_internal_restore ? "internal backup" : "backup"), backup_name_for_logging));

if (restore_coordination && restore_coordination->trySetError(std::current_exception()))
if (restore_coordination && restore_coordination->setError(std::current_exception(), /* throw_if_error = */ false))
{
restore_coordination->tryWaitForOtherHostsToFinishAfterError();
restore_coordination->tryFinishAfterError();
bool other_hosts_finished = !is_internal_restore
&& (!restore_coordination->isRestoreQuerySentToOtherHosts() || restore_coordination->waitOtherHostsFinish(/* throw_if_error = */ false));
if (restore_coordination->finish(/* throw_if_error = */ false) && other_hosts_finished)
restore_coordination->cleanup(/* throw_if_error = */ false);
}

restore_coordination.reset();

backups_worker.setStatusSafe(restore_id, getRestoreStatusFromCurrentException());
}
};
@ -838,7 +860,6 @@ BackupPtr BackupsWorker::openBackupForReading(const BackupInfo & backup_info, co
void BackupsWorker::doRestore(
const std::shared_ptr<ASTBackupQuery> & restore_query,
const OperationID & restore_id,
const String & backup_name_for_logging,
const BackupInfo & backup_info,
RestoreSettings restore_settings,
std::shared_ptr<IRestoreCoordination> restore_coordination,
@ -882,10 +903,10 @@ void BackupsWorker::doRestore(
restore_settings.copySettingsToQuery(*restore_query);
sendQueryToOtherHosts(*restore_query, cluster, restore_settings.shard_num, restore_settings.replica_num,
context, {}, restore_coordination->getOnClusterInitializationKeeperRetriesInfo());
restore_coordination->setRestoreQueryWasSentToOtherHosts();
restore_coordination->setRestoreQueryIsSentToOtherHosts();

/// Wait until all the hosts have finished their restoring work.
restore_coordination->waitForOtherHostsToFinish();
restore_coordination->waitOtherHostsFinish(/* throw_if_error = */ true);
}
else
{
@ -905,12 +926,6 @@ void BackupsWorker::doRestore(
backup, context, getThreadPool(ThreadPoolId::RESTORE), after_task_callback};
restorer.run(RestorerFromBackup::RESTORE);
}

/// The restore coordination is not needed anymore.
restore_coordination->finish();

LOG_INFO(log, "Restored from {} {} successfully", (is_internal_restore ? "internal backup" : "backup"), backup_name_for_logging);
setStatus(restore_id, BackupStatus::RESTORED);
}

@ -943,7 +958,7 @@ BackupsWorker::makeBackupCoordination(bool on_cluster, const BackupSettings & ba
if (!on_cluster)
{
return std::make_shared<BackupCoordinationLocal>(
*backup_settings.backup_uuid, !backup_settings.deduplicate_files, allow_concurrent_backups, *concurrency_counters);
!backup_settings.deduplicate_files, allow_concurrent_backups, *concurrency_counters);
}

bool is_internal_backup = backup_settings.internal;
@ -981,8 +996,7 @@ BackupsWorker::makeRestoreCoordination(bool on_cluster, const RestoreSettings &
{
if (!on_cluster)
{
return std::make_shared<RestoreCoordinationLocal>(
*restore_settings.restore_uuid, allow_concurrent_restores, *concurrency_counters);
return std::make_shared<RestoreCoordinationLocal>(allow_concurrent_restores, *concurrency_counters);
}

bool is_internal_restore = restore_settings.internal;
@ -81,7 +81,6 @@ private:
BackupMutablePtr backup,
const std::shared_ptr<ASTBackupQuery> & backup_query,
const BackupOperationID & backup_id,
const String & backup_name_for_logging,
const BackupSettings & backup_settings,
std::shared_ptr<IBackupCoordination> backup_coordination,
ContextMutablePtr context,
@ -102,7 +101,6 @@ private:
void doRestore(
const std::shared_ptr<ASTBackupQuery> & restore_query,
const BackupOperationID & restore_id,
const String & backup_name_for_logging,
const BackupInfo & backup_info,
RestoreSettings restore_settings,
std::shared_ptr<IRestoreCoordination> restore_coordination,
@ -20,29 +20,27 @@ class IBackupCoordination
public:
virtual ~IBackupCoordination() = default;

/// Sets that the backup query was sent to other hosts.
/// Function waitOtherHostsFinish() will check that to find out if it should really wait or not.
virtual void setBackupQueryIsSentToOtherHosts() = 0;
virtual bool isBackupQuerySentToOtherHosts() const = 0;

/// Sets the current stage and waits for other hosts to come to this stage too.
virtual Strings setStage(const String & new_stage, const String & message, bool sync) = 0;

/// Sets that the backup query was sent to other hosts.
/// Function waitForOtherHostsToFinish() will check that to find out if it should really wait or not.
virtual void setBackupQueryWasSentToOtherHosts() = 0;

/// Lets other hosts know that the current host has encountered an error.
virtual bool trySetError(std::exception_ptr exception) = 0;

/// Lets other hosts know that the current host has finished its work.
virtual void finish() = 0;

/// Lets other hosts know that the current host has finished its work (as a part of error-handling process).
virtual bool tryFinishAfterError() noexcept = 0;
/// Returns true if the information is successfully passed so other hosts can read it.
virtual bool setError(std::exception_ptr exception, bool throw_if_error) = 0;

/// Waits until all the other hosts finish their work.
/// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled.
virtual void waitForOtherHostsToFinish() = 0;
virtual bool waitOtherHostsFinish(bool throw_if_error) const = 0;

/// Waits until all the other hosts finish their work (as a part of error-handling process).
/// Doesn't stop waiting if some host encounters an error or gets cancelled.
virtual bool tryWaitForOtherHostsToFinishAfterError() noexcept = 0;
/// Lets other hosts know that the current host has finished its work.
virtual bool finish(bool throw_if_error) = 0;

/// Removes temporary nodes in ZooKeeper.
virtual bool cleanup(bool throw_if_error) = 0;

struct PartNameAndChecksum
{
Some files were not shown because too many files have changed in this diff