mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-10 01:25:21 +00:00

commit eef436fe22
Merge remote-tracking branch 'upstream/master' into HEAD
3  .gitmodules  vendored

@@ -246,3 +246,6 @@
 [submodule "contrib/bzip2"]
     path = contrib/bzip2
     url = https://github.com/ClickHouse-Extras/bzip2.git
+[submodule "contrib/magic_enum"]
+    path = contrib/magic_enum
+    url = https://github.com/Neargye/magic_enum
@@ -80,16 +80,16 @@ include (cmake/find/ccache.cmake)
 # ccache ignore it.
 option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling." OFF)
 if (ENABLE_CHECK_HEAVY_BUILDS)
-    # set DATA (since RSS does not work since 2.6.x+) to 2G
+    # set DATA (since RSS does not work since 2.6.x+) to 5G
     set (RLIMIT_DATA 5000000000)
     # set VIRT (RLIMIT_AS) to 10G (DATA*10)
     set (RLIMIT_AS 10000000000)
-    # set CPU time limit to 600 seconds
-    set (RLIMIT_CPU 600)
+    # set CPU time limit to 1000 seconds
+    set (RLIMIT_CPU 1000)

     # gcc10/gcc10/clang -fsanitize=memory is too heavy
     if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
-        set (RLIMIT_DATA 10000000000)
+        set (RLIMIT_DATA 10000000000) # 10G
     endif()

     set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER})
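Aside: the launcher above wraps every compiler invocation in prlimit, which applies classic POSIX resource limits. For illustration only, here are the same caps expressed through setrlimit in C++ (a sketch; the values are copied from the hunk, and nothing in this snippet is part of the ClickHouse build itself):

    #include <sys/resource.h>
    #include <cstdio>

    // Sketch: the limits that `prlimit --as --data --cpu` imposes on each
    // compiler process, expressed via setrlimit for the current process.
    int main()
    {
        rlimit data{5'000'000'000ULL, 5'000'000'000ULL};   // RLIMIT_DATA: data segment, 5G
        rlimit as{10'000'000'000ULL, 10'000'000'000ULL};   // RLIMIT_AS: total virtual memory, 10G
        rlimit cpu{1000, 1000};                            // RLIMIT_CPU: CPU seconds, not wall-clock time

        if (setrlimit(RLIMIT_DATA, &data) || setrlimit(RLIMIT_AS, &as) || setrlimit(RLIMIT_CPU, &cpu))
            perror("setrlimit");
    }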
@@ -152,6 +152,7 @@ if (CMAKE_GENERATOR STREQUAL "Ninja" AND NOT DISABLE_COLORED_BUILD)
     set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-color=always")
 endif ()

+include (cmake/check_flags.cmake)
 include (cmake/add_warning.cmake)

 if (NOT MSVC)
@@ -166,7 +167,8 @@ if (COMPILER_CLANG)
         set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges")
     endif ()

-    if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 12.0.0)
+    if (HAS_USE_CTOR_HOMING)
         # For more info see https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/
         if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
             set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing")
             set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
@@ -192,7 +194,7 @@ endif ()
 # Make sure the final executable has symbols exported
 set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")

-find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-12" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
+find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-13" "llvm-objcopy-12" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")

 if (NOT OBJCOPY_PATH AND OS_DARWIN)
     find_program (BREW_PATH NAMES "brew")
@@ -379,7 +381,7 @@ if (COMPILER_CLANG)
     endif ()

     # Always prefer llvm tools when using clang. For instance, we cannot use GNU ar when llvm LTO is enabled
-    find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
+    find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-13" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")

     if (LLVM_AR_PATH)
         message(STATUS "Using llvm-ar: ${LLVM_AR_PATH}.")
@@ -388,7 +390,7 @@ if (COMPILER_CLANG)
         message(WARNING "Cannot find llvm-ar. System ar will be used instead. It does not work with ThinLTO.")
     endif ()

-    find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9" "llvm-ranlib-8")
+    find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-13" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9" "llvm-ranlib-8")

     if (LLVM_RANLIB_PATH)
         message(STATUS "Using llvm-ranlib: ${LLVM_RANLIB_PATH}.")
@@ -629,9 +631,6 @@ include_directories(${ConfigIncludePath})
 # Add as many warnings as possible for our own code.
 include (cmake/warnings.cmake)

-# Check if needed compiler flags are supported
-include (cmake/check_flags.cmake)
-
 add_subdirectory (base)
 add_subdirectory (src)
 add_subdirectory (programs)
@@ -85,6 +85,7 @@ target_link_libraries (common
         replxx
         cctz
         fmt
+        magic_enum
 )

 if (ENABLE_TESTS)
157  base/common/Decimal.h  Normal file

@@ -0,0 +1,157 @@
+#pragma once
+#include "common/extended_types.h"
+
+#if !defined(NO_SANITIZE_UNDEFINED)
+#if defined(__clang__)
+    #define NO_SANITIZE_UNDEFINED __attribute__((__no_sanitize__("undefined")))
+#else
+    #define NO_SANITIZE_UNDEFINED
+#endif
+#endif
+
+namespace DB
+{
+template <class> struct Decimal;
+class DateTime64;
+
+using Decimal32 = Decimal<Int32>;
+using Decimal64 = Decimal<Int64>;
+using Decimal128 = Decimal<Int128>;
+using Decimal256 = Decimal<Int256>;
+
+template <class T>
+concept is_decimal =
+    std::is_same_v<T, Decimal32>
+    || std::is_same_v<T, Decimal64>
+    || std::is_same_v<T, Decimal128>
+    || std::is_same_v<T, Decimal256>
+    || std::is_same_v<T, DateTime64>;
+
+template <class T>
+concept is_over_big_int =
+    std::is_same_v<T, Int128>
+    || std::is_same_v<T, UInt128>
+    || std::is_same_v<T, Int256>
+    || std::is_same_v<T, UInt256>
+    || std::is_same_v<T, Decimal128>
+    || std::is_same_v<T, Decimal256>;
+
+template <class T> struct NativeTypeT { using Type = T; };
+template <is_decimal T> struct NativeTypeT<T> { using Type = typename T::NativeType; };
+template <class T> using NativeType = typename NativeTypeT<T>::Type;
+
+/// Own FieldType for Decimal.
+/// It is only a "storage" for decimal.
+/// To perform operations, you also have to provide a scale (number of digits after point).
+template <typename T>
+struct Decimal
+{
+    using NativeType = T;
+
+    constexpr Decimal() = default;
+    constexpr Decimal(Decimal<T> &&) = default;
+    constexpr Decimal(const Decimal<T> &) = default;
+
+    constexpr Decimal(const T & value_): value(value_) {}
+
+    template <typename U>
+    constexpr Decimal(const Decimal<U> & x): value(x.value) {}
+
+    constexpr Decimal<T> & operator = (Decimal<T> &&) = default;
+    constexpr Decimal<T> & operator = (const Decimal<T> &) = default;
+
+    constexpr operator T () const { return value; }
+
+    template <typename U>
+    constexpr U convertTo() const
+    {
+        if constexpr (is_decimal<U>)
+            return convertTo<typename U::NativeType>();
+        else
+            return static_cast<U>(value);
+    }
+
+    const Decimal<T> & operator += (const T & x) { value += x; return *this; }
+    const Decimal<T> & operator -= (const T & x) { value -= x; return *this; }
+    const Decimal<T> & operator *= (const T & x) { value *= x; return *this; }
+    const Decimal<T> & operator /= (const T & x) { value /= x; return *this; }
+    const Decimal<T> & operator %= (const T & x) { value %= x; return *this; }
+
+    template <typename U> const Decimal<T> & operator += (const Decimal<U> & x) { value += x.value; return *this; }
+    template <typename U> const Decimal<T> & operator -= (const Decimal<U> & x) { value -= x.value; return *this; }
+    template <typename U> const Decimal<T> & operator *= (const Decimal<U> & x) { value *= x.value; return *this; }
+    template <typename U> const Decimal<T> & operator /= (const Decimal<U> & x) { value /= x.value; return *this; }
+    template <typename U> const Decimal<T> & operator %= (const Decimal<U> & x) { value %= x.value; return *this; }
+
+    /// This is to avoid UB for sumWithOverflow()
+    void NO_SANITIZE_UNDEFINED addOverflow(const T & x) { value += x; }
+
+    T value;
+};
+
+template <typename T> inline bool operator< (const Decimal<T> & x, const Decimal<T> & y) { return x.value < y.value; }
+template <typename T> inline bool operator> (const Decimal<T> & x, const Decimal<T> & y) { return x.value > y.value; }
+template <typename T> inline bool operator<= (const Decimal<T> & x, const Decimal<T> & y) { return x.value <= y.value; }
+template <typename T> inline bool operator>= (const Decimal<T> & x, const Decimal<T> & y) { return x.value >= y.value; }
+template <typename T> inline bool operator== (const Decimal<T> & x, const Decimal<T> & y) { return x.value == y.value; }
+template <typename T> inline bool operator!= (const Decimal<T> & x, const Decimal<T> & y) { return x.value != y.value; }
+
+template <typename T> inline Decimal<T> operator+ (const Decimal<T> & x, const Decimal<T> & y) { return x.value + y.value; }
+template <typename T> inline Decimal<T> operator- (const Decimal<T> & x, const Decimal<T> & y) { return x.value - y.value; }
+template <typename T> inline Decimal<T> operator* (const Decimal<T> & x, const Decimal<T> & y) { return x.value * y.value; }
+template <typename T> inline Decimal<T> operator/ (const Decimal<T> & x, const Decimal<T> & y) { return x.value / y.value; }
+template <typename T> inline Decimal<T> operator- (const Decimal<T> & x) { return -x.value; }
+
+/// Distinguishable type to allow function resolution/deduction based on value type,
+/// but also relatively easy to convert to/from Decimal64.
+class DateTime64 : public Decimal64
+{
+public:
+    using Base = Decimal64;
+    using Base::Base;
+    using NativeType = Base::NativeType;
+
+    constexpr DateTime64(const Base & v): Base(v) {}
+};
+}
+
+constexpr DB::UInt64 max_uint_mask = std::numeric_limits<DB::UInt64>::max();
+
+namespace std
+{
+    template <typename T>
+    struct hash<DB::Decimal<T>>
+    {
+        size_t operator()(const DB::Decimal<T> & x) const { return hash<T>()(x.value); }
+    };
+
+    template <>
+    struct hash<DB::Decimal128>
+    {
+        size_t operator()(const DB::Decimal128 & x) const
+        {
+            return std::hash<DB::Int64>()(x.value >> 64)
+                ^ std::hash<DB::Int64>()(x.value & max_uint_mask);
+        }
+    };
+
+    template <>
+    struct hash<DB::DateTime64>
+    {
+        size_t operator()(const DB::DateTime64 & x) const
+        {
+            return std::hash<DB::DateTime64::NativeType>()(x);
+        }
+    };
+
+    template <>
+    struct hash<DB::Decimal256>
+    {
+        size_t operator()(const DB::Decimal256 & x) const
+        {
+            // FIXME temp solution
+            return std::hash<DB::Int64>()(static_cast<DB::Int64>(x.value >> 64 & max_uint_mask))
+                ^ std::hash<DB::Int64>()(static_cast<DB::Int64>(x.value & max_uint_mask));
+        }
+    };
+}
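To make the semantics of the new header concrete, a small usage sketch (illustrative only; it assumes the header is on the include path, and the variable names are hypothetical):

    #include "common/Decimal.h"

    // Decimal stores only the raw integer value; the scale (digits after the
    // point) is tracked by the caller. With scale = 2, raw 12345 means 123.45.
    void decimal_sketch()
    {
        DB::Decimal64 price{12345};
        DB::Decimal64 tax{678};

        price += tax;                              // raw arithmetic: 13023, i.e. 130.23 at scale 2
        double raw = price.convertTo<double>();    // 13023.0 -- the scale is still not applied
        (void)raw;

        static_assert(DB::is_decimal<DB::Decimal64>);
        static_assert(!DB::is_decimal<int>);
    }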
38  base/common/EnumReflection.h  Normal file

@@ -0,0 +1,38 @@
+#pragma once
+
+#include <magic_enum.hpp>
+#include <fmt/format.h>
+
+template <class T> concept is_enum = std::is_enum_v<T>;
+
+namespace detail
+{
+template <is_enum E, class F, size_t ...I>
+constexpr void static_for(F && f, std::index_sequence<I...>)
+{
+    (std::forward<F>(f)(std::integral_constant<E, magic_enum::enum_value<E>(I)>()) , ...);
+}
+}
+
+/**
+ * Iterate over enum values in compile-time (compile-time switch/case, loop unrolling).
+ *
+ * @example static_for<E>([](auto enum_value) { return template_func<enum_value>(); }
+ * ^ enum_value can be used as a template parameter
+ */
+template <is_enum E, class F>
+constexpr void static_for(F && f)
+{
+    constexpr size_t count = magic_enum::enum_count<E>();
+    detail::static_for<E>(std::forward<F>(f), std::make_index_sequence<count>());
+}
+
+/// Enable printing enum values as strings via fmt + magic_enum
+template <is_enum T>
+struct fmt::formatter<T> : fmt::formatter<std::string_view>
+{
+    constexpr auto format(T value, auto& format_context)
+    {
+        return formatter<string_view>::format(magic_enum::enum_name(value), format_context);
+    }
+};
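A usage sketch for the new header (the Color enum is a hypothetical example, and the include path is assumed; magic_enum and fmt come in through the header's own includes):

    #include <common/EnumReflection.h>

    enum class Color { Red, Green, Blue };    // hypothetical example enum

    void print_all_colors()
    {
        // Compile-time loop: the callback receives each enumerator as an
        // integral_constant, so the value is usable as a template parameter.
        static_for<Color>([](auto color)
        {
            constexpr Color c = decltype(color)::value;
            fmt::print("{}\n", c);    // the fmt::formatter above prints "Red", "Green", "Blue"
        });
    }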
@@ -16,6 +16,10 @@ extern "C"
 }
 #endif

+#if defined(__clang__) && __clang_major__ >= 13
+#pragma clang diagnostic ignored "-Wreserved-identifier"
+#endif
+
 namespace
 {
@@ -41,22 +41,14 @@ template <> struct is_unsigned<UInt256> { static constexpr bool value = true; };
 template <typename T>
 inline constexpr bool is_unsigned_v = is_unsigned<T>::value;

+template <class T> concept is_integer =
+    std::is_integral_v<T>
+    || std::is_same_v<T, Int128>
+    || std::is_same_v<T, UInt128>
+    || std::is_same_v<T, Int256>
+    || std::is_same_v<T, UInt256>;
+
 /// TODO: is_integral includes char, char8_t and wchar_t.
-template <typename T>
-struct is_integer
-{
-    static constexpr bool value = std::is_integral_v<T>;
-};
-
-template <> struct is_integer<Int128> { static constexpr bool value = true; };
-template <> struct is_integer<UInt128> { static constexpr bool value = true; };
-template <> struct is_integer<Int256> { static constexpr bool value = true; };
-template <> struct is_integer<UInt256> { static constexpr bool value = true; };
-
-template <typename T>
-inline constexpr bool is_integer_v = is_integer<T>::value;
+template <class T> concept is_floating_point = std::is_floating_point_v<T>;

 template <typename T>
 struct is_arithmetic
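The trait-struct-to-concept rewrite keeps the accepted type set identical while allowing direct use in template heads; a quick sketch (assuming the header's include path):

    #include <common/extended_types.h>

    // Same coverage as the removed is_integer<T>::value trait:
    static_assert(is_integer<int>);
    static_assert(is_integer<UInt256>);
    static_assert(!is_integer<double>);

    // Concepts can constrain templates directly, with no enable_if plumbing:
    template <is_integer T>
    constexpr T twice(T x) { return x + x; }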
@@ -36,18 +36,7 @@
 namespace detail
 {

-template <char s0>
-inline bool is_in(char x)
-{
-    return x == s0;
-}
-
-template <char s0, char s1, char... tail>
-inline bool is_in(char x)
-{
-    return x == s0 || is_in<s1, tail...>(x);
-}
+template <char ...chars> constexpr bool is_in(char x) { return ((x == chars) || ...); }

 #if defined(__SSE2__)
 template <char s0>

@@ -67,16 +56,10 @@ inline __m128i mm_is_in(__m128i bytes)
 #endif

 template <bool positive>
-bool maybe_negate(bool x)
-{
-    if constexpr (positive)
-        return x;
-    else
-        return !x;
-}
+constexpr bool maybe_negate(bool x) { return x == positive; }

 template <bool positive>
-uint16_t maybe_negate(uint16_t x)
+constexpr uint16_t maybe_negate(uint16_t x)
 {
     if constexpr (positive)
         return x;

@@ -149,12 +132,13 @@ template <bool positive, ReturnMode return_mode, size_t num_chars,
           char c05 = 0, char c06 = 0, char c07 = 0, char c08 = 0,
           char c09 = 0, char c10 = 0, char c11 = 0, char c12 = 0,
           char c13 = 0, char c14 = 0, char c15 = 0, char c16 = 0>
-inline const char * find_first_symbols_sse42_impl(const char * const begin, const char * const end)
+inline const char * find_first_symbols_sse42(const char * const begin, const char * const end)
 {
     const char * pos = begin;

 #if defined(__SSE4_2__)
-#define MODE (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT)
+    constexpr int mode = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT;

     __m128i set = _mm_setr_epi8(c01, c02, c03, c04, c05, c06, c07, c08, c09, c10, c11, c12, c13, c14, c15, c16);

     for (; pos + 15 < end; pos += 16)

@@ -163,16 +147,15 @@ inline const char * find_first_symbols_sse42_impl(const char * const begin, const char * const end)
         if constexpr (positive)
         {
-            if (_mm_cmpestrc(set, num_chars, bytes, 16, MODE))
-                return pos + _mm_cmpestri(set, num_chars, bytes, 16, MODE);
+            if (_mm_cmpestrc(set, num_chars, bytes, 16, mode))
+                return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode);
         }
         else
         {
-            if (_mm_cmpestrc(set, num_chars, bytes, 16, MODE | _SIDD_NEGATIVE_POLARITY))
-                return pos + _mm_cmpestri(set, num_chars, bytes, 16, MODE | _SIDD_NEGATIVE_POLARITY);
+            if (_mm_cmpestrc(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY))
+                return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY);
         }
     }
-#undef MODE
 #endif

     for (; pos < end; ++pos)

@@ -197,20 +180,15 @@ inline const char * find_first_symbols_sse42_impl(const char * const begin, const char * const end)
 }


-template <bool positive, ReturnMode return_mode, char... symbols>
-inline const char * find_first_symbols_sse42(const char * begin, const char * end)
-{
-    return find_first_symbols_sse42_impl<positive, return_mode, sizeof...(symbols), symbols...>(begin, end);
-}
-
 /// NOTE No SSE 4.2 implementation for find_last_symbols_or_null. Not worth to do.

 template <bool positive, ReturnMode return_mode, char... symbols>
 inline const char * find_first_symbols_dispatch(const char * begin, const char * end)
+    requires(0 <= sizeof...(symbols) && sizeof...(symbols) <= 16)
 {
 #if defined(__SSE4_2__)
     if (sizeof...(symbols) >= 5)
-        return find_first_symbols_sse42<positive, return_mode, symbols...>(begin, end);
+        return find_first_symbols_sse42<positive, return_mode, sizeof...(symbols), symbols...>(begin, end);
     else
 #endif
     return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
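The two recursive is_in overloads collapse into one C++17 fold expression; a standalone illustration of the same technique:

    // The pack expands to (x == 'a') || (x == 'b') || ...;
    // folding || over an empty pack yields false.
    template <char... chars>
    constexpr bool is_in(char x) { return ((x == chars) || ...); }

    static_assert(is_in<'\t', ' '>(' '));
    static_assert(!is_in<'\t', ' '>('x'));
    static_assert(!is_in<>('x'));    // empty pack: false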
@@ -1,3 +1,7 @@
+#if defined(__clang__) && __clang_major__ >= 13
+#pragma clang diagnostic ignored "-Wreserved-identifier"
+#endif
+
 /// This code was based on the code by Fedor Korotkiy (prime@yandex-team.ru) for YT product in Yandex.

 #include <common/defines.h>
@@ -15,15 +15,15 @@ private:
 public:
     using UnderlyingType = T;
     template <class Enable = typename std::is_copy_constructible<T>::type>
-    explicit StrongTypedef(const T & t_) : t(t_) {}
+    constexpr explicit StrongTypedef(const T & t_) : t(t_) {}
     template <class Enable = typename std::is_move_constructible<T>::type>
-    explicit StrongTypedef(T && t_) : t(std::move(t_)) {}
+    constexpr explicit StrongTypedef(T && t_) : t(std::move(t_)) {}

     template <class Enable = typename std::is_default_constructible<T>::type>
-    StrongTypedef(): t() {}
+    constexpr StrongTypedef(): t() {}

-    StrongTypedef(const Self &) = default;
-    StrongTypedef(Self &&) = default;
+    constexpr StrongTypedef(const Self &) = default;
+    constexpr StrongTypedef(Self &&) = default;

     Self & operator=(const Self &) = default;
     Self & operator=(Self &&) = default;
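With constexpr constructors, StrongTypedef values can now be built at compile time. A sketch (the tag type, alias, and include path are hypothetical; the template's real shape, StrongTypedef<T, Tag>, is visible in the UUID diagnostic quoted in the cmake/warnings.cmake hunk below):

    #include <common/strong_typedef.h>

    struct RowIdTag {};                                    // hypothetical tag type
    using RowId = StrongTypedef<unsigned long long, RowIdTag>;

    constexpr RowId first_row{0};                          // now legal: compile-time construction
    constexpr RowId copy = first_row;                      // the copy constructor is constexpr too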
@@ -1,6 +1,10 @@
 #pragma once
 #include <cstddef>

+#if defined(__clang__) && __clang_major__ >= 13
+#pragma clang diagnostic ignored "-Wreserved-identifier"
+#endif
+
 constexpr size_t KiB = 1024;
 constexpr size_t MiB = 1024 * KiB;
 constexpr size_t GiB = 1024 * MiB;
@@ -1,3 +1,7 @@
+#if defined(__clang__) && __clang_major__ >= 13
+#pragma clang diagnostic ignored "-Wreserved-identifier"
+#endif
+
 #include <daemon/BaseDaemon.h>
 #include <daemon/SentryWriter.h>

@@ -49,6 +49,8 @@ if (NOT USE_INTERNAL_MYSQL_LIBRARY AND OPENSSL_INCLUDE_DIR)
     target_include_directories (mysqlxx SYSTEM PRIVATE ${OPENSSL_INCLUDE_DIR})
 endif ()

+target_no_warning(mysqlxx reserved-macro-identifier)
+
 if (NOT USE_INTERNAL_MYSQL_LIBRARY AND USE_STATIC_LIBRARIES)
     message(WARNING "Statically linking with system mysql/mariadb only works "
         "if mysql client libraries are built with same openssl version as "
@@ -189,7 +189,7 @@ public:
     ~Pool();

     /// Allocates connection.
-    Entry get(uint64_t wait_timeout);
+    Entry get(uint64_t wait_timeout = UINT64_MAX);

     /// Allocates connection.
     /// If database is not accessible, returns empty Entry object.
@@ -79,7 +79,7 @@ PoolWithFailover PoolFactory::get(const Poco::Util::AbstractConfiguration & conf
     std::lock_guard<std::mutex> lock(impl->mutex);
     if (auto entry = impl->pools.find(config_name); entry != impl->pools.end())
     {
-        return *(entry->second.get());
+        return *(entry->second);
     }
     else
     {

@@ -100,7 +100,7 @@ PoolWithFailover PoolFactory::get(const Poco::Util::AbstractConfiguration & conf
         impl->pools.insert_or_assign(config_name, pool);
         impl->pools_by_ids.insert_or_assign(entry_name, config_name);
     }
-    return *(pool.get());
+    return *pool;
 }
 }

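The two PoolFactory changes are purely cosmetic: for any smart pointer, *p and *(p.get()) denote the same object. A minimal sketch:

    #include <cassert>
    #include <memory>

    void deref_sketch()
    {
        auto p = std::make_shared<int>(42);
        assert(*(p.get()) == 42);    // old style: redundant round-trip through the raw pointer
        assert(*p == 42);            // new style: operator* directly
    }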
@@ -27,6 +27,10 @@
 #define _PATH_TTY "/dev/tty"
 #endif

+#if defined(__clang__) && __clang_major__ >= 13
+#pragma clang diagnostic ignored "-Wreserved-identifier"
+#endif
+
 #include <termios.h>
 #include <signal.h>
 #include <ctype.h>
@@ -6,7 +6,7 @@ if (ENABLE_CLANG_TIDY)
         message(FATAL_ERROR "clang-tidy requires CMake version at least 3.6.")
     endif()

-    find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-12" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8")
+    find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-13" "clang-tidy-12" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8")

     if (CLANG_TIDY_PATH)
         message(STATUS
@@ -4,3 +4,4 @@ include (CheckCCompilerFlag)
 check_cxx_compiler_flag("-Wsuggest-destructor-override" HAS_SUGGEST_DESTRUCTOR_OVERRIDE)
 check_cxx_compiler_flag("-Wshadow" HAS_SHADOW)
 check_cxx_compiler_flag("-Wsuggest-override" HAS_SUGGEST_OVERRIDE)
+check_cxx_compiler_flag("-Xclang -fuse-ctor-homing" HAS_USE_CTOR_HOMING)
@@ -1,8 +1,10 @@
-if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
-    set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "")
+if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
+    set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
+else()
+    set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
 endif()

-option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ON)
+option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

 if (NOT ENABLE_EMBEDDED_COMPILER)
     set (USE_EMBEDDED_COMPILER 0)
@@ -192,4 +192,29 @@ elseif (COMPILER_GCC)
     # For some reason (bug in gcc?) macro 'GCC diagnostic ignored "-Wstringop-overflow"' doesn't help.
     add_cxx_compile_options(-Wno-stringop-overflow)
 endif()
+
+if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 11)
+    # reinterpretAs.cpp:182:31: error: ‘void* memcpy(void*, const void*, size_t)’ copying an object of non-trivial type
+    # ‘using ToFieldType = using FieldType = using UUID = struct StrongTypedef<wide::integer<128, unsigned int>, DB::UUIDTag>’
+    # {aka ‘struct StrongTypedef<wide::integer<128, unsigned int>, DB::UUIDTag>’} from an array of ‘const char8_t’
+    add_cxx_compile_options(-Wno-error=class-memaccess)
+
+    # Maybe false positive...
+    # In file included from /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/memory:673,
+    # In function ‘void std::__1::__libcpp_operator_delete(_Args ...) [with _Args = {void*, long unsigned int}]’,
+    # inlined from ‘void std::__1::__do_deallocate_handle_size(void*, size_t, _Args ...) [with _Args = {}]’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/new:271:34,
+    # inlined from ‘void std::__1::__libcpp_deallocate(void*, size_t, size_t)’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/new:285:41,
+    # inlined from ‘constexpr void std::__1::allocator<_Tp>::deallocate(_Tp*, size_t) [with _Tp = char]’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/memory:849:39,
+    # inlined from ‘static constexpr void std::__1::allocator_traits<_Alloc>::deallocate(std::__1::allocator_traits<_Alloc>::allocator_type&, std::__1::allocator_traits<_Alloc>::pointer, std::__1::allocator_traits<_Alloc>::size_type) [with _Alloc = std::__1::allocator<char>]’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/__memory/allocator_traits.h:476:24,
+    # inlined from ‘std::__1::basic_string<_CharT, _Traits, _Allocator>::~basic_string() [with _CharT = char; _Traits = std::__1::char_traits<char>; _Allocator = std::__1::allocator<char>]’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/string:2219:35,
+    # inlined from ‘std::__1::basic_string<_CharT, _Traits, _Allocator>::~basic_string() [with _CharT = char; _Traits = std::__1::char_traits<char>; _Allocator = std::__1::allocator<char>]’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/string:2213:1,
+    # inlined from ‘DB::JSONBuilder::JSONMap::Pair::~Pair()’ at /home/jakalletti/ClickHouse/ClickHouse/src/Common/JSONBuilder.h:90:12,
+    # inlined from ‘void DB::JSONBuilder::JSONMap::add(std::__1::string, DB::JSONBuilder::ItemPtr)’ at /home/jakalletti/ClickHouse/ClickHouse/src/Common/JSONBuilder.h:97:68,
+    # inlined from ‘virtual void DB::ExpressionStep::describeActions(DB::JSONBuilder::JSONMap&) const’ at /home/jakalletti/ClickHouse/ClickHouse/src/Processors/QueryPlan/ExpressionStep.cpp:102:12:
+    # /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/new:247:20: error: ‘void operator delete(void*, size_t)’ called on a pointer to an unallocated object ‘7598543875853023301’ [-Werror=free-nonheap-object]
+    add_cxx_compile_options(-Wno-error=free-nonheap-object)
+
+    # AggregateFunctionAvg.h:203:100: error: ‘this’ pointer is null [-Werror=nonnull]
+    add_cxx_compile_options(-Wno-error=nonnull)
+endif()
 endif ()
12  contrib/CMakeLists.txt  vendored

@@ -33,6 +33,7 @@ endif()
 set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1)

 add_subdirectory (abseil-cpp-cmake)
+add_subdirectory (magic-enum-cmake)
 add_subdirectory (boost-cmake)
 add_subdirectory (cctz-cmake)
 add_subdirectory (consistent-hashing)
@@ -206,12 +207,14 @@ elseif(GTEST_SRC_DIR)
     target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0)
 endif()

-if (USE_EMBEDDED_COMPILER)
+function(add_llvm)
     # ld: unknown option: --color-diagnostics
     if (APPLE)
         set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "")
     endif ()

+    # Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
+    set (CMAKE_INSTALL_RPATH "ON")
     set (LLVM_ENABLE_EH 1 CACHE INTERNAL "")
     set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
     set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")

@@ -219,13 +222,12 @@ if (USE_EMBEDDED_COMPILER)

     # Need to use C++17 since the compilation is not possible with C++20 currently, due to ambiguous operator != etc.
     # LLVM project will set its default value for the -std=... but our global setting from CMake will override it.
     set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
     set (CMAKE_CXX_STANDARD 17)

     add_subdirectory (llvm/llvm)

     set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
     unset (CMAKE_CXX_STANDARD_bak)
+endfunction()
+if (USE_EMBEDDED_COMPILER)
+    add_llvm()
 endif ()

 if (USE_INTERNAL_LIBGSASL_LIBRARY)
2  contrib/abseil-cpp  vendored

@@ -1 +1 @@
-Subproject commit 4f3b686f86c3ebaba7e4e926e62a79cb1c659a54
+Subproject commit b004a8a02418b83de8b686caa0b0f6e39ac2191f

2  contrib/fastops  vendored

@@ -1 +1 @@
-Subproject commit 88752a5e03cf34639a4a37a4b41d8b463fffd2b5
+Subproject commit 012b777df9e2d145a24800a6c8c3d4a0249bb09e

2  contrib/llvm  vendored

@@ -1 +1 @@
-Subproject commit e5751459412bce1391fb7a2e9bbc01e131bf72f1
+Subproject commit f30bbecef78b75b527e257c1304d0be2f2f95975

3  contrib/magic-enum-cmake/CMakeLists.txt  Normal file

@@ -0,0 +1,3 @@
+set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/magic_enum")
+add_library (magic_enum INTERFACE)
+target_include_directories(magic_enum INTERFACE ${LIBRARY_DIR}/include)

1  contrib/magic_enum  vendored  Submodule

@@ -0,0 +1 @@
+Subproject commit 38f86e4d093cfc9034a140d37de2168e3951bef3

2  contrib/rocksdb  vendored

@@ -1 +1 @@
-Subproject commit b6480c69bf3ab6e298e0d019a07fd4f69029b26a
+Subproject commit 5ea892c8673e6c5a052887653673b967d44cc59b
11  debian/clickhouse-server.init  vendored

@@ -3,10 +3,17 @@
 # Provides:          clickhouse-server
 # Default-Start:     2 3 4 5
 # Default-Stop:      0 1 6
-# Required-Start:    $network
-# Required-Stop:     $network
+# Should-Start:      $time $network
+# Should-Stop:       $network
 # Short-Description: Yandex clickhouse-server daemon
 ### END INIT INFO
+#
+# NOTES:
+# - Should-* -- script can start if the listed facilities are missing, unlike Required-*
+#
+# For the documentation [1]:
+#
+# [1]: https://wiki.debian.org/LSBInitScripts

 CLICKHOUSE_USER=clickhouse
 CLICKHOUSE_GROUP=${CLICKHOUSE_USER}
8  debian/clickhouse-server.service  vendored

@@ -1,7 +1,12 @@
 [Unit]
 Description=ClickHouse Server (analytic DBMS for big data)
 Requires=network-online.target
-After=network-online.target
+# NOTE: that After/Wants=time-sync.target is not enough, you need to ensure
+# that the time was adjusted already, if you use systemd-timesyncd you are
+# safe, but if you use ntp or some other daemon, you should configure it
+# additionaly.
+After=time-sync.target network-online.target
+Wants=time-sync.target

 [Service]
 Type=simple

@@ -16,4 +21,5 @@ LimitNOFILE=500000
 CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE

 [Install]
+# ClickHouse should not start from the rescue shell (rescue.target).
 WantedBy=multi-user.target
4  debian/rules  vendored

@@ -36,8 +36,8 @@ endif

 CMAKE_FLAGS += -DENABLE_UTILS=0

-DEB_CC ?= $(shell which gcc-10 gcc-9 gcc | head -n1)
-DEB_CXX ?= $(shell which g++-10 g++-9 g++ | head -n1)
+DEB_CC ?= $(shell which gcc-11 gcc-10 gcc-9 gcc | head -n1)
+DEB_CXX ?= $(shell which g++-11 g++-10 g++-9 g++ | head -n1)

 ifdef DEB_CXX
 DEB_BUILD_GNU_TYPE := $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
@@ -1,6 +1,6 @@
 FROM ubuntu:20.04

-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

@@ -4,7 +4,7 @@ set -e
 #ccache -s # uncomment to display CCache statistics
 mkdir -p /server/build_docker
 cd /server/build_docker
-cmake -G Ninja /server "-DCMAKE_C_COMPILER=$(command -v clang-12)" "-DCMAKE_CXX_COMPILER=$(command -v clang++-12)"
+cmake -G Ninja /server "-DCMAKE_C_COMPILER=$(command -v clang-13)" "-DCMAKE_CXX_COMPILER=$(command -v clang++-13)"

 # Set the number of build jobs to the half of number of virtual CPU cores (rounded up).
 # By default, ninja use all virtual CPU cores, that leads to very high memory consumption without much improvement in build time.
@@ -1,12 +1,12 @@
 {
     "docker/packager/deb": {
-        "name": "yandex/clickhouse-deb-builder",
+        "name": "clickhouse/deb-builder",
         "dependent": [
             "docker/packager/unbundled"
         ]
     },
     "docker/packager/binary": {
-        "name": "yandex/clickhouse-binary-builder",
+        "name": "clickhouse/binary-builder",
         "dependent": [
             "docker/test/split_build_smoke_test",
             "docker/test/pvs",

@@ -14,155 +14,150 @@
         ]
     },
     "docker/packager/unbundled": {
-        "name": "yandex/clickhouse-unbundled-builder",
+        "name": "clickhouse/unbundled-builder",
         "dependent": []
     },
     "docker/test/compatibility/centos": {
-        "name": "yandex/clickhouse-test-old-centos",
+        "name": "clickhouse/test-old-centos",
         "dependent": []
     },
     "docker/test/compatibility/ubuntu": {
-        "name": "yandex/clickhouse-test-old-ubuntu",
+        "name": "clickhouse/test-old-ubuntu",
         "dependent": []
     },
     "docker/test/integration/base": {
-        "name": "yandex/clickhouse-integration-test",
+        "name": "clickhouse/integration-test",
         "dependent": []
     },
     "docker/test/fuzzer": {
-        "name": "yandex/clickhouse-fuzzer",
+        "name": "clickhouse/fuzzer",
         "dependent": []
     },
     "docker/test/performance-comparison": {
-        "name": "yandex/clickhouse-performance-comparison",
+        "name": "clickhouse/performance-comparison",
         "dependent": []
     },
     "docker/test/pvs": {
-        "name": "yandex/clickhouse-pvs-test",
+        "name": "clickhouse/pvs-test",
         "dependent": []
     },
     "docker/test/stateless": {
-        "name": "yandex/clickhouse-stateless-test",
+        "name": "clickhouse/stateless-test",
         "dependent": [
             "docker/test/stateful",
             "docker/test/coverage",
             "docker/test/unit"
         ]
     },
     "docker/test/stateless_pytest": {
         "name": "yandex/clickhouse-stateless-pytest",
         "dependent": []
     },
     "docker/test/stateful": {
-        "name": "yandex/clickhouse-stateful-test",
+        "name": "clickhouse/stateful-test",
         "dependent": [
             "docker/test/stress"
         ]
     },
     "docker/test/coverage": {
-        "name": "yandex/clickhouse-test-coverage",
+        "name": "clickhouse/test-coverage",
         "dependent": []
     },
     "docker/test/unit": {
-        "name": "yandex/clickhouse-unit-test",
+        "name": "clickhouse/unit-test",
         "dependent": []
     },
     "docker/test/stress": {
-        "name": "yandex/clickhouse-stress-test",
+        "name": "clickhouse/stress-test",
         "dependent": []
     },
     "docker/test/split_build_smoke_test": {
-        "name": "yandex/clickhouse-split-build-smoke-test",
+        "name": "clickhouse/split-build-smoke-test",
         "dependent": []
     },
     "docker/test/codebrowser": {
-        "name": "yandex/clickhouse-codebrowser",
+        "name": "clickhouse/codebrowser",
        "dependent": []
     },
     "docker/test/integration/runner": {
-        "name": "yandex/clickhouse-integration-tests-runner",
+        "name": "clickhouse/integration-tests-runner",
         "dependent": []
     },
     "docker/test/testflows/runner": {
-        "name": "yandex/clickhouse-testflows-runner",
+        "name": "clickhouse/testflows-runner",
         "dependent": []
     },
     "docker/test/fasttest": {
-        "name": "yandex/clickhouse-fasttest",
+        "name": "clickhouse/fasttest",
         "dependent": []
     },
     "docker/test/style": {
-        "name": "yandex/clickhouse-style-test",
+        "name": "clickhouse/style-test",
         "dependent": []
     },
     "docker/test/integration/s3_proxy": {
-        "name": "yandex/clickhouse-s3-proxy",
+        "name": "clickhouse/s3-proxy",
         "dependent": []
     },
     "docker/test/integration/resolver": {
-        "name": "yandex/clickhouse-python-bottle",
+        "name": "clickhouse/python-bottle",
         "dependent": []
     },
     "docker/test/integration/helper_container": {
-        "name": "yandex/clickhouse-integration-helper",
+        "name": "clickhouse/integration-helper",
         "dependent": []
     },
     "docker/test/integration/mysql_golang_client": {
-        "name": "yandex/clickhouse-mysql-golang-client",
+        "name": "clickhouse/mysql-golang-client",
         "dependent": []
     },
     "docker/test/integration/mysql_java_client": {
-        "name": "yandex/clickhouse-mysql-java-client",
+        "name": "clickhouse/mysql-java-client",
         "dependent": []
     },
     "docker/test/integration/mysql_js_client": {
-        "name": "yandex/clickhouse-mysql-js-client",
+        "name": "clickhouse/mysql-js-client",
         "dependent": []
     },
     "docker/test/integration/mysql_php_client": {
-        "name": "yandex/clickhouse-mysql-php-client",
+        "name": "clickhouse/mysql-php-client",
         "dependent": []
     },
     "docker/test/integration/postgresql_java_client": {
-        "name": "yandex/clickhouse-postgresql-java-client",
+        "name": "clickhouse/postgresql-java-client",
         "dependent": []
     },
     "docker/test/integration/kerberos_kdc": {
-        "name": "yandex/clickhouse-kerberos-kdc",
+        "name": "clickhouse/kerberos-kdc",
         "dependent": []
     },
     "docker/test/base": {
-        "name": "yandex/clickhouse-test-base",
+        "name": "clickhouse/test-base",
         "dependent": [
             "docker/test/stateless",
             "docker/test/stateless_unbundled",
             "docker/test/stateless_pytest",
             "docker/test/integration/base",
             "docker/test/fuzzer",
             "docker/test/keeper-jepsen"
         ]
     },
     "docker/packager/unbundled": {
-        "name": "yandex/clickhouse-unbundled-builder",
+        "name": "clickhouse/unbundled-builder",
         "dependent": [
             "docker/test/stateless_unbundled"
         ]
     },
     "docker/test/stateless_unbundled": {
-        "name": "yandex/clickhouse-stateless-unbundled-test",
+        "name": "clickhouse/stateless-unbundled-test",
         "dependent": [
         ]
     },
     "docker/test/integration/kerberized_hadoop": {
-        "name": "yandex/clickhouse-kerberized-hadoop",
+        "name": "clickhouse/kerberized-hadoop",
         "dependent": []
     },
     "docker/test/sqlancer": {
-        "name": "yandex/clickhouse-sqlancer-test",
+        "name": "clickhouse/sqlancer-test",
         "dependent": []
     },
     "docker/test/keeper-jepsen": {
-        "name": "yandex/clickhouse-keeper-jepsen-test",
+        "name": "clickhouse/keeper-jepsen-test",
         "dependent": []
     }
 }
@@ -1,7 +1,7 @@
-# docker build -t yandex/clickhouse-binary-builder .
+# docker build -t clickhouse/binary-builder .
 FROM ubuntu:20.04

-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

@@ -41,18 +41,14 @@ RUN apt-get update \
         ccache \
         cmake \
         curl \
-        g++-10 \
-        gcc-10 \
         gdb \
         git \
         gperf \
-        libicu-dev \
-        libreadline-dev \
-        clang-12 \
-        clang-tidy-12 \
-        lld-12 \
-        llvm-12 \
-        llvm-12-dev \
+        clang-${LLVM_VERSION} \
+        clang-tidy-${LLVM_VERSION} \
+        lld-${LLVM_VERSION} \
+        llvm-${LLVM_VERSION} \
+        llvm-${LLVM_VERSION}-dev \
+        libicu-dev \
+        libreadline-dev \
         moreutils \

@@ -104,15 +100,10 @@ RUN wget -nv "https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.0
 # Download toolchain for FreeBSD 11.3
 RUN wget -nv https://clickhouse-datasets.s3.yandex.net/toolchains/toolchains/freebsd-11.3-toolchain.tar.xz

-# NOTE: For some reason we have outdated version of gcc-10 in ubuntu 20.04 stable.
-# Current workaround is to use latest version proposed repo. Remove as soon as
-# gcc-10.2 appear in stable repo.
-RUN echo 'deb http://archive.ubuntu.com/ubuntu/ focal-proposed restricted main multiverse universe' > /etc/apt/sources.list.d/proposed-repositories.list
-
-RUN apt-get update \
-    && apt-get install gcc-10 g++-10 --yes
-
-RUN rm /etc/apt/sources.list.d/proposed-repositories.list && apt-get update
+# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
+RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
+    && apt-get update \
+    && apt-get install gcc-11 g++-11 --yes


 COPY build.sh /
@@ -1,7 +1,7 @@
-# docker build -t yandex/clickhouse-deb-builder .
+# docker build -t clickhouse/deb-builder .
 FROM ubuntu:20.04

-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

@@ -37,17 +37,17 @@ RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
 RUN apt-get update \
     && apt-get install \
         alien \
-        clang-12 \
-        clang-tidy-12 \
+        clang-${LLVM_VERSION} \
+        clang-tidy-${LLVM_VERSION} \
         cmake \
         debhelper \
         devscripts \
         gdb \
         git \
         gperf \
-        lld-12 \
-        llvm-12 \
-        llvm-12-dev \
+        lld-${LLVM_VERSION} \
+        llvm-${LLVM_VERSION} \
+        llvm-${LLVM_VERSION}-dev \
         moreutils \
         ninja-build \
         perl \

@@ -57,15 +57,11 @@ RUN apt-get update \
         tzdata \
         --yes --no-install-recommends

-# NOTE: For some reason we have outdated version of gcc-10 in ubuntu 20.04 stable.
-# Current workaround is to use latest version proposed repo. Remove as soon as
-# gcc-10.2 appear in stable repo.
-RUN echo 'deb http://archive.ubuntu.com/ubuntu/ focal-proposed restricted main multiverse universe' > /etc/apt/sources.list.d/proposed-repositories.list
-
-RUN apt-get update \
-    && apt-get install gcc-10 g++-10 --yes --no-install-recommends
-
-RUN rm /etc/apt/sources.list.d/proposed-repositories.list && apt-get update
+# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
+RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
+    && apt-get update \
+    && apt-get install gcc-11 g++-11 --yes

 # This symlink required by gcc to find lld compiler
 RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
@@ -9,9 +9,9 @@ import sys
 SCRIPT_PATH = os.path.realpath(__file__)

 IMAGE_MAP = {
-    "deb": "yandex/clickhouse-deb-builder",
-    "binary": "yandex/clickhouse-binary-builder",
-    "unbundled": "yandex/clickhouse-unbundled-builder"
+    "deb": "clickhouse/deb-builder",
+    "binary": "clickhouse/binary-builder",
+    "unbundled": "clickhouse/unbundled-builder"
 }

 def check_image_exists_locally(image_name):

@@ -205,7 +205,8 @@ if __name__ == "__main__":
     parser.add_argument("--build-type", choices=("debug", ""), default="")
     parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64",
                         "clang-12", "clang-12-darwin", "clang-12-darwin-aarch64", "clang-12-aarch64",
-                        "clang-11-freebsd", "clang-12-freebsd", "gcc-10"), default="clang-12")
+                        "clang-13", "clang-13-darwin", "clang-13-darwin-aarch64", "clang-13-aarch64",
+                        "clang-11-freebsd", "clang-12-freebsd", "clang-13-freebsd", "gcc-11"), default="clang-13")
     parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="")
     parser.add_argument("--unbundled", action="store_true")
     parser.add_argument("--split-binary", action="store_true")
@@ -1,5 +1,5 @@
-# docker build -t yandex/clickhouse-unbundled-builder .
-FROM yandex/clickhouse-deb-builder
+# docker build -t clickhouse/unbundled-builder .
+FROM clickhouse/deb-builder

 RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
     && wget -nv -O /tmp/arrow-keyring.deb "https://apache.jfrog.io/artifactory/arrow/ubuntu/apache-arrow-apt-source-latest-${CODENAME}.deb" \
@@ -1,7 +1,7 @@
-# docker build -t yandex/clickhouse-test-base .
+# docker build -t clickhouse/test-base .
 FROM ubuntu:20.04

-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

@@ -1,6 +1,6 @@
-# docker build --network=host -t yandex/clickhouse-codebrowser .
-# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output yandex/clickhouse-codebrowser
-FROM yandex/clickhouse-binary-builder
+# docker build --network=host -t clickhouse/codebrowser .
+# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output clickhouse/codebrowser
+FROM clickhouse/binary-builder

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

@@ -11,7 +11,7 @@ RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-9 libl
 # https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b
 RUN git clone https://github.com/ClickHouse-Extras/woboq_codebrowser

-RUN cd woboq_codebrowser && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-12 -DCMAKE_C_COMPILER=clang-12 && make -j
+RUN cd woboq_codebrowser && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-13 -DCMAKE_C_COMPILER=clang-13 && make -j

 ENV CODEGEN=/woboq_codebrowser/generator/codebrowser_generator
 ENV CODEINDEX=/woboq_codebrowser/indexgenerator/codebrowser_indexgenerator

@@ -24,7 +24,7 @@ ENV SHA=nosha
 ENV DATA="data"

 CMD mkdir -p $BUILD_DIRECTORY && cd $BUILD_DIRECTORY && \
-    cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-12 -DCMAKE_C_COMPILER=/usr/bin/clang-12 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \
+    cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-13 -DCMAKE_C_COMPILER=/usr/bin/clang-13 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \
     mkdir -p $HTML_RESULT_DIRECTORY && \
     $CODEGEN -b $BUILD_DIRECTORY -a -o $HTML_RESULT_DIRECTORY -p ClickHouse:$SOURCE_DIRECTORY:$SHA -d $DATA | ts '%Y-%m-%d %H:%M:%S' && \
     cp -r $STATIC_DATA $HTML_RESULT_DIRECTORY/ &&\
@@ -1,4 +1,4 @@
-# docker build -t yandex/clickhouse-test-old-centos .
+# docker build -t clickhouse/test-old-centos .
 FROM centos:5

 CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \

@@ -1,4 +1,4 @@
-# docker build -t yandex/clickhouse-test-old-ubuntu .
+# docker build -t clickhouse/test-old-ubuntu .
 FROM ubuntu:12.04

 CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \
@@ -1,5 +1,5 @@
-# docker build -t yandex/clickhouse-test-coverage .
-FROM yandex/clickhouse-stateless-test
+# docker build -t clickhouse/test-coverage .
+FROM clickhouse/stateless-test

 RUN apt-get update -y \
     && env DEBIAN_FRONTEND=noninteractive \

@@ -80,7 +80,7 @@ LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "RENAM
 LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
 LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "SHOW TABLES FROM test"

-LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-test -j 8 --testname --shard --zookeeper --print-time --use-skip-list 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_result.txt
+LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-test -j 8 --testname --shard --zookeeper --print-time 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_result.txt

 readarray -t FAILED_TESTS < <(awk '/FAIL|TIMEOUT|ERROR/ { print substr($3, 1, length($3)-1) }' "/test_result.txt")

@@ -97,7 +97,7 @@ then

     echo "Going to run again: ${FAILED_TESTS[*]}"

-    LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-test --order=random --testname --shard --zookeeper --use-skip-list "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a /test_result.txt
+    LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-test --order=random --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a /test_result.txt
 else
     echo "No failed tests"
 fi
@@ -1,7 +1,7 @@
-# docker build -t yandex/clickhouse-fasttest .
+# docker build -t clickhouse/fasttest .
 FROM ubuntu:20.04

-ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=12
+ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13

 RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

@@ -9,7 +9,7 @@ trap 'kill $(jobs -pr) ||:' EXIT
 stage=${stage:-}

 # Compiler version, normally set by Dockerfile
-export LLVM_VERSION=${LLVM_VERSION:-12}
+export LLVM_VERSION=${LLVM_VERSION:-13}

 # A variable to pass additional flags to CMake.
 # Here we explicitly default it to nothing so that bash doesn't complain about

@@ -159,6 +159,7 @@ function clone_submodules
 cd "$FASTTEST_SOURCE"

 SUBMODULES_TO_UPDATE=(
+    contrib/magic_enum
     contrib/abseil-cpp
     contrib/boost
     contrib/zlib-ng
@@ -261,153 +262,8 @@ function run_tests

     start_server

-    TESTS_TO_SKIP=(
-        00105_shard_collations
-        00109_shard_totals_after_having
-        00110_external_sort
-        00302_http_compression
-        00417_kill_query
-        00436_convert_charset
-        00490_special_line_separators_and_characters_outside_of_bmp
-        00652_replicated_mutations_zookeeper
-        00682_empty_parts_merge
-        00701_rollup
-        00834_cancel_http_readonly_queries_on_client_close
-        00911_tautological_compare
-
-        # Hyperscan
-        00926_multimatch
-        00929_multi_match_edit_distance
-        01681_hyperscan_debug_assertion
-        02004_max_hyperscan_regex_length
-
-        01176_mysql_client_interactive # requires mysql client
-        01031_mutations_interpreter_and_context
-        01053_ssd_dictionary # this test mistakenly requires acces to /var/lib/clickhouse -- can't run this locally, disabled
-        01083_expressions_in_engine_arguments
-        01092_memory_profiler
-        01098_msgpack_format
-        01098_temporary_and_external_tables
-        01103_check_cpu_instructions_at_startup # avoid dependency on qemu -- invonvenient when running locally
-        01193_metadata_loading
-        01238_http_memory_tracking # max_memory_usage_for_user can interfere another queries running concurrently
-        01251_dict_is_in_infinite_loop
-        01259_dictionary_custom_settings_ddl
-        01268_dictionary_direct_layout
-        01280_ssd_complex_key_dictionary
-        01281_group_by_limit_memory_tracking # max_memory_usage_for_user can interfere another queries running concurrently
-        01318_encrypt # Depends on OpenSSL
-        01318_decrypt # Depends on OpenSSL
-        01663_aes_msan # Depends on OpenSSL
-        01667_aes_args_check # Depends on OpenSSL
-        01683_codec_encrypted # Depends on OpenSSL
-        01776_decrypt_aead_size_check # Depends on OpenSSL
-        01811_filter_by_null # Depends on OpenSSL
-        02012_sha512_fixedstring # Depends on OpenSSL
-        01281_unsucceeded_insert_select_queries_counter
-        01292_create_user
-        01294_lazy_database_concurrent
-        01305_replica_create_drop_zookeeper
-        01354_order_by_tuple_collate_const
-        01355_ilike
-        01411_bayesian_ab_testing
-        01798_uniq_theta_sketch
-        01799_long_uniq_theta_sketch
-        01890_stem # depends on libstemmer_c
-        02003_compress_bz2 # depends on bzip2
-        01059_storage_file_compression # depends on brotli and bzip2
-        collate
-        collation
-        _orc_
-        arrow
-        avro
-        base64
-        brotli
-        capnproto
-        client
-        ddl_dictionaries
-        h3
-        hashing
-        hdfs
-        java_hash
-        json
-        limit_memory
-        live_view
-        memory_leak
-        memory_limit
-        mysql
-        odbc
-        parallel_alter
-        parquet
-        protobuf
-        secure
-        sha256
-        xz
-
-        # Not sure why these two fail even in sequential mode. Disabled for now
-        # to make some progress.
-        00646_url_engine
-        00974_query_profiler
-
-        # In fasttest, ENABLE_LIBRARIES=0, so rocksdb engine is not enabled by default
-        01504_rocksdb
-        01686_rocksdb
-
-        # Look at DistributedFilesToInsert, so cannot run in parallel.
-        01460_DistributedFilesToInsert
-
-        01541_max_memory_usage_for_user_long
-
-        # Require python libraries like scipy, pandas and numpy
-        01322_ttest_scipy
-        01561_mann_whitney_scipy
-
-        01545_system_errors
-        # Checks system.errors
-        01563_distributed_query_finish
-
-        # nc - command not found
-        01601_proxy_protocol
-        01622_defaults_for_url_engine
-
-        # JSON functions
-        01666_blns
-
-        # Requires postgresql-client
-        01802_test_postgresql_protocol_with_row_policy
-
-        # Depends on AWS
-        01801_s3_cluster
-        02012_settings_clause_for_s3
-
-        # needs psql
-        01889_postgresql_protocol_null_fields
-
-        # needs pv
-        01923_network_receive_time_metric_insert
-
-        01889_sqlite_read_write
-
-        # needs s2
-        01849_geoToS2
-        01851_s2_to_geo
-        01852_s2_get_neighbours
-        01853_s2_cells_intersect
-        01854_s2_cap_contains
-        01854_s2_cap_union
-
-        # needs s3
-        01944_insert_partition_by
-
-        # depends on Go
-        02013_zlib_read_after_eof
-
-        # Accesses CH via mysql table function (which is unavailable)
-        01747_system_session_log_long
-    )
-
-    time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \
-        --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" \
+    time clickhouse-test --hung-check -j 8 --order=random \
+        --fast-tests-only --no-long --testname --shard --zookeeper \
         -- "$FASTTEST_FOCUS" 2>&1 \
         | ts '%Y-%m-%d %H:%M:%S' \
         | tee "$FASTTEST_OUTPUT/test_log.txt"
@@ -1,5 +1,5 @@
-# docker build -t yandex/clickhouse-fuzzer .
-FROM yandex/clickhouse-test-base
+# docker build -t clickhouse/fuzzer .
+FROM clickhouse/test-base

 ENV LANG=C.UTF-8
 ENV TZ=Europe/Moscow

@@ -36,5 +36,5 @@ CMD set -o pipefail \
     && cd /workspace \
     && /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log

-# docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-fuzzer
+# docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/fuzzer

@@ -12,7 +12,7 @@ stage=${stage:-}
 script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 echo "$script_dir"
 repo_dir=ch
-BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-12_debug_none_bundled_unsplitted_disable_False_binary"}
+BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-13_debug_none_bundled_unsplitted_disable_False_binary"}

 function clone
 {
@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-integration-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/integration-test .
FROM clickhouse/test-base

SHELL ["/bin/bash", "-c"]

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-integration-helper .
# docker build -t clickhouse/integration-helper .
# Helper docker container to run iptables without sudo

FROM alpine

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-kerberized-hadoop .
# docker build -t clickhouse/kerberized-hadoop .

FROM sequenceiq/hadoop-docker:2.7.0
RUN sed -i -e 's/^\#baseurl/baseurl/' /etc/yum.repos.d/CentOS-Base.repo

@ -1,9 +1,9 @@
# docker build -t yandex/clickhouse-kerberos-kdc .
# docker build -t clickhouse/kerberos-kdc .
FROM centos:6

FROM centos:6.6
# old OS to make it faster and smaller
RUN sed -i '/^mirrorlist/s/^/#/;/^#baseurl/{s/#//;s/mirror.centos.org\/centos\/$releasever/vault.centos.org\/6.10/}' /etc/yum.repos.d/*B*

RUN yum install -y krb5-server krb5-libs krb5-auth-dialog krb5-workstation
RUN yum install -y ca-certificates krb5-server krb5-libs krb5-auth-dialog krb5-workstation

EXPOSE 88 749

@ -1,7 +1,7 @@
# docker build -t yandex/clickhouse-mysql-golang-client .
# docker build -t clickhouse/mysql-golang-client .
# MySQL golang client docker container

FROM golang:1.12.2
FROM golang:1.13

RUN go get "github.com/go-sql-driver/mysql"

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-mysql-java-client .
# docker build -t clickhouse/mysql-java-client .
# MySQL Java client docker container

FROM ubuntu:18.04

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-mysql-js-client .
# docker build -t clickhouse/mysql-js-client .
# MySQL JavaScript client docker container

FROM node:8

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-mysql-php-client .
# docker build -t clickhouse/mysql-php-client .
# MySQL PHP client docker container

FROM php:7.3-cli

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-postgresql-java-client .
# docker build -t clickhouse/postgresql-java-client .
# PostgreSQL Java client docker container

FROM ubuntu:18.04

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-python-bottle .
# docker build -t clickhouse/python-bottle .
# Helper docker container to run python bottle apps

FROM python:3

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-integration-tests-runner .
# docker build -t clickhouse/integration-tests-runner .
FROM ubuntu:20.04

RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

@ -1,7 +1,7 @@
version: '2.3'
services:
  bridge1:
    image: yandex/clickhouse-jdbc-bridge
    image: clickhouse/jdbc-bridge
    command: |
      /bin/bash -c 'cat << EOF > config/datasources/self.json
      {

@ -1,7 +1,7 @@
version: '2.3'
services:
  zoo1:
    image: ${image:-yandex/clickhouse-integration-test}
    image: ${image:-clickhouse/integration-test}
    restart: always
    user: ${user:-}
    volumes:
@ -31,7 +31,7 @@ services:
      - inet6
      - rotate
  zoo2:
    image: ${image:-yandex/clickhouse-integration-test}
    image: ${image:-clickhouse/integration-test}
    restart: always
    user: ${user:-}
    volumes:
@ -61,7 +61,7 @@ services:
      - inet6
      - rotate
  zoo3:
    image: ${image:-yandex/clickhouse-integration-test}
    image: ${image:-clickhouse/integration-test}
    restart: always
    user: ${user:-}
    volumes:

@ -4,7 +4,7 @@ services:
  kerberizedhdfs1:
    cap_add:
      - DAC_READ_SEARCH
    image: yandex/clickhouse-kerberized-hadoop:16621
    image: clickhouse/kerberized-hadoop
    hostname: kerberizedhdfs1
    restart: always
    volumes:
@ -22,7 +22,7 @@ services:
    entrypoint: /etc/bootstrap.sh -d

  hdfskerberos:
    image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
    image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
    hostname: hdfskerberos
    volumes:
      - ${KERBERIZED_HDFS_DIR}/secrets:/tmp/keytab

@ -50,7 +50,7 @@ services:
      - label:disable

  kafka_kerberos:
    image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
    image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
    hostname: kafka_kerberos
    volumes:
      - ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab

@ -19,14 +19,14 @@ services:

  # HTTP proxies for Minio.
  proxy1:
    image: yandex/clickhouse-s3-proxy
    image: clickhouse/s3-proxy
    expose:
      - "8080" # Redirect proxy port
      - "80" # Reverse proxy port
      - "443" # Reverse proxy port (secure)

  proxy2:
    image: yandex/clickhouse-s3-proxy
    image: clickhouse/s3-proxy
    expose:
      - "8080"
      - "80"
@ -34,7 +34,7 @@ services:

  # Empty container to run proxy resolver.
  resolver:
    image: yandex/clickhouse-python-bottle
    image: clickhouse/python-bottle
    expose:
      - "8080"
    tty: true

@ -1,6 +1,6 @@
version: '2.3'
services:
  golang1:
    image: yandex/clickhouse-mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
    image: clickhouse/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
    # to keep container running
    command: sleep infinity

@ -1,6 +1,6 @@
version: '2.3'
services:
  java1:
    image: yandex/clickhouse-mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
    image: clickhouse/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
    # to keep container running
    command: sleep infinity

@ -1,6 +1,6 @@
version: '2.3'
services:
  mysqljs1:
    image: yandex/clickhouse-mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
    image: clickhouse/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
    # to keep container running
    command: sleep infinity

@ -1,6 +1,6 @@
version: '2.3'
services:
  php1:
    image: yandex/clickhouse-mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
    image: clickhouse/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
    # to keep container running
    command: sleep infinity

@ -1,6 +1,6 @@
version: '2.2'
services:
  java:
    image: yandex/clickhouse-postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
    image: clickhouse/postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
    # to keep container running
    command: sleep infinity

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-s3-proxy .
# docker build -t clickhouse/s3-proxy .
FROM nginx:alpine

COPY run.sh /run.sh

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-keeper-jepsen-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/keeper-jepsen-test .
FROM clickhouse/test-base

ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814

@ -2,7 +2,7 @@
set -euo pipefail

CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-12_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-13_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-performance-comparison .
# docker build -t clickhouse/performance-comparison .
FROM ubuntu:18.04

ENV LANG=C.UTF-8
@ -54,4 +54,4 @@ COPY * /
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\\(.*\\)nodes.*$/\\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"]

# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-performance-comparison
# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison

@ -116,7 +116,7 @@ pull requests (0 for master) manually.
docker run --network=host --volume=$(pwd)/workspace:/workspace --volume=$(pwd)/output:/output
    [-e REF_PR={} -e REF_SHA={}]
    -e PR_TO_TEST={} -e SHA_TO_TEST={}
    yandex/clickhouse-performance-comparison
    clickhouse/performance-comparison
```

Then see the `report.html` in the `output` directory.

@ -1,6 +1,6 @@
# docker build -t yandex/clickhouse-pvs-test .
# docker build -t clickhouse/pvs-test .

FROM yandex/clickhouse-binary-builder
FROM clickhouse/binary-builder

RUN apt-get update --yes \
    && apt-get install \
@ -38,7 +38,7 @@ RUN set -x \
    && dpkg -i "${PKG_VERSION}.deb"

CMD echo "Running PVS version $PKG_VERSION" && cd /repo_folder && pvs-studio-analyzer credentials $LICENCE_NAME $LICENCE_KEY -o ./licence.lic \
    && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF -DCMAKE_C_COMPILER=clang-12 -DCMAKE_CXX_COMPILER=clang\+\+-12 \
    && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF -DCMAKE_C_COMPILER=clang-13 -DCMAKE_CXX_COMPILER=clang\+\+-13 \
    && ninja re2_st clickhouse_grpc_protos \
    && pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j 4 -l ./licence.lic; \
    cp /repo_folder/pvs-studio.log /test_output; \

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-split-build-smoke-test .
FROM yandex/clickhouse-binary-builder
# docker build -t clickhouse/split-build-smoke-test .
FROM clickhouse/binary-builder

COPY run.sh /run.sh
COPY process_split_build_smoke_test_result.py /

@ -1,9 +1,9 @@
# docker build -t yandex/clickhouse-sqlancer-test .
# docker build -t clickhouse/sqlancer-test .
FROM ubuntu:20.04

RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git openjdk-14-jdk maven python3 --yes --no-install-recommends
RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git default-jdk maven python3 --yes --no-install-recommends
RUN wget https://github.com/sqlancer/sqlancer/archive/master.zip -O /sqlancer.zip
RUN mkdir /sqlancer && \
    cd /sqlancer && \

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateful-test .
FROM yandex/clickhouse-stateless-test
# docker build -t clickhouse/stateful-test .
FROM clickhouse/stateless-test

RUN apt-get update -y \
    && env DEBIAN_FRONTEND=noninteractive \

@ -108,7 +108,7 @@ function run_tests()
        ADDITIONAL_OPTIONS+=('--replicated-database')
    fi

    clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --use-skip-list --print-time "${ADDITIONAL_OPTIONS[@]}" \
    clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
        "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
}

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateless-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/stateless-test .
FROM clickhouse/test-base

ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"

@ -97,7 +97,7 @@ function run_tests()
    fi

    clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
        --use-skip-list --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
        --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
        | ts '%Y-%m-%d %H:%M:%S' \
        | tee -a test_output/test_result.txt
}

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateless-pytest .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/stateless-pytest .
FROM clickhouse/test-base

RUN apt-get update -y && \
    apt-get install -y --no-install-recommends \

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stateless-unbundled-test .
FROM yandex/clickhouse-test-base
# docker build -t clickhouse/stateless-unbundled-test .
FROM clickhouse/test-base

ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"

@ -13,8 +13,4 @@ dpkg -i package_folder/clickhouse-test_*.deb

service clickhouse-server start && sleep 5

if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
    SKIP_LIST_OPT="--use-skip-list"
fi

clickhouse-test --testname --shard --zookeeper "$SKIP_LIST_OPT" "$ADDITIONAL_OPTIONS" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
clickhouse-test --testname --shard --zookeeper "$ADDITIONAL_OPTIONS" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-stress-test .
FROM yandex/clickhouse-stateful-test
# docker build -t clickhouse/stress-test .
FROM clickhouse/stateful-test

RUN apt-get update -y \
    && env DEBIAN_FRONTEND=noninteractive \

@ -6,7 +6,7 @@ Usage:
```
$ ls $HOME/someclickhouse
clickhouse-client_18.14.9_all.deb clickhouse-common-static_18.14.9_amd64.deb clickhouse-server_18.14.9_all.deb clickhouse-test_18.14.9_all.deb
$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output yandex/clickhouse-stress-test
$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output clickhouse/stress-test
Selecting previously unselected package clickhouse-common-static.
(Reading database ... 14442 files and directories currently installed.)
...

@ -10,14 +10,6 @@ import logging
import time


def get_skip_list_cmd(path):
    with open(path, 'r') as f:
        for line in f:
            if '--use-skip-list' in line:
                return '--use-skip-list'
        return ''


def get_options(i):
    options = []
    client_options = []
@ -56,8 +48,6 @@ def get_options(i):


def run_func_test(cmd, output_prefix, num_processes, skip_tests_option, global_time_limit):
    skip_list_opt = get_skip_list_cmd(cmd)

    global_time_limit_option = ''
    if global_time_limit:
        global_time_limit_option = "--global_time_limit={}".format(global_time_limit)
@ -66,7 +56,7 @@ def run_func_test(cmd, output_prefix, num_processes, skip_tests_option, global_t
    pipes = []
    for i in range(0, len(output_paths)):
        f = open(output_paths[i], 'w')
        full_command = "{} {} {} {} {}".format(cmd, skip_list_opt, get_options(i), global_time_limit_option, skip_tests_option)
        full_command = "{} {} {} {}".format(cmd, get_options(i), global_time_limit_option, skip_tests_option)
        logging.info("Run func tests '%s'", full_command)
        p = Popen(full_command, shell=True, stdout=f, stderr=f)
        pipes.append(p)

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-style-test .
# docker build -t clickhouse/style-test .
FROM ubuntu:20.04

RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

@ -49,7 +49,7 @@ fi

# Build server image (optional) from local packages
if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then
    CLICKHOUSE_SERVER_IMAGE="yandex/clickhouse-server:local"
    CLICKHOUSE_SERVER_IMAGE="clickhouse/server:local"

    if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then
        docker build --network=host \

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-testflows-runner .
# docker build -t clickhouse/testflows-runner .
FROM ubuntu:20.04

RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-unit-test .
FROM yandex/clickhouse-stateless-test
# docker build -t clickhouse/unit-test .
FROM clickhouse/stateless-test

RUN apt-get install gdb

@ -76,7 +76,7 @@ cd ClickHouse
rm -rf build
mkdir build
cd build
cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-10 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-10 -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-11 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-11 -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake --build . --config RelWithDebInfo
cd ..
```

@ -23,7 +23,7 @@ $ sudo apt-get install git cmake python ninja-build

Or cmake3 instead of cmake on older systems.

### Install clang-12 (recommended) {#install-clang-12}
### Install clang-13 (recommended) {#install-clang-13}

On Ubuntu/Debian you can use the automatic installation script (check the [official webpage](https://apt.llvm.org/)).

@ -33,11 +33,11 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"

For other Linux distributions, check the availability of the [prebuilt packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html).

#### Use clang-12 for Builds
#### Use clang-13 for Builds

``` bash
$ export CC=clang-12
$ export CXX=clang++-12
$ export CC=clang-13
$ export CXX=clang++-13
```

GCC can also be used, though it is discouraged.

@ -210,4 +210,4 @@ ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-

## See also

- [S3 table function](../../../sql-reference/table-functions/s3.md)
- [s3 table function](../../../sql-reference/table-functions/s3.md)

@ -288,5 +288,7 @@ If the data in ZooKeeper was lost or damaged, you can save data by moving it to

- [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size)
- [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size)
- [execute_merges_on_single_replica_time_threshold](../../../operations/settings/settings.md#execute-merges-on-single-replica-time-threshold)
- [max_replicated_fetches_network_bandwidth](../../../operations/settings/merge-tree-settings.md#max_replicated_fetches_network_bandwidth)
- [max_replicated_sends_network_bandwidth](../../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth)

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/replication/) <!--hide-->

@ -23,7 +23,6 @@ The supported formats are:

| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
| [Values](#data-format-values) | ✔ | ✔ |
| [Vertical](#vertical) | ✗ | ✔ |
| [VerticalRaw](#verticalraw) | ✗ | ✔ |
| [JSON](#json) | ✗ | ✔ |
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
| [JSONStrings](#jsonstrings) | ✗ | ✔ |
@ -60,6 +59,7 @@ The supported formats are:
| [LineAsString](#lineasstring) | ✔ | ✗ |
| [Regexp](#data-format-regexp) | ✔ | ✗ |
| [RawBLOB](#rawblob) | ✔ | ✔ |
| [MsgPack](#msgpack) | ✔ | ✔ |

You can control some format processing parameters with the ClickHouse settings. For more information read the [Settings](../operations/settings/settings.md) section.

@ -943,10 +943,6 @@ test: string with 'quotes' and with some special

This format is only appropriate for outputting a query result, but not for parsing (retrieving data to insert in a table).

## VerticalRaw {#verticalraw}

Similar to [Vertical](#vertical), but with escaping disabled. This format is only suitable for outputting query results, not for parsing (receiving data and inserting it in the table).

## XML {#xml}

XML format is suitable only for output, not for parsing. Example:
@ -1551,4 +1547,31 @@ Result:

f9725a22f9191e064120d718e26862a9 -
```

[Original article](https://clickhouse.tech/docs/en/interfaces/formats/) <!--hide-->
## MsgPack {#msgpack}

ClickHouse supports reading and writing [MessagePack](https://msgpack.org/) data files.

### Data Types Matching {#data-types-matching-msgpack}

| MsgPack data type                      | ClickHouse data type                                                                                        |
|----------------------------------------|-------------------------------------------------------------------------------------------------------------|
| `uint N`, `positive fixint`            | [UIntN](../sql-reference/data-types/int-uint.md)                                                              |
| `int N`                                | [IntN](../sql-reference/data-types/int-uint.md)                                                               |
| `fixstr`, `str 8`, `str 16`, `str 32`  | [String](../sql-reference/data-types/string.md), [FixedString](../sql-reference/data-types/fixedstring.md)    |
| `float 32`                             | [Float32](../sql-reference/data-types/float.md)                                                               |
| `float 64`                             | [Float64](../sql-reference/data-types/float.md)                                                               |
| `uint 16`                              | [Date](../sql-reference/data-types/date.md)                                                                   |
| `uint 32`                              | [DateTime](../sql-reference/data-types/datetime.md)                                                           |
| `uint 64`                              | [DateTime64](../sql-reference/data-types/datetime.md)                                                         |
| `fixarray`, `array 16`, `array 32`     | [Array](../sql-reference/data-types/array.md)                                                                 |
| `nil`                                  | [Nothing](../sql-reference/data-types/special-data-types/nothing.md)                                          |

Example:

Writing to a file ".msgpk":

```sql
$ clickhouse-client --query="CREATE TABLE msgpack (array Array(UInt8)) ENGINE = Memory;"
$ clickhouse-client --query="INSERT INTO msgpack VALUES ([0, 1, 2, 3, 42, 253, 254, 255]), ([255, 254, 253, 42, 3, 2, 1, 0])";
$ clickhouse-client --query="SELECT * FROM msgpack FORMAT MsgPack" > tmp_msgpack.msgpk;
```
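
As a complementary sketch (not part of the original example), the file can be read back through the same format; the `msgpack_copy` table name is an illustrative assumption:

```sql
$ clickhouse-client --query="CREATE TABLE msgpack_copy (array Array(UInt8)) ENGINE = Memory;"
$ clickhouse-client --query="INSERT INTO msgpack_copy FORMAT MsgPack" < tmp_msgpack.msgpk
$ clickhouse-client --query="SELECT * FROM msgpack_copy;"
```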

34
docs/en/interfaces/third-party/gui.md
vendored
@ -84,7 +84,7 @@ Features:
- Table data preview.
- Full-text search.

By default, DBeaver does not connect using a session (the CLI for example does). If you require session support (for example to set settings for your session), edit the driver connection properties and set session_id to a random string (it uses the http connection under the hood). Then you can use any setting from the query window
By default, DBeaver does not connect using a session (the CLI for example does). If you require session support (for example to set settings for your session), edit the driver connection properties and set `session_id` to a random string (it uses the http connection under the hood). Then you can use any setting from the query window.

### clickhouse-cli {#clickhouse-cli}

@ -113,6 +113,22 @@ Features:

[MindsDB](https://mindsdb.com/) is an open-source AI layer for databases including ClickHouse that allows you to effortlessly develop, train and deploy state-of-the-art machine learning models. MindsDB Studio (GUI) allows you to train new models from a database, interpret predictions made by the model, identify potential data biases, and evaluate and visualize model accuracy using the Explainable AI function to adapt and tune your Machine Learning models faster.

### DBM {#dbm}

[DBM](https://dbm.incubator.edurt.io/) is a visual management tool for ClickHouse!

Features:

- Support query history (pagination, clear all, etc.)
- Support selected SQL clauses query
- Support terminating query
- Support table management (metadata, delete, preview)
- Support database management (delete, create)
- Support custom query
- Support multiple data sources management (connection test, monitoring)
- Support monitor (processor, connection, query)
- Support migrate data

## Commercial {#commercial}

### DataGrip {#datagrip}
@ -190,20 +206,4 @@ SeekTable is [free](https://www.seektable.com/help/cloud-pricing) for personal/i

[Chadmin](https://github.com/bun4uk/chadmin) is a simple UI where you can visualize your currently running queries on your ClickHouse cluster and info about them and kill them if you want.

### DBM {#dbm}

[DBM](https://dbm.incubator.edurt.io/) is a visual management tool for ClickHouse!

Features:

- Support query history (pagination, clear all, etc.)
- Support selected SQL clauses query
- Support terminating query
- Support table management (metadata, delete, preview)
- Support database management (delete, create)
- Support custom query
- Support multiple data sources management (connection test, monitoring)
- Support monitor (processor, connection, query)
- Support migrate data

[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/) <!--hide-->

@ -3,58 +3,58 @@ toc_priority: 66
toc_title: ClickHouse Keeper
---

# [pre-production] clickhouse-keeper
# [pre-production] ClickHouse Keeper

ClickHouse server use [ZooKeeper](https://zookeeper.apache.org/) coordination system for data [replication](../engines/table-engines/mergetree-family/replication.md) and [distributed DDL](../sql-reference/distributed-ddl.md) queries execution. ClickHouse Keeper is an alternative coordination system compatible with ZooKeeper.
The ClickHouse server uses the [ZooKeeper](https://zookeeper.apache.org/) coordination system for data [replication](../engines/table-engines/mergetree-family/replication.md) and [distributed DDL](../sql-reference/distributed-ddl.md) query execution. ClickHouse Keeper is an alternative coordination system compatible with ZooKeeper.

!!! warning "Warning"
    This feature currently in pre-production stage. We test it in our CI and on small internal installations.
    This feature is currently in the pre-production stage. We test it in our CI and on small internal installations.

## Implementation details

ZooKeeper is one of the first well-known open-source coordination systems. It's implemented in Java, has quite a simple and powerful data model. ZooKeeper's coordination algorithm called ZAB (ZooKeeper Atomic Broadcast) doesn't provide linearizability guarantees for reads, because each ZooKeeper node serves reads locally. Unlike ZooKeeper `clickhouse-keeper` written in C++ and use [RAFT algorithm](https://raft.github.io/) [implementation](https://github.com/eBay/NuRaft). This algorithm allows to have linearizability for reads and writes, has several open-source implementations in different languages.
ZooKeeper is one of the first well-known open-source coordination systems. It's implemented in Java and has quite a simple and powerful data model. ZooKeeper's coordination algorithm, called ZAB (ZooKeeper Atomic Broadcast), doesn't provide linearizability guarantees for reads, because each ZooKeeper node serves reads locally. Unlike ZooKeeper, ClickHouse Keeper is written in C++ and uses the [RAFT algorithm](https://raft.github.io/) [implementation](https://github.com/eBay/NuRaft). This algorithm allows linearizability for reads and writes and has several open-source implementations in different languages.

By default, `clickhouse-keeper` provides the same guarantees as ZooKeeper (linearizable writes, non-linearizable reads). It has a compatible client-server protocol, so any standard ZooKeeper client can be used to interact with `clickhouse-keeper`. Snapshots and logs have an incompatible format with ZooKeeper, but `clickhouse-keeper-converter` tool allows to convert ZooKeeper data to `clickhouse-keeper` snapshot. Interserver protocol in `clickhouse-keeper` also incompatible with ZooKeeper so mixed ZooKeeper/clickhouse-keeper cluster is impossible.
By default, ClickHouse Keeper provides the same guarantees as ZooKeeper (linearizable writes, non-linearizable reads). It has a compatible client-server protocol, so any standard ZooKeeper client can be used to interact with ClickHouse Keeper. Snapshots and logs have an incompatible format with ZooKeeper, but the `clickhouse-keeper-converter` tool allows converting ZooKeeper data to a ClickHouse Keeper snapshot. The interserver protocol in ClickHouse Keeper is also incompatible with ZooKeeper, so a mixed ZooKeeper / ClickHouse Keeper cluster is impossible.

## Configuration

`clickhouse-keeper` can be used as a standalone replacement for ZooKeeper or as an internal part of the `clickhouse-server`, but in both cases configuration is almost the same `.xml` file. The main `clickhouse-keeper` configuration tag is `<keeper_server>`. Keeper configuration has the following parameters:
ClickHouse Keeper can be used as a standalone replacement for ZooKeeper or as an internal part of the ClickHouse server, but in both cases the configuration is almost the same `.xml` file. The main ClickHouse Keeper configuration tag is `<keeper_server>`. Keeper configuration has the following parameters (a minimal sketch follows the list):

- `tcp_port` — the port for a client to connect (default for ZooKeeper is `2181`)
- `tcp_port_secure` — the secure port for a client to connect
- `server_id` — unique server id, each participant of the clickhouse-keeper cluster must have a unique number (1, 2, 3, and so on)
- `log_storage_path` — path to coordination logs, better to store logs on the non-busy device (same for ZooKeeper)
- `snapshot_storage_path` — path to coordination snapshots
- `tcp_port` — Port for a client to connect (default for ZooKeeper is `2181`).
- `tcp_port_secure` — Secure port for a client to connect.
- `server_id` — Unique server id, each participant of the ClickHouse Keeper cluster must have a unique number (1, 2, 3, and so on).
- `log_storage_path` — Path to coordination logs, better to store logs on a non-busy device (same as for ZooKeeper).
- `snapshot_storage_path` — Path to coordination snapshots.
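
For orientation, a minimal sketch of how these parameters sit inside the main config; the port number and paths below are illustrative assumptions, not prescribed defaults:

```xml
<keeper_server>
    <tcp_port>9181</tcp_port>
    <server_id>1</server_id>
    <!-- illustrative paths; put logs on a non-busy device in production -->
    <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
    <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
</keeper_server>
```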

Other common parameters are inherited from clickhouse-server config (`listen_host`, `logger` and so on).
Other common parameters are inherited from the ClickHouse server config (`listen_host`, `logger`, and so on).

Internal coordination settings are located in the `<keeper_server>.<coordination_settings>` section:

- `operation_timeout_ms` — timeout for a single client operation (default: 10000)
- `session_timeout_ms` — timeout for client session (default: 30000)
- `dead_session_check_period_ms` — how often clickhouse-keeper check dead sessions and remove them (default: 500)
- `heart_beat_interval_ms` — how often a clickhouse-keeper leader will send heartbeats to followers (default: 500)
- `election_timeout_lower_bound_ms` — if follower didn't receive heartbeats from the leader in this interval, then it can initiate leader election (default: 1000)
- `election_timeout_upper_bound_ms` — if follower didn't receive heartbeats from the leader in this interval, then it must initiate leader election (default: 2000)
- `rotate_log_storage_interval` — how many log records to store in a single file (default: 100000)
- `reserved_log_items` — how many coordination log records to store before compaction (default: 100000)
- `snapshot_distance` — how often clickhouse-keeper will create new snapshots (in the number of records in logs) (default: 100000)
- `snapshots_to_keep` — how many snapshots to keep (default: 3)
- `stale_log_gap` — the threshold when leader consider follower as stale and send snapshot to it instead of logs (default: 10000)
- `fresh_log_gap` - when node became fresh (default: 200)
- `max_requests_batch_size` - max size of batch in requests count before it will be sent to RAFT (default: 100)
- `force_sync` — call `fsync` on each write to coordination log (default: true)
- `quorum_reads` - execute read requests as writes through whole RAFT consesus with similar speed (default: false)
- `raft_logs_level` — text logging level about coordination (trace, debug, and so on) (default: system default)
- `auto_forwarding` - allow to forward write requests from followers to leader (default: true)
- `shutdown_timeout` — wait to finish internal connections and shutdown (ms) (default: 5000)
- `startup_timeout` — if the server doesn't connect to other quorum participants in the specified timeout it will terminate (ms) (default: 30000)
- `operation_timeout_ms` — Timeout for a single client operation (ms) (default: 10000).
- `session_timeout_ms` — Timeout for a client session (ms) (default: 30000).
- `dead_session_check_period_ms` — How often ClickHouse Keeper checks dead sessions and removes them (ms) (default: 500).
- `heart_beat_interval_ms` — How often a ClickHouse Keeper leader will send heartbeats to followers (ms) (default: 500).
- `election_timeout_lower_bound_ms` — If the follower didn't receive heartbeats from the leader in this interval, then it can initiate leader election (default: 1000).
- `election_timeout_upper_bound_ms` — If the follower didn't receive heartbeats from the leader in this interval, then it must initiate leader election (default: 2000).
- `rotate_log_storage_interval` — How many log records to store in a single file (default: 100000).
- `reserved_log_items` — How many coordination log records to store before compaction (default: 100000).
- `snapshot_distance` — How often ClickHouse Keeper will create new snapshots (in the number of records in logs) (default: 100000).
- `snapshots_to_keep` — How many snapshots to keep (default: 3).
- `stale_log_gap` — Threshold when the leader considers a follower as stale and sends the snapshot to it instead of logs (default: 10000).
- `fresh_log_gap` — When the node becomes fresh (default: 200).
- `max_requests_batch_size` — Max size of a batch in requests count before it is sent to RAFT (default: 100).
- `force_sync` — Call `fsync` on each write to the coordination log (default: true).
- `quorum_reads` — Execute read requests as writes through the whole RAFT consensus with similar speed (default: false).
- `raft_logs_level` — Text logging level about coordination (trace, debug, and so on) (default: system default).
- `auto_forwarding` — Allow forwarding write requests from followers to the leader (default: true).
- `shutdown_timeout` — Wait to finish internal connections and shutdown (ms) (default: 5000).
- `startup_timeout` — If the server doesn't connect to other quorum participants in the specified timeout it will terminate (ms) (default: 30000).

Quorum configuration is located in the `<keeper_server>.<raft_configuration>` section and contains a description of the servers. The only parameter for the whole quorum is `secure`, which enables encrypted connection for communication between quorum participants. The main parameters for each `<server>` are:

- `id` — server_id in quorum
- `hostname` — hostname where this server placed
- `port` — port where this server listen for connections
- `id` — Server identifier in a quorum.
- `hostname` — Hostname where this server is placed.
- `port` — Port where this server listens for connections.

Examples of configuration for quorum with three nodes can be found in [integration tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/integration) with `test_keeper_` prefix. Example configuration for server #1:
@ -94,7 +94,7 @@ Examples of configuration for quorum with three nodes can be found in [integrati

## How to run

`clickhouse-keeper` is bundled into `clickhouse-server` package, just add configuration of `<keeper_server>` and start clickhouse-server as always. If you want to run standalone `clickhouse-keeper` you can start it in a similar way with:
ClickHouse Keeper is bundled into the ClickHouse server package: just add the configuration of `<keeper_server>` and start the ClickHouse server as always. If you want to run a standalone ClickHouse Keeper you can start it in a similar way with:

```bash
clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon
@ -102,17 +102,18 @@ clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon

## [experimental] Migration from ZooKeeper

Seamlessly migration from ZooKeeper to `clickhouse-keeper` is impossible you have to stop your ZooKeeper cluster, convert data and start `clickhouse-keeper`. `clickhouse-keeper-converter` tool allows to convert ZooKeeper logs and snapshots to `clickhouse-keeper` snapshot. It works only with ZooKeeper > 3.4. Steps for migration:
Seamless migration from ZooKeeper to ClickHouse Keeper is impossible: you have to stop your ZooKeeper cluster, convert the data, and start ClickHouse Keeper. The `clickhouse-keeper-converter` tool allows converting ZooKeeper logs and snapshots to a ClickHouse Keeper snapshot. It works only with ZooKeeper > 3.4. Steps for migration:

1. Stop all ZooKeeper nodes.

2. [optional, but recommended] Found ZooKeeper leader node, start and stop it again. It will force ZooKeeper to create consistent snapshot.
2. Optional, but recommended: find the ZooKeeper leader node, start and stop it again. It will force ZooKeeper to create a consistent snapshot.

3. Run `clickhouse-keeper-converter` on leader, example
3. Run `clickhouse-keeper-converter` on a leader, for example:

```bash
clickhouse-keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/version-2 --output-dir /path/to/clickhouse/keeper/snapshots
```

4. Copy snapshot to `clickhouse-server` nodes with configured `keeper` or start `clickhouse-keeper` instead of ZooKeeper. Snapshot must persist on all nodes, otherwise empty nodes can be faster and one of them can becamse leader.
4. Copy the snapshot to ClickHouse server nodes with a configured `keeper`, or start ClickHouse Keeper instead of ZooKeeper. The snapshot must persist on all nodes; otherwise, empty nodes can be faster and one of them can become a leader.

[Original article](https://clickhouse.tech/docs/en/operations/clickhouse-keeper/) <!--hide-->

@ -69,29 +69,85 @@ If no conditions met for a data part, ClickHouse uses the `lz4` compression.
</compression>
```

<!--
## encryption {#server-settings-encryption}

Configures a command to obtain a key to be used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). The command, or a shell script, is expected to write a Base64-encoded key of any length to the stdout.
Configures a key (or keys) to be used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). Keys should be stored in environment variables or set in the configuration file.

Keys can be hex or string, and their length must be equal to 16 bytes.

**Example**

For Linux with systemd:
Load from config:

```xml
<encryption>
    <key_command>/usr/bin/systemd-ask-password --id="clickhouse-server" --timeout=0 "Enter the ClickHouse encryption passphrase:" | base64</key_command>
</encryption>
<encryption_codecs>
    <aes_128_gcm_siv>
        <key>1234567812345678</key>
    </aes_128_gcm_siv>
</encryption_codecs>
```

For other systems:
!!! note "NOTE"
    Storing keys in the configuration file is not recommended: it isn't secure. You can move the keys into a separate config file on a secure disk and put a symlink to that config file into the `config.d/` folder.

Load from config, when the key is in hex:

```xml
<encryption>
    <key_command><![CDATA[IFS=; echo -n >/dev/tty "Enter the ClickHouse encryption passphrase: "; stty=`stty -F /dev/tty -g`; stty -F /dev/tty -echo; read k </dev/tty; stty -F /dev/tty "$stty"; echo -n $k | base64]]></key_command>
</encryption>
<encryption_codecs>
    <aes_128_gcm_siv>
        <key_hex>00112233445566778899aabbccddeeff</key_hex>
    </aes_128_gcm_siv>
</encryption_codecs>
```
-->

Load the key from an environment variable:

```xml
<encryption_codecs>
    <aes_128_gcm_siv>
        <key_hex from_env="KEY"></key_hex>
    </aes_128_gcm_siv>
</encryption_codecs>
```

Where `current_key_id` sets the current key for encryption, and all specified keys can be used for decryption.

All these methods can be applied to multiple keys:

```xml
<encryption_codecs>
    <aes_128_gcm_siv>
        <key_hex id="0">00112233445566778899aabbccddeeff</key_hex>
        <key_hex id="1" from_env=".."></key_hex>
        <current_key_id>1</current_key_id>
    </aes_128_gcm_siv>
</encryption_codecs>
```

Where `current_key_id` shows the current key for encryption.

A user can also add a nonce, which must be 12 bytes long (by default, encryption and decryption use a nonce consisting of zero bytes):

```xml
<encryption_codecs>
    <aes_128_gcm_siv>
        <nonce>012345678910</nonce>
    </aes_128_gcm_siv>
</encryption_codecs>
```

Or it can be set in hex:

```xml
<encryption_codecs>
    <aes_128_gcm_siv>
        <nonce_hex>abcdefabcdefabcdefabcdef</nonce_hex>
    </aes_128_gcm_siv>
</encryption_codecs>
```

Everything above can be applied to `aes_256_gcm_siv` (but the key must be 32 bytes long).
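
To show where the configured keys are eventually used, here is a hedged sketch of a table that applies the codec to one column; the table and column names are assumptions for the example:

```sql
CREATE TABLE sensitive_data
(
    id UInt64,
    -- the key for AES_128_GCM_SIV is taken from the <encryption_codecs> config above
    secret String CODEC(AES_128_GCM_SIV)
)
ENGINE = MergeTree
ORDER BY id;
```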

## custom_settings_prefixes {#custom_settings_prefixes}

List of prefixes for [custom settings](../../operations/settings/index.md#custom_settings). The prefixes must be separated with commas.

@ -181,6 +181,44 @@ Possible values:

Default value: 0.

## max_replicated_fetches_network_bandwidth {#max_replicated_fetches_network_bandwidth}

Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) fetches. This setting is applied to a particular table, unlike the [max_replicated_fetches_network_bandwidth_for_server](settings.md#max_replicated_fetches_network_bandwidth_for_server) setting, which is applied to the server.

You can limit both the server network and the network for a particular table, but for this the value of the table-level setting should be less than the server-level one. Otherwise the server considers only the `max_replicated_fetches_network_bandwidth_for_server` setting.

The limit is not enforced with perfect accuracy.

Possible values:

- Positive integer.
- 0 — Unlimited.

Default value: `0`.

**Usage**

Can be used to throttle the replication speed when adding or replacing new nodes.
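
For example, a sketch of capping fetches for a single table at roughly 50 MiB/s; the table name is an illustrative assumption:

```sql
-- 52428800 bytes per second = 50 MiB/s; applies to this table only
ALTER TABLE replicated_table MODIFY SETTING max_replicated_fetches_network_bandwidth = 52428800;
```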

## max_replicated_sends_network_bandwidth {#max_replicated_sends_network_bandwidth}

Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) sends. This setting is applied to a particular table, unlike the [max_replicated_sends_network_bandwidth_for_server](settings.md#max_replicated_sends_network_bandwidth_for_server) setting, which is applied to the server.

You can limit both the server network and the network for a particular table, but for this the value of the table-level setting should be less than the server-level one. Otherwise the server considers only the `max_replicated_sends_network_bandwidth_for_server` setting.

The limit is not enforced with perfect accuracy.

Possible values:

- Positive integer.
- 0 — Unlimited.

Default value: `0`.

**Usage**

Can be used to throttle the replication speed when adding or replacing new nodes.

## old_parts_lifetime {#old-parts-lifetime}

The time (in seconds) of storing inactive parts to protect against data loss during spontaneous server reboots.

@ -1140,6 +1140,40 @@ Possible values:

Default value: `5`.

## max_replicated_fetches_network_bandwidth_for_server {#max_replicated_fetches_network_bandwidth_for_server}

Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) fetches for the server. Only takes effect at server startup. You can also limit the speed for a particular table with the [max_replicated_fetches_network_bandwidth](../../operations/settings/merge-tree-settings.md#max_replicated_fetches_network_bandwidth) setting.

The limit is not enforced with perfect accuracy.

Possible values:

- Positive integer.
- 0 — Unlimited.

Default value: `0`.

**Usage**

Can be used to throttle the replication speed when adding or replacing new nodes.
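
Because it only takes effect at server startup, the limit is typically set in the default settings profile rather than per query. A hedged sketch of the config fragment, with an illustrative 1 GiB/s value:

```xml
<!-- users.xml sketch; 1073741824 bytes per second = 1 GiB/s -->
<profiles>
    <default>
        <max_replicated_fetches_network_bandwidth_for_server>1073741824</max_replicated_fetches_network_bandwidth_for_server>
    </default>
</profiles>
```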

## max_replicated_sends_network_bandwidth_for_server {#max_replicated_sends_network_bandwidth_for_server}

Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) sends for the server. Only takes effect at server startup. You can also limit the speed for a particular table with the [max_replicated_sends_network_bandwidth](../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth) setting.

The limit is not enforced with perfect accuracy.

Possible values:

- Positive integer.
- 0 — Unlimited.

Default value: `0`.

**Usage**

Can be used to throttle the replication speed when adding or replacing new nodes.

## connect_timeout_with_failover_ms {#connect-timeout-with-failover-ms}

The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition.
@ -3499,6 +3533,30 @@ Possible values:

Default value: `0`.

## replication_alter_partitions_sync {#replication-alter-partitions-sync}

Allows setting up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.

Possible values:

- 0 — Do not wait.
- 1 — Wait for own execution.
- 2 — Wait for everyone.

Default value: `1`.
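
A short sketch of typical usage; the table name is an illustrative assumption:

```sql
SET replication_alter_partitions_sync = 2;  -- wait for everyone
OPTIMIZE TABLE replicated_table FINAL;      -- returns only after all replicas have executed it
```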

## replication_wait_for_inactive_replica_timeout {#replication-wait-for-inactive-replica-timeout}

Specifies how long (in seconds) to wait for inactive replicas to execute [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.

Possible values:

- 0 — Do not wait.
- Negative integer — Wait for unlimited time.
- Positive integer — The number of seconds to wait.

Default value: `120` seconds.

## regexp_max_matches_per_row {#regexp-max-matches-per-row}

Sets the maximum number of matches for a single regular expression per row. Use it to protect against memory overload when using greedy regular expressions in the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md#extractallgroups-horizontal) function.
@ -3508,3 +3566,91 @@ Possible values:
- Positive integer.

Default value: `1000`.
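
For illustration, the limit applied to a single query; the input string and pattern are assumptions for the example:

```sql
SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333', '(\\w+)=(\\d+)')
SETTINGS regexp_max_matches_per_row = 1000;
```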

## max_hyperscan_regexp_length {#max-hyperscan-regexp-length}

Defines the maximum length for each regular expression in the [hyperscan multi-match functions](../../sql-reference/functions/string-search-functions.md#multimatchanyhaystack-pattern1-pattern2-patternn).

Possible values:

- Positive integer.
- 0 — The length is not limited.

Default value: `0`.

**Example**

Query:

```sql
SELECT multiMatchAny('abcd', ['ab','bcd','c','d']) SETTINGS max_hyperscan_regexp_length = 3;
```

Result:

```text
┌─multiMatchAny('abcd', ['ab', 'bcd', 'c', 'd'])─┐
│                                              1 │
└────────────────────────────────────────────────┘
```

Query:

```sql
SELECT multiMatchAny('abcd', ['ab','bcd','c','d']) SETTINGS max_hyperscan_regexp_length = 2;
```

Result:

```text
Exception: Regexp length too large.
```

**See Also**

- [max_hyperscan_regexp_total_length](#max-hyperscan-regexp-total-length)

## max_hyperscan_regexp_total_length {#max-hyperscan-regexp-total-length}

Sets the maximum total length of all regular expressions in each [hyperscan multi-match function](../../sql-reference/functions/string-search-functions.md#multimatchanyhaystack-pattern1-pattern2-patternn).

Possible values:

- Positive integer.
- 0 — The length is not limited.

Default value: `0`.

**Example**

Query:

```sql
SELECT multiMatchAny('abcd', ['a','b','c','d']) SETTINGS max_hyperscan_regexp_total_length = 5;
```

Result:

```text
┌─multiMatchAny('abcd', ['a', 'b', 'c', 'd'])─┐
│                                           1 │
└─────────────────────────────────────────────┘
```

Query:

```sql
SELECT multiMatchAny('abcd', ['ab','bc','c','d']) SETTINGS max_hyperscan_regexp_total_length = 5;
```

Result:

```text
Exception: Total regexp lengths too large.
```

**See Also**

- [max_hyperscan_regexp_length](#max-hyperscan-regexp-length)