Merge branch 'master' into feature/fix-session-expired-when-selecting-system-tables

commit a312dd414c
Author: Fuwang Hu
Date: 2021-05-11 20:04:36 +08:00 (committed by GitHub)
774 changed files with 43779 additions and 7485 deletions

View File

@@ -169,8 +169,8 @@ endif ()
 option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
 option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)

-if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
-    # Only for Linux, x86_64.
+if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
+    # Only for Linux, x86_64 or aarch64.
     option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
 elseif(GLIBC_COMPATIBILITY)
     message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")

View File

@@ -0,0 +1,216 @@

#pragma once

#include <cstdint>
#include <cstddef>
#include <cstring>
#include <common/extended_types.h>


/// Allows to check the internals of IEEE-754 floating point number.

template <typename T> struct FloatTraits;

template <>
struct FloatTraits<float>
{
    using UInt = uint32_t;
    static constexpr size_t bits = 32;
    static constexpr size_t exponent_bits = 8;
    static constexpr size_t mantissa_bits = bits - exponent_bits - 1;
};

template <>
struct FloatTraits<double>
{
    using UInt = uint64_t;
    static constexpr size_t bits = 64;
    static constexpr size_t exponent_bits = 11;
    static constexpr size_t mantissa_bits = bits - exponent_bits - 1;
};


/// x = sign * (2 ^ normalized_exponent) * (1 + mantissa * 2 ^ -mantissa_bits)
/// x = sign * (2 ^ normalized_exponent + mantissa * 2 ^ (normalized_exponent - mantissa_bits))
template <typename T>
struct DecomposedFloat
{
    using Traits = FloatTraits<T>;

    DecomposedFloat(T x)
    {
        memcpy(&x_uint, &x, sizeof(x));
    }

    typename Traits::UInt x_uint;

    bool is_negative() const
    {
        return x_uint >> (Traits::bits - 1);
    }

    /// Returns 0 for both +0. and -0.
    int sign() const
    {
        return (exponent() == 0 && mantissa() == 0)
            ? 0
            : (is_negative()
                ? -1
                : 1);
    }

    uint16_t exponent() const
    {
        return (x_uint >> (Traits::mantissa_bits)) & (((1ull << (Traits::exponent_bits + 1)) - 1) >> 1);
    }

    int16_t normalized_exponent() const
    {
        return int16_t(exponent()) - ((1ull << (Traits::exponent_bits - 1)) - 1);
    }

    uint64_t mantissa() const
    {
        return x_uint & ((1ull << Traits::mantissa_bits) - 1);
    }

    int64_t mantissa_with_sign() const
    {
        return is_negative() ? -mantissa() : mantissa();
    }

    /// NOTE Probably floating point instructions can be better.
    bool is_integer_in_representable_range() const
    {
        return x_uint == 0
            || (normalized_exponent() >= 0  /// The number is not less than one
                /// The number is inside the range where every integer has exact representation in float
                && normalized_exponent() <= static_cast<int16_t>(Traits::mantissa_bits)
                /// After multiplying by 2^exp, the fractional part becomes zero, means the number is integer
                && ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0));
    }

    /// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic.
    /// Infinities are compared correctly. NaNs are treat similarly to infinities, so they can be less than all numbers.
    /// (note that we need total order)
    template <typename Int>
    int compare(Int rhs)
    {
        if (rhs == 0)
            return sign();

        /// Different signs
        if (is_negative() && rhs > 0)
            return -1;
        if (!is_negative() && rhs < 0)
            return 1;

        /// Fractional number with magnitude less than one
        if (normalized_exponent() < 0)
        {
            if (!is_negative())
                return rhs > 0 ? -1 : 1;
            else
                return rhs >= 0 ? -1 : 1;
        }

        /// The case of the most negative integer
        if constexpr (is_signed_v<Int>)
        {
            if (rhs == std::numeric_limits<Int>::lowest())
            {
                assert(is_negative());

                if (normalized_exponent() < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
                    return 1;
                if (normalized_exponent() > static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
                    return -1;

                if (mantissa() == 0)
                    return 0;
                else
                    return -1;
            }
        }

        /// Too large number: abs(float) > abs(rhs). Also the case with infinities and NaN.
        if (normalized_exponent() >= static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
            return is_negative() ? -1 : 1;

        using UInt = make_unsigned_t<Int>;
        UInt uint_rhs = rhs < 0 ? -rhs : rhs;

        /// Smaller octave: abs(rhs) < abs(float)
        if (uint_rhs < (static_cast<UInt>(1) << normalized_exponent()))
            return is_negative() ? -1 : 1;

        /// Larger octave: abs(rhs) > abs(float)
        if (normalized_exponent() + 1 < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>)
            && uint_rhs >= (static_cast<UInt>(1) << (normalized_exponent() + 1)))
            return is_negative() ? 1 : -1;

        /// The same octave
        /// uint_rhs == 2 ^ normalized_exponent + mantissa * 2 ^ (normalized_exponent - mantissa_bits)
        bool large_and_always_integer = normalized_exponent() >= static_cast<int16_t>(Traits::mantissa_bits);

        typename Traits::UInt a = large_and_always_integer
            ? mantissa() << (normalized_exponent() - Traits::mantissa_bits)
            : mantissa() >> (Traits::mantissa_bits - normalized_exponent());

        typename Traits::UInt b = uint_rhs - (static_cast<UInt>(1) << normalized_exponent());

        if (a < b)
            return is_negative() ? 1 : -1;
        if (a > b)
            return is_negative() ? -1 : 1;

        /// Float has no fractional part means that the numbers are equal.
        if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0)
            return 0;
        else
            /// Float has fractional part means its abs value is larger.
            return is_negative() ? -1 : 1;
    }


    template <typename Int>
    bool equals(Int rhs)
    {
        return compare(rhs) == 0;
    }

    template <typename Int>
    bool notEquals(Int rhs)
    {
        return compare(rhs) != 0;
    }

    template <typename Int>
    bool less(Int rhs)
    {
        return compare(rhs) < 0;
    }

    template <typename Int>
    bool greater(Int rhs)
    {
        return compare(rhs) > 0;
    }

    template <typename Int>
    bool lessOrEquals(Int rhs)
    {
        return compare(rhs) <= 0;
    }

    template <typename Int>
    bool greaterOrEquals(Int rhs)
    {
        return compare(rhs) >= 0;
    }
};


using DecomposedFloat64 = DecomposedFloat<double>;
using DecomposedFloat32 = DecomposedFloat<float>;
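
To make the intent of the new header concrete, here is a hedged, standalone sketch (not part of the commit) of how DecomposedFloat can compare a double against a 64-bit integer without a lossy conversion. The include path is assumed, since the diff view above does not show file names.

#include <cstdint>
#include <cassert>
#include "DecomposedFloat.h" // assumed name/path of the header added above

int main()
{
    // 2^53 + 1 is not representable in a double, so a naive `double(x) == y`
    // comparison after conversion would report equality with 2^53.
    uint64_t big = (1ULL << 53) + 1;
    double two_pow_53 = 9007199254740992.0; // exactly 2^53

    DecomposedFloat64 d(two_pow_53);
    assert(d.less(big));            // 2^53 < 2^53 + 1, despite double rounding
    assert(d.equals(1ULL << 53));   // the exactly representable value compares equal
    assert(DecomposedFloat32(-0.0f).sign() == 0); // +0. and -0. both report sign 0
}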

View File

@@ -91,6 +91,10 @@ ReplxxLineReader::ReplxxLineReader(
     /// it also binded to M-p/M-n).
     rx.bind_key(Replxx::KEY::meta('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_NEXT, code); });
     rx.bind_key(Replxx::KEY::meta('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_PREVIOUS, code); });
+    /// By default M-BACKSPACE is KILL_TO_WHITESPACE_ON_LEFT, while in readline it is backward-kill-word
+    rx.bind_key(Replxx::KEY::meta(Replxx::KEY::BACKSPACE), [this](char32_t code) { return rx.invoke(Replxx::ACTION::KILL_TO_BEGINING_OF_WORD, code); });
+    /// By default C-w is KILL_TO_BEGINING_OF_WORD, while in readline it is unix-word-rubout
+    rx.bind_key(Replxx::KEY::control('W'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::KILL_TO_WHITESPACE_ON_LEFT, code); });
     rx.bind_key(Replxx::KEY::meta('E'), [this](char32_t) { openEditor(); return Replxx::ACTION_RESULT::CONTINUE; });
 }

View File

@@ -56,27 +56,33 @@ namespace common
     }

     template <>
-    inline bool addOverflow(__int128 x, __int128 y, __int128 & res)
+    inline bool addOverflow(Int128 x, Int128 y, Int128 & res)
     {
-        static constexpr __int128 min_int128 = minInt128();
-        static constexpr __int128 max_int128 = maxInt128();
         res = addIgnoreOverflow(x, y);
-        return (y > 0 && x > max_int128 - y) || (y < 0 && x < min_int128 - y);
+        return (y > 0 && x > std::numeric_limits<Int128>::max() - y) ||
+            (y < 0 && x < std::numeric_limits<Int128>::min() - y);
     }

     template <>
-    inline bool addOverflow(wInt256 x, wInt256 y, wInt256 & res)
+    inline bool addOverflow(UInt128 x, UInt128 y, UInt128 & res)
     {
         res = addIgnoreOverflow(x, y);
-        return (y > 0 && x > std::numeric_limits<wInt256>::max() - y) ||
-            (y < 0 && x < std::numeric_limits<wInt256>::min() - y);
+        return x > std::numeric_limits<UInt128>::max() - y;
     }

     template <>
-    inline bool addOverflow(wUInt256 x, wUInt256 y, wUInt256 & res)
+    inline bool addOverflow(Int256 x, Int256 y, Int256 & res)
     {
         res = addIgnoreOverflow(x, y);
-        return x > std::numeric_limits<wUInt256>::max() - y;
+        return (y > 0 && x > std::numeric_limits<Int256>::max() - y) ||
+            (y < 0 && x < std::numeric_limits<Int256>::min() - y);
+    }
+
+    template <>
+    inline bool addOverflow(UInt256 x, UInt256 y, UInt256 & res)
+    {
+        res = addIgnoreOverflow(x, y);
+        return x > std::numeric_limits<UInt256>::max() - y;
     }

     template <typename T>
@@ -104,24 +110,30 @@ namespace common
     }

     template <>
-    inline bool subOverflow(__int128 x, __int128 y, __int128 & res)
+    inline bool subOverflow(Int128 x, Int128 y, Int128 & res)
     {
-        static constexpr __int128 min_int128 = minInt128();
-        static constexpr __int128 max_int128 = maxInt128();
         res = subIgnoreOverflow(x, y);
-        return (y < 0 && x > max_int128 + y) || (y > 0 && x < min_int128 + y);
+        return (y < 0 && x > std::numeric_limits<Int128>::max() + y) ||
+            (y > 0 && x < std::numeric_limits<Int128>::min() + y);
     }

     template <>
-    inline bool subOverflow(wInt256 x, wInt256 y, wInt256 & res)
+    inline bool subOverflow(UInt128 x, UInt128 y, UInt128 & res)
     {
         res = subIgnoreOverflow(x, y);
-        return (y < 0 && x > std::numeric_limits<wInt256>::max() + y) ||
-            (y > 0 && x < std::numeric_limits<wInt256>::min() + y);
+        return x < y;
     }

     template <>
-    inline bool subOverflow(wUInt256 x, wUInt256 y, wUInt256 & res)
+    inline bool subOverflow(Int256 x, Int256 y, Int256 & res)
+    {
+        res = subIgnoreOverflow(x, y);
+        return (y < 0 && x > std::numeric_limits<Int256>::max() + y) ||
+            (y > 0 && x < std::numeric_limits<Int256>::min() + y);
+    }
+
+    template <>
+    inline bool subOverflow(UInt256 x, UInt256 y, UInt256 & res)
     {
         res = subIgnoreOverflow(x, y);
         return x < y;
@@ -151,36 +163,33 @@ namespace common
         return __builtin_smulll_overflow(x, y, &res);
     }

+    /// Overflow check is not implemented for big integers.
+
     template <>
-    inline bool mulOverflow(__int128 x, __int128 y, __int128 & res)
+    inline bool mulOverflow(Int128 x, Int128 y, Int128 & res)
     {
         res = mulIgnoreOverflow(x, y);
-        if (!x || !y)
-            return false;
-
-        unsigned __int128 a = (x > 0) ? x : -x;
-        unsigned __int128 b = (y > 0) ? y : -y;
-        return mulIgnoreOverflow(a, b) / b != a;
+        return false;
     }

     template <>
-    inline bool mulOverflow(wInt256 x, wInt256 y, wInt256 & res)
+    inline bool mulOverflow(Int256 x, Int256 y, Int256 & res)
     {
         res = mulIgnoreOverflow(x, y);
-        if (!x || !y)
-            return false;
-
-        wInt256 a = (x > 0) ? x : -x;
-        wInt256 b = (y > 0) ? y : -y;
-        return mulIgnoreOverflow(a, b) / b != a;
+        return false;
     }

     template <>
-    inline bool mulOverflow(wUInt256 x, wUInt256 y, wUInt256 & res)
+    inline bool mulOverflow(UInt128 x, UInt128 y, UInt128 & res)
     {
         res = mulIgnoreOverflow(x, y);
-        if (!x || !y)
-            return false;
-        return res / y != x;
+        return false;
+    }
+
+    template <>
+    inline bool mulOverflow(UInt256 x, UInt256 y, UInt256 & res)
+    {
+        res = mulIgnoreOverflow(x, y);
+        return false;
     }
 }
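
A hedged usage sketch (not part of the commit): how the checked helpers above are typically called after the rename to the new Int128/Int256 aliases. The header/include paths are assumed, as they are not shown in this view.

#include <iostream>
// assumed: the arithmeticOverflow and extended_types headers from this commit are on the include path

int main()
{
    Int128 a = Int128(1) << 126;
    Int128 b = a;
    Int128 sum;

    // addOverflow stores the wrapped result in `sum` and reports whether it overflowed.
    if (common::addOverflow(a, b, sum))
        std::cout << "signed 128-bit addition overflowed\n";
    else
        std::cout << "sum fits in Int128\n";
}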

View File

@@ -5,16 +5,14 @@
 #include <common/types.h>
 #include <common/wide_integer.h>

-using Int128 = __int128;
-using wInt256 = wide::integer<256, signed>;
-using wUInt256 = wide::integer<256, unsigned>;
+using Int128 = wide::integer<128, signed>;
+using UInt128 = wide::integer<128, unsigned>;
+using Int256 = wide::integer<256, signed>;
+using UInt256 = wide::integer<256, unsigned>;

-static_assert(sizeof(wInt256) == 32);
-static_assert(sizeof(wUInt256) == 32);
-
-static constexpr __int128 minInt128() { return static_cast<unsigned __int128>(1) << 127; }
-static constexpr __int128 maxInt128() { return (static_cast<unsigned __int128>(1) << 127) - 1; }
+static_assert(sizeof(Int256) == 32);
+static_assert(sizeof(UInt256) == 32);

 /// The standard library type traits, such as std::is_arithmetic, with one exception
 /// (std::common_type), are "set in stone". Attempting to specialize them causes undefined behavior.
@@ -26,7 +24,7 @@ struct is_signed
 };

 template <> struct is_signed<Int128> { static constexpr bool value = true; };
-template <> struct is_signed<wInt256> { static constexpr bool value = true; };
+template <> struct is_signed<Int256> { static constexpr bool value = true; };

 template <typename T>
 inline constexpr bool is_signed_v = is_signed<T>::value;
@@ -37,7 +35,8 @@ struct is_unsigned
     static constexpr bool value = std::is_unsigned_v<T>;
 };

-template <> struct is_unsigned<wUInt256> { static constexpr bool value = true; };
+template <> struct is_unsigned<UInt128> { static constexpr bool value = true; };
+template <> struct is_unsigned<UInt256> { static constexpr bool value = true; };

 template <typename T>
 inline constexpr bool is_unsigned_v = is_unsigned<T>::value;
@@ -51,8 +50,9 @@ struct is_integer
 };

 template <> struct is_integer<Int128> { static constexpr bool value = true; };
-template <> struct is_integer<wInt256> { static constexpr bool value = true; };
-template <> struct is_integer<wUInt256> { static constexpr bool value = true; };
+template <> struct is_integer<UInt128> { static constexpr bool value = true; };
+template <> struct is_integer<Int256> { static constexpr bool value = true; };
+template <> struct is_integer<UInt256> { static constexpr bool value = true; };

 template <typename T>
 inline constexpr bool is_integer_v = is_integer<T>::value;
@@ -64,7 +64,11 @@ struct is_arithmetic
     static constexpr bool value = std::is_arithmetic_v<T>;
 };

-template <> struct is_arithmetic<__int128> { static constexpr bool value = true; };
+template <> struct is_arithmetic<Int128> { static constexpr bool value = true; };
+template <> struct is_arithmetic<UInt128> { static constexpr bool value = true; };
+template <> struct is_arithmetic<Int256> { static constexpr bool value = true; };
+template <> struct is_arithmetic<UInt256> { static constexpr bool value = true; };

 template <typename T>
 inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
@@ -75,9 +79,10 @@ struct make_unsigned
     typedef std::make_unsigned_t<T> type;
 };

-template <> struct make_unsigned<Int128> { using type = unsigned __int128; };
-template <> struct make_unsigned<wInt256> { using type = wUInt256; };
-template <> struct make_unsigned<wUInt256> { using type = wUInt256; };
+template <> struct make_unsigned<Int128> { using type = UInt128; };
+template <> struct make_unsigned<UInt128> { using type = UInt128; };
+template <> struct make_unsigned<Int256> { using type = UInt256; };
+template <> struct make_unsigned<UInt256> { using type = UInt256; };

 template <typename T> using make_unsigned_t = typename make_unsigned<T>::type;
@@ -87,8 +92,10 @@ struct make_signed
     typedef std::make_signed_t<T> type;
 };

-template <> struct make_signed<wInt256> { using type = wInt256; };
-template <> struct make_signed<wUInt256> { using type = wInt256; };
+template <> struct make_signed<Int128> { using type = Int128; };
+template <> struct make_signed<UInt128> { using type = Int128; };
+template <> struct make_signed<Int256> { using type = Int256; };
+template <> struct make_signed<UInt256> { using type = Int256; };

 template <typename T> using make_signed_t = typename make_signed<T>::type;
@@ -98,8 +105,10 @@ struct is_big_int
     static constexpr bool value = false;
 };

-template <> struct is_big_int<wInt256> { static constexpr bool value = true; };
-template <> struct is_big_int<wUInt256> { static constexpr bool value = true; };
+template <> struct is_big_int<Int128> { static constexpr bool value = true; };
+template <> struct is_big_int<UInt128> { static constexpr bool value = true; };
+template <> struct is_big_int<Int256> { static constexpr bool value = true; };
+template <> struct is_big_int<UInt256> { static constexpr bool value = true; };

 template <typename T>
 inline constexpr bool is_big_int_v = is_big_int<T>::value;
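
A hedged sketch (not part of the commit) of how generic code consumes the traits specialised above; it assumes the header shown here is on the include path and that the primary is_integer template accepts builtin integral types.

#include <cstdint>
// assumed: the extended types header above is included

template <typename T>
constexpr bool fits_in_64_bits()
{
    // is_big_int_v is the custom trait specialised above; std::is_integral
    // alone would reject the wide::integer types entirely.
    return is_integer_v<T> && !is_big_int_v<T>;
}

static_assert(fits_in_64_bits<uint64_t>());   // plain builtin integer
static_assert(!fits_in_64_bits<UInt256>());   // wide integer, flagged as a big int
static_assert(is_signed_v<Int128> && is_unsigned_v<make_unsigned_t<Int128>>);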

View File

@@ -25,6 +25,10 @@ uint64_t getThreadId()
         current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid
 #elif defined(OS_FREEBSD)
         current_tid = pthread_getthreadid_np();
+#elif defined(OS_SUNOS)
+        // On Solaris-derived systems, this returns the ID of the LWP, analogous
+        // to a thread.
+        current_tid = static_cast<uint64_t>(pthread_self());
 #else
         if (0 != pthread_threadid_np(nullptr, &current_tid))
             throw std::logic_error("pthread_threadid_np returned error");

View File

@@ -30,9 +30,8 @@
 #include <cstddef>
 #include <cstring>
 #include <type_traits>
+#include <common/extended_types.h>

-using int128_t = __int128;
-using uint128_t = unsigned __int128;

 namespace impl
 {
@@ -106,7 +105,7 @@ using UnsignedOfSize = typename SelectType
     uint16_t,
     uint32_t,
     uint64_t,
-    uint128_t
+    __uint128_t
 >::Result;

 /// Holds the result of dividing an unsigned N-byte variable by 10^N resulting in
@@ -313,7 +312,8 @@ namespace convert
     }
 }

-static inline int digits10(uint128_t x)
+template <typename T>
+static inline int digits10(T x)
 {
     if (x < 10ULL)
         return 1;
@@ -346,8 +346,11 @@ static inline int digits10(uint128_t x)
     return 12 + digits10(x / 1000000000000ULL);
 }

-static inline char * writeUIntText(uint128_t x, char * p)
+template <typename T>
+static inline char * writeUIntText(T x, char * p)
 {
+    static_assert(is_unsigned_v<T>);
+
     int len = digits10(x);
     auto pp = p + len;
     while (x >= 100)
@@ -370,14 +373,28 @@ static inline char * writeLeadingMinus(char * pos)
     return pos + 1;
 }

-static inline char * writeSIntText(int128_t x, char * pos)
+template <typename T>
+static inline char * writeSIntText(T x, char * pos)
 {
-    static constexpr int128_t min_int128 = uint128_t(1) << 127;
+    static_assert(std::is_same_v<T, Int128> || std::is_same_v<T, Int256>);

-    if (unlikely(x == min_int128))
+    using UnsignedT = make_unsigned_t<T>;
+    static constexpr T min_int = UnsignedT(1) << (sizeof(T) * 8 - 1);
+
+    if (unlikely(x == min_int))
     {
-        memcpy(pos, "-170141183460469231731687303715884105728", 40);
-        return pos + 40;
+        if constexpr (std::is_same_v<T, Int128>)
+        {
+            const char * res = "-170141183460469231731687303715884105728";
+            memcpy(pos, res, strlen(res));
+            return pos + strlen(res);
+        }
+        else if constexpr (std::is_same_v<T, Int256>)
+        {
+            const char * res = "-57896044618658097711785492504343953926634992332820282019728792003956564819968";
+            memcpy(pos, res, strlen(res));
+            return pos + strlen(res);
+        }
     }

     if (x < 0)
@@ -385,7 +402,7 @@ static inline char * writeSIntText(int128_t x, char * pos)
         x = -x;
         pos = writeLeadingMinus(pos);
     }
-    return writeUIntText(static_cast<uint128_t>(x), pos);
+    return writeUIntText(UnsignedT(x), pos);
 }

 }
@@ -403,13 +420,25 @@ inline char * itoa(char8_t i, char * p)
 }

 template <>
-inline char * itoa<uint128_t>(uint128_t i, char * p)
+inline char * itoa(UInt128 i, char * p)
 {
     return impl::writeUIntText(i, p);
 }

 template <>
-inline char * itoa<int128_t>(int128_t i, char * p)
+inline char * itoa(Int128 i, char * p)
+{
+    return impl::writeSIntText(i, p);
+}
+
+template <>
+inline char * itoa(UInt256 i, char * p)
+{
+    return impl::writeUIntText(i, p);
+}
+
+template <>
+inline char * itoa(Int256 i, char * p)
 {
     return impl::writeSIntText(i, p);
 }
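
A hedged sketch (not part of the commit) showing the new wide-integer itoa overloads in use; the itoa and extended_types header paths are assumed, as they are not shown in this view.

#include <iostream>
// assumed: the itoa header above and common/extended_types.h are on the include path

int main()
{
    char buf[128]; // large enough for the 78-79 characters of a 256-bit value

    UInt256 x = UInt256(1) << 200;       // does not fit in any builtin integer
    char * end = itoa(x, buf);
    std::cout.write(buf, end - buf) << '\n';

    Int128 y = Int128(-1) * (Int128(1) << 100);
    end = itoa(y, buf);
    std::cout.write(buf, end - buf) << '\n'; // printed with a leading '-'
}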

View File

@@ -4,7 +4,8 @@
 #include <type_traits>
 #include <utility>

-template <class T, class Tag>
+
+template <typename T, typename Tag>
 struct StrongTypedef
 {
 private:
@@ -38,14 +39,16 @@ public:
     bool operator==(const Self & rhs) const { return t == rhs.t; }
     bool operator<(const Self & rhs) const { return t < rhs.t; }
+    bool operator>(const Self & rhs) const { return t > rhs.t; }

     T & toUnderType() { return t; }
     const T & toUnderType() const { return t; }
 };

 namespace std
 {
-    template <class T, class Tag>
+    template <typename T, typename Tag>
     struct hash<StrongTypedef<T, Tag>>
     {
         size_t operator()(const StrongTypedef<T, Tag> & x) const
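
A hedged sketch (not part of the commit) of what the added operator> means for StrongTypedef users. It assumes the existing header provides an explicit constructor from the underlying type, which is not shown in this hunk.

#include <cassert>
#include <cstdint>
// assumed: the strong typedef header above is included

struct UserIdTag {};
using UserId = StrongTypedef<uint64_t, UserIdTag>;

int main()
{
    UserId a(1);
    UserId b(2);
    assert(b > a);  // operator> is newly available after this commit
    assert(a < b);  // operator< already existed
}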

View File

@@ -1,13 +1,15 @@
 #pragma once

 #include <stdexcept>

 /// Throw DB::Exception-like exception before its definition.
 /// DB::Exception derived from Poco::Exception derived from std::exception.
-/// DB::Exception generally cought as Poco::Exception. std::exception generally has other catch blocks and could lead to other outcomes.
+/// DB::Exception generally caught as Poco::Exception. std::exception generally has other catch blocks and could lead to other outcomes.
 /// DB::Exception is not defined yet. It'd better to throw Poco::Exception but we do not want to include any big header here, even <string>.
 /// So we throw some std::exception instead in the hope its catch block is the same as DB::Exception one.
 template <typename T>
-inline void throwError(const T & err)
+[[noreturn]] inline void throwError(const T & err)
 {
     throw std::runtime_error(err);
 }

View File

@@ -2,7 +2,7 @@
 #include <time.h>

-#if defined (OS_DARWIN)
+#if defined (OS_DARWIN) || defined (OS_SUNOS)
 #  define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
 #elif defined (OS_FREEBSD)
 #  define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST

View File

@@ -13,7 +13,12 @@ using char8_t = unsigned char;
 #endif

 /// This is needed for more strict aliasing. https://godbolt.org/z/xpJBSb https://stackoverflow.com/a/57453713
+#if !defined(PVS_STUDIO) /// But PVS-Studio does not treat it correctly.
 using UInt8 = char8_t;
+#else
+using UInt8 = uint8_t;
+#endif
+
 using UInt16 = uint16_t;
 using UInt32 = uint32_t;
 using UInt64 = uint64_t;

View File

@@ -58,9 +58,11 @@ public:
     using signed_base_type = int64_t;

     // ctors
-    constexpr integer() noexcept;
+    constexpr integer() noexcept = default;
+
     template <typename T>
     constexpr integer(T rhs) noexcept;
+
     template <typename T>
     constexpr integer(std::initializer_list<T> il) noexcept;
@@ -108,9 +110,9 @@ public:
     constexpr explicit operator bool() const noexcept;

     template <class T>
-    using __integral_not_wide_integer_class = typename std::enable_if<std::is_arithmetic<T>::value, T>::type;
+    using _integral_not_wide_integer_class = typename std::enable_if<std::is_arithmetic<T>::value, T>::type;

-    template <class T, class = __integral_not_wide_integer_class<T>>
+    template <class T, class = _integral_not_wide_integer_class<T>>
     constexpr operator T() const noexcept;

     constexpr operator long double() const noexcept;
@@ -119,25 +121,27 @@ public:
     struct _impl;

+    base_type items[_impl::item_count];
+
 private:
     template <size_t Bits2, typename Signed2>
     friend class integer;

     friend class std::numeric_limits<integer<Bits, signed>>;
     friend class std::numeric_limits<integer<Bits, unsigned>>;
-
-    base_type items[_impl::item_count];
 };

 template <typename T>
 static constexpr bool ArithmeticConcept() noexcept;
 template <class T1, class T2>
-using __only_arithmetic = typename std::enable_if<ArithmeticConcept<T1>() && ArithmeticConcept<T2>()>::type;
+using _only_arithmetic = typename std::enable_if<ArithmeticConcept<T1>() && ArithmeticConcept<T2>()>::type;

 template <typename T>
 static constexpr bool IntegralConcept() noexcept;
 template <class T, class T2>
-using __only_integer = typename std::enable_if<IntegralConcept<T>() && IntegralConcept<T2>()>::type;
+using _only_integer = typename std::enable_if<IntegralConcept<T>() && IntegralConcept<T2>()>::type;

 // Unary operators
 template <size_t Bits, typename Signed>
@@ -153,54 +157,55 @@ constexpr integer<Bits, Signed> operator+(const integer<Bits, Signed> & lhs) noe
 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
 operator*(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
+template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
 std::common_type_t<Arithmetic, Arithmetic2> constexpr operator*(const Arithmetic & rhs, const Arithmetic2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
 operator/(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
+template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
 std::common_type_t<Arithmetic, Arithmetic2> constexpr operator/(const Arithmetic & rhs, const Arithmetic2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
 operator+(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
+template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
 std::common_type_t<Arithmetic, Arithmetic2> constexpr operator+(const Arithmetic & rhs, const Arithmetic2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
 operator-(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
+template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
 std::common_type_t<Arithmetic, Arithmetic2> constexpr operator-(const Arithmetic & rhs, const Arithmetic2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
 operator%(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Integral, typename Integral2, class = __only_integer<Integral, Integral2>>
+template <typename Integral, typename Integral2, class = _only_integer<Integral, Integral2>>
 std::common_type_t<Integral, Integral2> constexpr operator%(const Integral & rhs, const Integral2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
 operator&(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Integral, typename Integral2, class = __only_integer<Integral, Integral2>>
+template <typename Integral, typename Integral2, class = _only_integer<Integral, Integral2>>
 std::common_type_t<Integral, Integral2> constexpr operator&(const Integral & rhs, const Integral2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
 operator|(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Integral, typename Integral2, class = __only_integer<Integral, Integral2>>
+template <typename Integral, typename Integral2, class = _only_integer<Integral, Integral2>>
 std::common_type_t<Integral, Integral2> constexpr operator|(const Integral & rhs, const Integral2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
 operator^(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Integral, typename Integral2, class = __only_integer<Integral, Integral2>>
+template <typename Integral, typename Integral2, class = _only_integer<Integral, Integral2>>
 std::common_type_t<Integral, Integral2> constexpr operator^(const Integral & rhs, const Integral2 & lhs);

 // TODO: Integral
 template <size_t Bits, typename Signed>
 constexpr integer<Bits, Signed> operator<<(const integer<Bits, Signed> & lhs, int n) noexcept;
 template <size_t Bits, typename Signed>
 constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, int n) noexcept;
@@ -217,32 +222,32 @@ constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, In
 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 constexpr bool operator<(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
+template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
 constexpr bool operator<(const Arithmetic & rhs, const Arithmetic2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 constexpr bool operator>(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
+template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
 constexpr bool operator>(const Arithmetic & rhs, const Arithmetic2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 constexpr bool operator<=(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
+template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
 constexpr bool operator<=(const Arithmetic & rhs, const Arithmetic2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 constexpr bool operator>=(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
+template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
 constexpr bool operator>=(const Arithmetic & rhs, const Arithmetic2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 constexpr bool operator==(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
+template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
 constexpr bool operator==(const Arithmetic & rhs, const Arithmetic2 & lhs);

 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 constexpr bool operator!=(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
-template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
+template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
 constexpr bool operator!=(const Arithmetic & rhs, const Arithmetic2 & lhs);

 }
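
A hedged sketch (not part of the commit) exercising the operator set declared above on a 256-bit unsigned integer; it assumes the wide_integer headers (including the to_string header further down in this commit) are on the include path.

#include <iostream>
// assumed: common/wide_integer.h and its implementation/to_string headers are included

int main()
{
    using UInt256 = wide::integer<256, unsigned>;

    UInt256 a = 1;
    a <<= 200;                       // far beyond the 64-bit range
    UInt256 b = a + a;               // binary operators declared above

    std::cout << (b > a) << '\n';    // 1: comparisons use the full 256-bit width
    std::cout << wide::to_string(b) << '\n';
}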

View File

@@ -5,6 +5,7 @@
 /// (See at http://www.boost.org/LICENSE_1_0.txt)

 #include "throwError.h"

+#include <cmath>
 #include <cfloat>
 #include <cassert>
@@ -81,7 +82,7 @@ public:
             res.items[T::_impl::big(0)] = std::numeric_limits<typename wide::integer<Bits, Signed>::signed_base_type>::min();
             return res;
         }
-        return 0;
+        return wide::integer<Bits, Signed>(0);
     }

     static constexpr wide::integer<Bits, Signed> max() noexcept
@@ -176,7 +177,7 @@ struct integer<Bits, Signed>::_impl
     constexpr static bool is_negative(const integer<B, T> & n) noexcept
     {
         if constexpr (std::is_same_v<T, signed>)
-            return static_cast<signed_base_type>(n.items[big(0)]) < 0;
+            return static_cast<signed_base_type>(n.items[integer<B, T>::_impl::big(0)]) < 0;
         else
             return false;
     }
@@ -193,40 +194,36 @@ struct integer<Bits, Signed>::_impl
     template <size_t B, class S>
     constexpr static integer<B, S> make_positive(const integer<B, S> & n) noexcept
     {
-        return is_negative(n) ? operator_unary_minus(n) : n;
+        return is_negative(n) ? integer<B, S>(operator_unary_minus(n)) : n;
     }

     template <typename T>
     __attribute__((no_sanitize("undefined"))) constexpr static auto to_Integral(T f) noexcept
     {
-        if constexpr (std::is_same_v<T, __int128>)
-            return f;
-        else if constexpr (std::is_signed_v<T>)
+        if constexpr (std::is_signed_v<T>)
             return static_cast<int64_t>(f);
         else
             return static_cast<uint64_t>(f);
     }

     template <typename Integral>
-    constexpr static void wide_integer_from_bultin(integer<Bits, Signed> & self, Integral rhs) noexcept
+    constexpr static void wide_integer_from_builtin(integer<Bits, Signed> & self, Integral rhs) noexcept
     {
-        self.items[0] = _impl::to_Integral(rhs);
+        static_assert(sizeof(Integral) <= sizeof(base_type));

-        if constexpr (std::is_same_v<Integral, __int128>)
-            self.items[1] = rhs >> base_bits;
-
-        constexpr const unsigned start = (sizeof(Integral) == 16) ? 2 : 1;
+        self.items[0] = _impl::to_Integral(rhs);

         if constexpr (std::is_signed_v<Integral>)
         {
             if (rhs < 0)
             {
-                for (unsigned i = start; i < item_count; ++i)
+                for (size_t i = 1; i < item_count; ++i)
                     self.items[i] = -1;
                 return;
             }
         }

-        for (unsigned i = start; i < item_count; ++i)
+        for (size_t i = 1; i < item_count; ++i)
             self.items[i] = 0;
     }
@@ -239,7 +236,8 @@ struct integer<Bits, Signed>::_impl
      * a_(n - 1) = a_n * max_int + b2, a_n <= max_int <- base case.
      */
     template <class T>
-    constexpr static void set_multiplier(integer<Bits, Signed> & self, T t) noexcept {
+    constexpr static void set_multiplier(integer<Bits, Signed> & self, T t) noexcept
+    {
         constexpr uint64_t max_int = std::numeric_limits<uint64_t>::max();

         /// Implementation specific behaviour on overflow (if we don't check here, stack overflow will triggered in bigint_cast).
@@ -260,7 +258,8 @@ struct integer<Bits, Signed>::_impl
         self += static_cast<uint64_t>(t - alpha * static_cast<T>(max_int)); // += b_i
     }

-    constexpr static void wide_integer_from_bultin(integer<Bits, Signed>& self, double rhs) noexcept {
+    constexpr static void wide_integer_from_builtin(integer<Bits, Signed>& self, double rhs) noexcept
+    {
         constexpr int64_t max_int = std::numeric_limits<int64_t>::max();
         constexpr int64_t min_int = std::numeric_limits<int64_t>::min();
@@ -383,13 +382,13 @@ struct integer<Bits, Signed>::_impl
             if (bit_shift)
                 lhs.items[big(items_shift)] |= std::numeric_limits<base_type>::max() << (base_bits - bit_shift);

-            for (unsigned i = item_count - items_shift; i < items_shift; ++i)
-                lhs.items[little(i)] = std::numeric_limits<base_type>::max();
+            for (unsigned i = 0; i < items_shift; ++i)
+                lhs.items[big(i)] = std::numeric_limits<base_type>::max();
         }
         else
         {
-            for (unsigned i = item_count - items_shift; i < items_shift; ++i)
-                lhs.items[little(i)] = 0;
+            for (unsigned i = 0; i < items_shift; ++i)
+                lhs.items[big(i)] = 0;
         }

         return lhs;
@@ -397,23 +396,23 @@ struct integer<Bits, Signed>::_impl
 private:
     template <typename T>
-    constexpr static base_type get_item(const T & x, unsigned number)
+    constexpr static base_type get_item(const T & x, unsigned idx)
     {
         if constexpr (IsWideInteger<T>::value)
         {
-            if (number < T::_impl::item_count)
-                return x.items[number];
+            if (idx < T::_impl::item_count)
+                return x.items[idx];
             return 0;
         }
         else
         {
             if constexpr (sizeof(T) <= sizeof(base_type))
             {
-                if (!number)
+                if (0 == idx)
                     return x;
             }
-            else if (number * sizeof(base_type) < sizeof(T))
-                return x >> (number * base_bits); // & std::numeric_limits<base_type>::max()
+            else if (idx * sizeof(base_type) < sizeof(T))
+                return x >> (idx * base_bits); // & std::numeric_limits<base_type>::max()
             return 0;
         }
     }
@@ -439,7 +438,7 @@ private:
         for (unsigned i = 1; i < item_count; ++i)
         {
-            if (underflows[i-1])
+            if (underflows[i - 1])
             {
                 base_type & res_item = res.items[little(i)];
                 if (res_item == 0)
@@ -472,7 +471,7 @@ private:
         for (unsigned i = 1; i < item_count; ++i)
         {
-            if (overflows[i-1])
+            if (overflows[i - 1])
             {
                 base_type & res_item = res.items[little(i)];
                 ++res_item;
@@ -532,6 +531,17 @@ private:
             res.items[little(2)] = r12 >> 64;
             return res;
         }
+        else if constexpr (Bits == 128 && sizeof(base_type) == 8)
+        {
+            using CompilerUInt128 = unsigned __int128;
+            CompilerUInt128 a = (CompilerUInt128(lhs.items[1]) << 64) + lhs.items[0];
+            CompilerUInt128 b = (CompilerUInt128(rhs.items[1]) << 64) + rhs.items[0];
+            CompilerUInt128 c = a * b;
+            integer<Bits, Signed> res;
+            res.items[0] = c;
+            res.items[1] = c >> 64;
+            return res;
+        }
         else
         {
             integer<Bits, Signed> res{};
@@ -657,7 +667,7 @@ public:
     }

     template <typename T>
-    constexpr static bool operator_more(const integer<Bits, Signed> & lhs, const T & rhs) noexcept
+    constexpr static bool operator_greater(const integer<Bits, Signed> & lhs, const T & rhs) noexcept
     {
         if constexpr (should_keep_size<T>())
         {
@@ -677,7 +687,7 @@ public:
         else
         {
             static_assert(IsWideInteger<T>::value);
-            return std::common_type_t<integer<Bits, Signed>, T>::_impl::operator_more(T(lhs), rhs);
+            return std::common_type_t<integer<Bits, Signed>, T>::_impl::operator_greater(T(lhs), rhs);
         }
     }
@@ -764,7 +774,6 @@ public:
         }
     }

-private:
     template <typename T>
     constexpr static bool is_zero(const T & x)
     {
@@ -781,46 +790,65 @@ private:
     }

     /// returns quotient as result and remainder in numerator.
-    template <typename T>
-    constexpr static T divide(T & numerator, T && denominator)
+    template <size_t Bits2>
+    constexpr static integer<Bits2, unsigned> divide(integer<Bits2, unsigned> & numerator, integer<Bits2, unsigned> denominator)
     {
-        if (is_zero(denominator))
-            throwError("divide by zero");
+        static_assert(std::is_unsigned_v<Signed>);

-        T & n = numerator;
-        T & d = denominator;
-        T x = 1;
-        T quotient = 0;
-
-        while (!operator_more(d, n) && operator_eq(operator_amp(shift_right(d, base_bits * item_count - 1), 1), 0))
+        if constexpr (Bits == 128 && sizeof(base_type) == 8)
         {
-            x = shift_left(x, 1);
-            d = shift_left(d, 1);
+            using CompilerUInt128 = unsigned __int128;
+
+            CompilerUInt128 a = (CompilerUInt128(numerator.items[1]) << 64) + numerator.items[0];
+            CompilerUInt128 b = (CompilerUInt128(denominator.items[1]) << 64) + denominator.items[0];
+            CompilerUInt128 c = a / b;
+
+            integer<Bits, Signed> res;
+            res.items[0] = c;
+            res.items[1] = c >> 64;
+
+            CompilerUInt128 remainder = a - b * c;
+            numerator.items[0] = remainder;
+            numerator.items[1] = remainder >> 64;
+
+            return res;
         }

-        while (!operator_eq(x, 0))
+        if (is_zero(denominator))
+            throwError("Division by zero");
+
+        integer<Bits2, unsigned> x = 1;
+        integer<Bits2, unsigned> quotient = 0;
+
+        while (!operator_greater(denominator, numerator) && is_zero(operator_amp(shift_right(denominator, Bits2 - 1), 1)))
         {
-            if (!operator_more(d, n))
+            x = shift_left(x, 1);
+            denominator = shift_left(denominator, 1);
+        }
+
+        while (!is_zero(x))
+        {
+            if (!operator_greater(denominator, numerator))
             {
-                n = operator_minus(n, d);
+                numerator = operator_minus(numerator, denominator);
                 quotient = operator_pipe(quotient, x);
             }

             x = shift_right(x, 1);
-            d = shift_right(d, 1);
+            denominator = shift_right(denominator, 1);
         }

         return quotient;
     }

+public:
     template <typename T>
     constexpr static auto operator_slash(const integer<Bits, Signed> & lhs, const T & rhs)
     {
         if constexpr (should_keep_size<T>())
         {
-            integer<Bits, Signed> numerator = make_positive(lhs);
-            integer<Bits, Signed> quotient = divide(numerator, make_positive(integer<Bits, Signed>(rhs)));
+            integer<Bits, unsigned> numerator = make_positive(lhs);
+            integer<Bits, unsigned> denominator = make_positive(integer<Bits, Signed>(rhs));
+            integer<Bits, unsigned> quotient = integer<Bits, unsigned>::_impl::divide(numerator, std::move(denominator));

             if (std::is_same_v<Signed, signed> && is_negative(rhs) != is_negative(lhs))
                 quotient = operator_unary_minus(quotient);
@@ -838,8 +866,9 @@ public:
     {
         if constexpr (should_keep_size<T>())
         {
-            integer<Bits, Signed> remainder = make_positive(lhs);
-            divide(remainder, make_positive(integer<Bits, Signed>(rhs)));
+            integer<Bits, unsigned> remainder = make_positive(lhs);
+            integer<Bits, unsigned> denominator = make_positive(integer<Bits, Signed>(rhs));
+            integer<Bits, unsigned>::_impl::divide(remainder, std::move(denominator));

             if (std::is_same_v<Signed, signed> && is_negative(lhs))
                 remainder = operator_unary_minus(remainder);
@@ -905,7 +934,7 @@ public:
                     ++c;
                 }
                 else
-                    throwError("invalid char from");
+                    throwError("Invalid char from");
             }
         }
         else
@@ -913,7 +942,7 @@ public:
             while (*c)
             {
                 if (*c < '0' || *c > '9')
-                    throwError("invalid char from");
+                    throwError("Invalid char from");

                 res = multiply(res, 10U);
                 res = plus(res, *c - '0');
@@ -930,11 +959,6 @@ public:

 // Members

-template <size_t Bits, typename Signed>
-constexpr integer<Bits, Signed>::integer() noexcept
-    : items{}
-{}
-
 template <size_t Bits, typename Signed>
 template <typename T>
 constexpr integer<Bits, Signed>::integer(T rhs) noexcept
@@ -943,7 +967,7 @@ constexpr integer<Bits, Signed>::integer(T rhs) noexcept
     if constexpr (IsWideInteger<T>::value)
         _impl::wide_integer_from_wide_integer(*this, rhs);
     else
-        _impl::wide_integer_from_bultin(*this, rhs);
+        _impl::wide_integer_from_builtin(*this, rhs);
 }

 template <size_t Bits, typename Signed>
@@ -956,10 +980,19 @@ constexpr integer<Bits, Signed>::integer(std::initializer_list<T> il) noexcept
         if constexpr (IsWideInteger<T>::value)
             _impl::wide_integer_from_wide_integer(*this, *il.begin());
         else
-            _impl::wide_integer_from_bultin(*this, *il.begin());
+            _impl::wide_integer_from_builtin(*this, *il.begin());
+    }
+    else if (il.size() == 0)
+    {
+        _impl::wide_integer_from_builtin(*this, 0);
     }
     else
-        _impl::wide_integer_from_bultin(*this, 0);
+    {
+        auto it = il.begin();
+        for (size_t i = 0; i < _impl::item_count; ++i)
+            if (it < il.end())
+                items[i] = *it;
+    }
 }

 template <size_t Bits, typename Signed>
@@ -974,7 +1007,7 @@ template <size_t Bits, typename Signed>
 template <typename T>
 constexpr integer<Bits, Signed> & integer<Bits, Signed>::operator=(T rhs) noexcept
 {
-    _impl::wide_integer_from_bultin(*this, rhs);
+    _impl::wide_integer_from_builtin(*this, rhs);
     return *this;
 }
@@ -1057,7 +1090,7 @@ constexpr integer<Bits, Signed> & integer<Bits, Signed>::operator>>=(int n) noex
 {
     if (static_cast<size_t>(n) >= Bits)
     {
-        if (is_negative(*this))
+        if (_impl::is_negative(*this))
             *this = -1;
         else
             *this = 0;
@@ -1107,16 +1140,17 @@ template <size_t Bits, typename Signed>
 template <class T, class>
 constexpr integer<Bits, Signed>::operator T() const noexcept
 {
-    if constexpr (std::is_same_v<T, __int128>)
-    {
-        static_assert(Bits >= 128);
-        return (__int128(items[1]) << 64) | items[0];
-    }
-    else
-    {
-        static_assert(std::numeric_limits<T>::is_integer);
-        return items[0];
-    }
+    static_assert(std::numeric_limits<T>::is_integer);
+
+    /// NOTE: memcpy will suffice, but unfortunately, this function is constexpr.
+
+    using UnsignedT = std::make_unsigned_t<T>;
+
+    UnsignedT res{};
+    for (unsigned i = 0; i < _impl::item_count && i < (sizeof(T) + sizeof(base_type) - 1) / sizeof(base_type); ++i)
+        res += UnsignedT(items[i]) << (sizeof(base_type) * 8 * i);
+
+    return res;
 }

 template <size_t Bits, typename Signed>
@@ -1280,7 +1314,7 @@ template <size_t Bits, typename Signed>
 constexpr integer<Bits, Signed> operator<<(const integer<Bits, Signed> & lhs, int n) noexcept
 {
     if (static_cast<size_t>(n) >= Bits)
-        return 0;
+        return integer<Bits, Signed>(0);
     if (n <= 0)
         return lhs;
     return integer<Bits, Signed>::_impl::shift_left(lhs, n);
@@ -1289,7 +1323,7 @@ template <size_t Bits, typename Signed>
 constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, int n) noexcept
 {
     if (static_cast<size_t>(n) >= Bits)
-        return 0;
+        return integer<Bits, Signed>(0);
     if (n <= 0)
         return lhs;
     return integer<Bits, Signed>::_impl::shift_right(lhs, n);
@@ -1309,7 +1343,7 @@ constexpr bool operator<(const Arithmetic & lhs, const Arithmetic2 & rhs)
 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 constexpr bool operator>(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs)
 {
-    return std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>>::_impl::operator_more(lhs, rhs);
+    return std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>>::_impl::operator_greater(lhs, rhs);
 }

 template <typename Arithmetic, typename Arithmetic2, class>
 constexpr bool operator>(const Arithmetic & lhs, const Arithmetic2 & rhs)
@@ -1332,7 +1366,7 @@ constexpr bool operator<=(const Arithmetic & lhs, const Arithmetic2 & rhs)
 template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
 constexpr bool operator>=(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs)
 {
-    return std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>>::_impl::operator_more(lhs, rhs)
+    return std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>>::_impl::operator_greater(lhs, rhs)
         || std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>>::_impl::operator_eq(lhs, rhs);
 }

 template <typename Arithmetic, typename Arithmetic2, class>

View File

@ -1,9 +1,12 @@
#pragma once #pragma once
#include <string> #include <string>
#include <ostream>
#include <fmt/format.h>
#include "wide_integer.h" #include "wide_integer.h"
namespace wide namespace wide
{ {
@ -33,3 +36,34 @@ inline std::string to_string(const integer<Bits, Signed> & n)
} }
} }
template <size_t Bits, typename Signed>
std::ostream & operator<<(std::ostream & out, const wide::integer<Bits, Signed> & value)
{
return out << to_string(value);
}
/// See https://fmt.dev/latest/api.html#formatting-user-defined-types
template <size_t Bits, typename Signed>
struct fmt::formatter<wide::integer<Bits, Signed>>
{
constexpr auto parse(format_parse_context & ctx)
{
auto it = ctx.begin();
auto end = ctx.end();
/// Only support {}.
if (it != end && *it != '}')
throw format_error("invalid format");
return it;
}
template <typename FormatContext>
auto format(const wide::integer<Bits, Signed> & value, FormatContext & ctx)
{
return format_to(ctx.out(), "{}", to_string(value));
}
};
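For reference, the specialization above follows fmt's documented extension point for user-defined types. A self-contained sketch of the same pattern for a hypothetical `Fraction` type (the type, its fields, and the output format are illustrative only; the non-const `format` signature assumes the fmt 7/8-era API that this header targets):

```cpp
#include <fmt/format.h>

struct Fraction
{
    long long num;
    long long den;
};

template <>
struct fmt::formatter<Fraction>
{
    constexpr auto parse(fmt::format_parse_context & ctx)
    {
        auto it = ctx.begin();
        /// Only the empty spec "{}" is supported, as in the wide::integer formatter above.
        if (it != ctx.end() && *it != '}')
            throw fmt::format_error("invalid format");
        return it;
    }

    template <typename FormatContext>
    auto format(const Fraction & value, FormatContext & ctx)
    {
        return fmt::format_to(ctx.out(), "{}/{}", value.num, value.den);
    }
};

int main()
{
    fmt::print("{}\n", Fraction{22, 7});  /// prints "22/7"
}
```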

View File

@ -35,7 +35,7 @@ PEERDIR(
CFLAGS(-g0) CFLAGS(-g0)
SRCS( SRCS(
<? find . -name '*.cpp' | grep -v -F tests/ | grep -v -F Replxx | grep -v -F Readline | sed 's/^\.\// /' | sort ?> <? find . -name '*.cpp' | grep -v -F tests/ | grep -v -F examples | grep -v -F Replxx | grep -v -F Readline | sed 's/^\.\// /' | sort ?>
) )
END() END()

View File

@ -102,7 +102,7 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
auto * logger = &Poco::Logger::get("SentryWriter"); auto * logger = &Poco::Logger::get("SentryWriter");
if (config.getBool("send_crash_reports.enabled", false)) if (config.getBool("send_crash_reports.enabled", false))
{ {
if (debug || (strlen(VERSION_OFFICIAL) > 0)) if (debug || (strlen(VERSION_OFFICIAL) > 0)) //-V560
{ {
enabled = true; enabled = true;
} }

View File

@ -15,7 +15,7 @@ if (GLIBC_COMPATIBILITY)
add_headers_and_sources(glibc_compatibility .) add_headers_and_sources(glibc_compatibility .)
add_headers_and_sources(glibc_compatibility musl) add_headers_and_sources(glibc_compatibility musl)
if (ARCH_ARM) if (ARCH_AARCH64)
list (APPEND glibc_compatibility_sources musl/aarch64/syscall.s musl/aarch64/longjmp.s) list (APPEND glibc_compatibility_sources musl/aarch64/syscall.s musl/aarch64/longjmp.s)
set (musl_arch_include_dir musl/aarch64) set (musl_arch_include_dir musl/aarch64)
elseif (ARCH_AMD64) elseif (ARCH_AMD64)

View File

@ -78,6 +78,9 @@
* *
*/ */
// Disable warnings by PVS-Studio
//-V::GA
static const double static const double
pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */ pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */
a0 = 7.72156649015328655494e-02, /* 0x3FB3C467, 0xE37DB0C8 */ a0 = 7.72156649015328655494e-02, /* 0x3FB3C467, 0xE37DB0C8 */

View File

@ -85,6 +85,9 @@
* *
*/ */
// Disable warnings by PVS-Studio
//-V::GA
#include <stdint.h> #include <stdint.h>
#include <math.h> #include <math.h>
#include "libm.h" #include "libm.h"

View File

@ -155,7 +155,7 @@ static inline long double fp_barrierl(long double x)
static inline void fp_force_evalf(float x) static inline void fp_force_evalf(float x)
{ {
volatile float y; volatile float y;
y = x; y = x; //-V1001
} }
#endif #endif
@ -164,7 +164,7 @@ static inline void fp_force_evalf(float x)
static inline void fp_force_eval(double x) static inline void fp_force_eval(double x)
{ {
volatile double y; volatile double y;
y = x; y = x; //-V1001
} }
#endif #endif
@ -173,7 +173,7 @@ static inline void fp_force_eval(double x)
static inline void fp_force_evall(long double x) static inline void fp_force_evall(long double x)
{ {
volatile long double y; volatile long double y;
y = x; y = x; //-V1001
} }
#endif #endif

View File

@ -3,6 +3,9 @@
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
*/ */
// Disable warnings by PVS-Studio
//-V::GA
#include <math.h> #include <math.h>
#include <stdint.h> #include <stdint.h>
#include "libm.h" #include "libm.h"

View File

@ -40,7 +40,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
split->addTextLog(log, text_log_max_priority); split->addTextLog(log, text_log_max_priority);
auto current_logger = config.getString("logger", ""); auto current_logger = config.getString("logger", "");
if (config_logger == current_logger) if (config_logger == current_logger) //-V1051
return; return;
config_logger = current_logger; config_logger = current_logger;
@ -69,7 +69,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false")); log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false"));
log_file->open(); log_file->open();
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(this); Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, log_file); Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, log_file);
split->addChannel(log); split->addChannel(log);
@ -90,7 +90,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
error_log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true")); error_log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true"));
error_log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false")); error_log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false"));
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(this); Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
Poco::AutoPtr<DB::OwnFormattingChannel> errorlog = new DB::OwnFormattingChannel(pf, error_log_file); Poco::AutoPtr<DB::OwnFormattingChannel> errorlog = new DB::OwnFormattingChannel(pf, error_log_file);
errorlog->setLevel(Poco::Message::PRIO_NOTICE); errorlog->setLevel(Poco::Message::PRIO_NOTICE);
@ -98,10 +98,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
split->addChannel(errorlog); split->addChannel(errorlog);
} }
/// "dynamic_layer_selection" is needed only for Yandex.Metrika, that share part of ClickHouse code. if (config.getBool("logger.use_syslog", false))
/// We don't need this configuration parameter.
if (config.getBool("logger.use_syslog", false) || config.getBool("dynamic_layer_selection", false))
{ {
//const std::string & cmd_name = commandName(); //const std::string & cmd_name = commandName();
@ -127,7 +124,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
} }
syslog_channel->open(); syslog_channel->open();
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(this, OwnPatternFormatter::ADD_LAYER_TAG); Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, syslog_channel); Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, syslog_channel);
split->addChannel(log); split->addChannel(log);
@ -141,7 +138,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
{ {
bool color_enabled = config.getBool("logger.color_terminal", color_logs_by_default); bool color_enabled = config.getBool("logger.color_terminal", color_logs_by_default);
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(this, OwnPatternFormatter::ADD_NOTHING, color_enabled); Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(color_enabled);
Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel); Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel);
logger.warning("Logging " + log_level + " to console"); logger.warning("Logging " + log_level + " to console");
split->addChannel(log); split->addChannel(log);

View File

@ -8,6 +8,7 @@
#include <Interpreters/TextLog.h> #include <Interpreters/TextLog.h>
#include "OwnSplitChannel.h" #include "OwnSplitChannel.h"
namespace Poco::Util namespace Poco::Util
{ {
class AbstractConfiguration; class AbstractConfiguration;
@ -21,16 +22,8 @@ public:
/// Close log files. On next log write files will be reopened. /// Close log files. On next log write files will be reopened.
void closeLogs(Poco::Logger & logger); void closeLogs(Poco::Logger & logger);
std::optional<size_t> getLayer() const
{
return layer; /// layer set in inheritor class BaseDaemonApplication.
}
void setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority); void setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);
protected:
std::optional<size_t> layer;
private: private:
Poco::AutoPtr<Poco::FileChannel> log_file; Poco::AutoPtr<Poco::FileChannel> log_file;
Poco::AutoPtr<Poco::FileChannel> error_log_file; Poco::AutoPtr<Poco::FileChannel> error_log_file;

View File

@ -13,31 +13,18 @@
#include "Loggers.h" #include "Loggers.h"
OwnPatternFormatter::OwnPatternFormatter(const Loggers * loggers_, OwnPatternFormatter::Options options_, bool color_) OwnPatternFormatter::OwnPatternFormatter(bool color_)
: Poco::PatternFormatter(""), loggers(loggers_), options(options_), color(color_) : Poco::PatternFormatter(""), color(color_)
{ {
} }
void OwnPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) void OwnPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) const
{ {
DB::WriteBufferFromString wb(text); DB::WriteBufferFromString wb(text);
const Poco::Message & msg = msg_ext.base; const Poco::Message & msg = msg_ext.base;
/// For syslog: tag must be before message and first whitespace.
/// This code is only used in Yandex.Metrika and unneeded in ClickHouse.
if ((options & ADD_LAYER_TAG) && loggers)
{
auto layer = loggers->getLayer();
if (layer)
{
writeCString("layer[", wb);
DB::writeIntText(*layer, wb);
writeCString("]: ", wb);
}
}
/// Change delimiters in date for compatibility with old logs. /// Change delimiters in date for compatibility with old logs.
DB::writeDateTimeText<'.', ':'>(msg_ext.time_seconds, wb); DB::writeDateTimeText<'.', ':'>(msg_ext.time_seconds, wb);

View File

@ -24,20 +24,11 @@ class Loggers;
class OwnPatternFormatter : public Poco::PatternFormatter class OwnPatternFormatter : public Poco::PatternFormatter
{ {
public: public:
/// ADD_LAYER_TAG is needed only for Yandex.Metrika, that share part of ClickHouse code. OwnPatternFormatter(bool color_ = false);
enum Options
{
ADD_NOTHING = 0,
ADD_LAYER_TAG = 1 << 0
};
OwnPatternFormatter(const Loggers * loggers_, Options options_ = ADD_NOTHING, bool color_ = false);
void format(const Poco::Message & msg, std::string & text) override; void format(const Poco::Message & msg, std::string & text) override;
void formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text); void formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) const;
private: private:
const Loggers * loggers;
Options options;
bool color; bool color;
}; };

View File

@ -447,69 +447,6 @@ inline SrcIter uneven_copy(SrcIter src_first,
std::integral_constant<bool, DEST_IS_SMALLER>{}); std::integral_constant<bool, DEST_IS_SMALLER>{});
} }
/* generate_to, fill in a fixed-size array of integral type using a SeedSeq
* (actually works for any random-access iterator)
*/
template <size_t size, typename SeedSeq, typename DestIter>
inline void generate_to_impl(SeedSeq&& generator, DestIter dest,
std::true_type)
{
generator.generate(dest, dest+size);
}
template <size_t size, typename SeedSeq, typename DestIter>
void generate_to_impl(SeedSeq&& generator, DestIter dest,
std::false_type)
{
typedef typename std::iterator_traits<DestIter>::value_type dest_t;
constexpr auto DEST_SIZE = sizeof(dest_t);
constexpr auto GEN_SIZE = sizeof(uint32_t);
constexpr bool GEN_IS_SMALLER = GEN_SIZE < DEST_SIZE;
constexpr size_t FROM_ELEMS =
GEN_IS_SMALLER
? size * ((DEST_SIZE+GEN_SIZE-1) / GEN_SIZE)
: (size + (GEN_SIZE / DEST_SIZE) - 1)
/ ((GEN_SIZE / DEST_SIZE) + GEN_IS_SMALLER);
// this odd code ^^^^^^^^^^^^^^^^^ is work-around for
// a bug: http://llvm.org/bugs/show_bug.cgi?id=21287
if (FROM_ELEMS <= 1024) {
uint32_t buffer[FROM_ELEMS];
generator.generate(buffer, buffer+FROM_ELEMS);
uneven_copy(buffer, dest, dest+size);
} else {
uint32_t* buffer = static_cast<uint32_t*>(malloc(GEN_SIZE * FROM_ELEMS));
generator.generate(buffer, buffer+FROM_ELEMS);
uneven_copy(buffer, dest, dest+size);
free(static_cast<void*>(buffer));
}
}
template <size_t size, typename SeedSeq, typename DestIter>
inline void generate_to(SeedSeq&& generator, DestIter dest)
{
typedef typename std::iterator_traits<DestIter>::value_type dest_t;
constexpr bool IS_32BIT = sizeof(dest_t) == sizeof(uint32_t);
generate_to_impl<size>(std::forward<SeedSeq>(generator), dest,
std::integral_constant<bool, IS_32BIT>{});
}
/* generate_one, produce a value of integral type using a SeedSeq
* (optionally, we can have it produce more than one and pick which one
* we want)
*/
template <typename UInt, size_t i = 0UL, size_t N = i+1UL, typename SeedSeq>
inline UInt generate_one(SeedSeq&& generator)
{
UInt result[N];
generate_to<N>(std::forward<SeedSeq>(generator), result);
return result[i];
}
template <typename RngType> template <typename RngType>
auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound) auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound)
-> typename RngType::result_type -> typename RngType::result_type
@ -517,7 +454,7 @@ auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound)
typedef typename RngType::result_type rtype; typedef typename RngType::result_type rtype;
rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound) rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound)
% upper_bound; % upper_bound;
for (;;) { for (;;) { //-V1044
rtype r = rng() - RngType::min(); rtype r = rng() - RngType::min();
if (r >= threshold) if (r >= threshold)
return r % upper_bound; return r % upper_bound;
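The loop flagged with `//-V1044` is the standard rejection step that removes modulo bias: raw draws below `threshold` are discarded so that the surviving range is an exact multiple of `upper_bound`, and only then is `%` applied. A minimal sketch of the same idea on top of the standard library rather than pcg (the function name is reused purely for illustration):

```cpp
#include <cstdint>
#include <iostream>
#include <random>

// Uniform value in [0, upper_bound) without modulo bias; upper_bound must be non-zero.
// For std::mt19937_64, min() == 0 and max() == 2^64 - 1, so the pcg threshold formula
// (max - min + 1 - upper_bound) % upper_bound reduces to the wrap-around below.
uint64_t bounded_rand(std::mt19937_64 & rng, uint64_t upper_bound)
{
    const uint64_t threshold = (0ULL - upper_bound) % upper_bound;
    for (;;)
    {
        const uint64_t r = rng();
        if (r >= threshold)
            return r % upper_bound;
    }
}

int main()
{
    std::mt19937_64 rng{42};
    std::cout << bounded_rand(rng, 6) << '\n';  // a fair "die roll" in [0, 6)
}
```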

View File

@ -928,7 +928,7 @@ struct rxs_m_xs_mixin {
constexpr bitcount_t shift = bits - xtypebits; constexpr bitcount_t shift = bits - xtypebits;
constexpr bitcount_t mask = (1 << opbits) - 1; constexpr bitcount_t mask = (1 << opbits) - 1;
bitcount_t rshift = bitcount_t rshift =
opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; //-V547
internal ^= internal >> (opbits + rshift); internal ^= internal >> (opbits + rshift);
internal *= mcg_multiplier<itype>::multiplier(); internal *= mcg_multiplier<itype>::multiplier();
xtype result = internal >> shift; xtype result = internal >> shift;
@ -950,7 +950,7 @@ struct rxs_m_xs_mixin {
internal *= mcg_unmultiplier<itype>::unmultiplier(); internal *= mcg_unmultiplier<itype>::unmultiplier();
bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; //-V547
internal = unxorshift(internal, bits, opbits + rshift); internal = unxorshift(internal, bits, opbits + rshift);
return internal; return internal;
@ -975,7 +975,7 @@ struct rxs_m_mixin {
: 2; : 2;
constexpr bitcount_t shift = bits - xtypebits; constexpr bitcount_t shift = bits - xtypebits;
constexpr bitcount_t mask = (1 << opbits) - 1; constexpr bitcount_t mask = (1 << opbits) - 1;
bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; //-V547
internal ^= internal >> (opbits + rshift); internal ^= internal >> (opbits + rshift);
internal *= mcg_multiplier<itype>::multiplier(); internal *= mcg_multiplier<itype>::multiplier();
xtype result = internal >> shift; xtype result = internal >> shift;
@ -1366,7 +1366,7 @@ void extended<table_pow2,advance_pow2,baseclass,extvalclass,kdd>::selfinit()
// - any strange correlations would only be apparent if we // - any strange correlations would only be apparent if we
// were to backstep the generator so that the base generator // were to backstep the generator so that the base generator
// was generating the same values again // was generating the same values again
result_type xdiff = baseclass::operator()() - baseclass::operator()(); result_type xdiff = baseclass::operator()() - baseclass::operator()(); //-V501
for (size_t i = 0; i < table_size; ++i) { for (size_t i = 0; i < table_size; ++i) {
data_[i] = baseclass::operator()() ^ xdiff; data_[i] = baseclass::operator()() ^ xdiff;
} }
@ -1643,22 +1643,22 @@ typedef setseq_base<pcg128_t, pcg128_t, xsl_rr_rr_mixin>
template <bitcount_t table_pow2, bitcount_t advance_pow2, template <bitcount_t table_pow2, bitcount_t advance_pow2,
typename BaseRNG, bool kdd = true> typename BaseRNG, bool kdd = true>
using ext_std8 = extended<table_pow2, advance_pow2, BaseRNG, using ext_std8 = pcg_detail::extended<table_pow2, advance_pow2, BaseRNG,
oneseq_rxs_m_xs_8_8, kdd>; oneseq_rxs_m_xs_8_8, kdd>;
template <bitcount_t table_pow2, bitcount_t advance_pow2, template <bitcount_t table_pow2, bitcount_t advance_pow2,
typename BaseRNG, bool kdd = true> typename BaseRNG, bool kdd = true>
using ext_std16 = extended<table_pow2, advance_pow2, BaseRNG, using ext_std16 = pcg_detail::extended<table_pow2, advance_pow2, BaseRNG,
oneseq_rxs_m_xs_16_16, kdd>; oneseq_rxs_m_xs_16_16, kdd>;
template <bitcount_t table_pow2, bitcount_t advance_pow2, template <bitcount_t table_pow2, bitcount_t advance_pow2,
typename BaseRNG, bool kdd = true> typename BaseRNG, bool kdd = true>
using ext_std32 = extended<table_pow2, advance_pow2, BaseRNG, using ext_std32 = pcg_detail::extended<table_pow2, advance_pow2, BaseRNG,
oneseq_rxs_m_xs_32_32, kdd>; oneseq_rxs_m_xs_32_32, kdd>;
template <bitcount_t table_pow2, bitcount_t advance_pow2, template <bitcount_t table_pow2, bitcount_t advance_pow2,
typename BaseRNG, bool kdd = true> typename BaseRNG, bool kdd = true>
using ext_std64 = extended<table_pow2, advance_pow2, BaseRNG, using ext_std64 = pcg_detail::extended<table_pow2, advance_pow2, BaseRNG,
oneseq_rxs_m_xs_64_64, kdd>; oneseq_rxs_m_xs_64_64, kdd>;

View File

@ -2,7 +2,7 @@ if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64)
set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "") set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "")
endif() endif()
option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile_expressions' option for query execution" ${ENABLE_LIBRARIES}) option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ON)
# Broken in macos. TODO: update clang, re-test, enable on Apple # Broken in macos. TODO: update clang, re-test, enable on Apple
if (ENABLE_EMBEDDED_COMPILER AND NOT SPLIT_SHARED_LIBRARIES AND ARCH_AMD64 AND NOT (SANITIZE STREQUAL "undefined")) if (ENABLE_EMBEDDED_COMPILER AND NOT SPLIT_SHARED_LIBRARIES AND ARCH_AMD64 AND NOT (SANITIZE STREQUAL "undefined"))
option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library." ${NOT_UNBUNDLED}) option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library." ${NOT_UNBUNDLED})
@ -24,9 +24,9 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/CMakeLists.txt")
endif () endif ()
if (NOT USE_INTERNAL_LLVM_LIBRARY) if (NOT USE_INTERNAL_LLVM_LIBRARY)
set (LLVM_PATHS "/usr/local/lib/llvm") set (LLVM_PATHS "/usr/local/lib/llvm" "/usr/lib/llvm")
foreach(llvm_v 10 9 8) foreach(llvm_v 11.1 11)
if (NOT LLVM_FOUND) if (NOT LLVM_FOUND)
find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS}) find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS})
endif () endif ()
@ -102,7 +102,6 @@ LLVMRuntimeDyld
LLVMX86CodeGen LLVMX86CodeGen
LLVMX86Desc LLVMX86Desc
LLVMX86Info LLVMX86Info
LLVMX86Utils
LLVMAsmPrinter LLVMAsmPrinter
LLVMDebugInfoDWARF LLVMDebugInfoDWARF
LLVMGlobalISel LLVMGlobalISel

View File

@ -40,7 +40,7 @@ if (SANITIZE)
# RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to
# keep the binary size down. # keep the binary size down.
# TODO: try compiling with -Og and with ld.gold. # TODO: try compiling with -Og and with ld.gold.
set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt") set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
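The extra `-fsanitize-memory-use-after-dtor` flag (paired with `poison_in_dtor=1` in `MSAN_OPTIONS` in the test images further down) makes MSan poison an object's storage as soon as its destructor finishes. A minimal, deliberately buggy sketch of the class of defect this is meant to catch (illustrative only, not taken from the codebase):

```cpp
#include <cstdio>
#include <new>

struct Widget
{
    int value = 42;
    ~Widget() {}  // with use-after-dtor checking, the members are poisoned right after this returns
};

int main()
{
    alignas(Widget) unsigned char storage[sizeof(Widget)];
    Widget * w = new (storage) Widget;
    w->~Widget();
    // Deliberate use-after-dtor: reading a member after explicit destruction.
    // Compiled with -fsanitize=memory -fsanitize-memory-use-after-dtor and run with
    // MSAN_OPTIONS=poison_in_dtor=1, this read is reported by MemorySanitizer.
    std::printf("%d\n", w->value);
    return 0;
}
```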

View File

@ -12,6 +12,9 @@ elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin") elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin")
set (OS_DARWIN 1) set (OS_DARWIN 1)
add_definitions(-D OS_DARWIN) add_definitions(-D OS_DARWIN)
elseif (CMAKE_SYSTEM_NAME MATCHES "SunOS")
set (OS_SUNOS 1)
add_definitions(-D OS_SUNOS)
endif () endif ()
if (CMAKE_CROSSCOMPILING) if (CMAKE_CROSSCOMPILING)

contrib/boost vendored

@ -1 +1 @@
Subproject commit 9f0ff347e50429686604002d8ad1fd07515c4f31 Subproject commit 1ccbb5a522a571ce83b606dbc2e1011c42ecccfb

@ -1 +1 @@
Subproject commit f915d35b2de676683493c86c585141a1e1c83334 Subproject commit 7d73d7610db31d4e1ecde0fb3a7ee90ef371207f

contrib/librdkafka vendored

@ -1 +1 @@
Subproject commit cf11d0aa36d4738f2c9bf4377807661660f1be76 Subproject commit 43491d33ca2826531d1e3cae70d4bf1e5249e3c9

contrib/llvm vendored

@ -1 +1 @@
Subproject commit 8f24d507c1cfeec66d27f48fe74518fd278e2d25 Subproject commit cfaf365cf96918999d09d976ec736b4518cf5d02

View File

@ -64,6 +64,8 @@ RUN groupadd -r clickhouse --gid=101 \
clickhouse-client=$version \ clickhouse-client=$version \
clickhouse-server=$version ; \ clickhouse-server=$version ; \
fi \ fi \
&& wget --progress=bar:force:noscroll "https://github.com/tianon/gosu/releases/download/$gosu_ver/gosu-$(dpkg --print-architecture)" -O /bin/gosu \
&& chmod +x /bin/gosu \
&& clickhouse-local -q 'SELECT * FROM system.build_options' \ && clickhouse-local -q 'SELECT * FROM system.build_options' \
&& rm -rf \ && rm -rf \
/var/lib/apt/lists/* \ /var/lib/apt/lists/* \
@ -76,8 +78,6 @@ RUN groupadd -r clickhouse --gid=101 \
# we need to allow "others" access to clickhouse folder, because docker container # we need to allow "others" access to clickhouse folder, because docker container
# can be started with arbitrary uid (openshift usecase) # can be started with arbitrary uid (openshift usecase)
ADD https://github.com/tianon/gosu/releases/download/$gosu_ver/gosu-amd64 /bin/gosu
RUN locale-gen en_US.UTF-8 RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8 ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en ENV LANGUAGE en_US:en
@ -88,10 +88,7 @@ RUN mkdir /docker-entrypoint-initdb.d
COPY docker_related_config.xml /etc/clickhouse-server/config.d/ COPY docker_related_config.xml /etc/clickhouse-server/config.d/
COPY entrypoint.sh /entrypoint.sh COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
RUN chmod +x \
/entrypoint.sh \
/bin/gosu
EXPOSE 9000 8123 9009 EXPOSE 9000 8123 9009
VOLUME /var/lib/clickhouse VOLUME /var/lib/clickhouse

View File

@ -51,13 +51,13 @@ RUN apt-get update \
# Sanitizer options for services (clickhouse-server) # Sanitizer options for services (clickhouse-server)
RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7'" >> /etc/environment; \ RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7'" >> /etc/environment; \
echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \ echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \
echo "MSAN_OPTIONS='abort_on_error=1'" >> /etc/environment; \ echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment; \
echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment; \ echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment; \
ln -s /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-symbolizer /usr/bin/llvm-symbolizer; ln -s /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-symbolizer /usr/bin/llvm-symbolizer;
# Sanitizer options for current shell (not current, but the one that will be spawned on "docker run") # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run")
# (but w/o verbosity for TSAN, otherwise test.reference will not match) # (but w/o verbosity for TSAN, otherwise test.reference will not match)
ENV TSAN_OPTIONS='halt_on_error=1 history_size=7' ENV TSAN_OPTIONS='halt_on_error=1 history_size=7'
ENV UBSAN_OPTIONS='print_stacktrace=1' ENV UBSAN_OPTIONS='print_stacktrace=1'
ENV MSAN_OPTIONS='abort_on_error=1' ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
CMD sleep 1 CMD sleep 1

View File

@ -308,6 +308,8 @@ function run_tests
01354_order_by_tuple_collate_const 01354_order_by_tuple_collate_const
01355_ilike 01355_ilike
01411_bayesian_ab_testing 01411_bayesian_ab_testing
01798_uniq_theta_sketch
01799_long_uniq_theta_sketch
collate collate
collation collation
_orc_ _orc_
@ -370,6 +372,10 @@ function run_tests
# Depends on AWS # Depends on AWS
01801_s3_cluster 01801_s3_cluster
# Depends on LLVM JIT
01852_jit_if
01865_jit_comparison_constant_result
) )
(time clickhouse-test --hung-check -j 8 --order=random --use-skip-list --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 ||:) | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt" (time clickhouse-test --hung-check -j 8 --order=random --use-skip-list --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 ||:) | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"

View File

@ -14,11 +14,6 @@
<max_memory_usage> <max_memory_usage>
<max>10G</max> <max>10G</max>
</max_memory_usage> </max_memory_usage>
<!-- Not ready for production -->
<compile_expressions>
<readonly />
</compile_expressions>
</constraints> </constraints>
</default> </default>
</profiles> </profiles>

View File

@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal python-dateutil numpy
ENV DOCKER_CHANNEL stable ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce ENV DOCKER_VERSION 17.09.1-ce
@ -74,4 +74,3 @@ VOLUME /var/lib/docker
EXPOSE 2375 EXPOSE 2375
ENTRYPOINT ["dockerd-entrypoint.sh"] ENTRYPOINT ["dockerd-entrypoint.sh"]
CMD ["sh", "-c", "python3 regression.py --no-color -o classic --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv"] CMD ["sh", "-c", "python3 regression.py --no-color -o classic --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv"]

View File

@ -31,10 +31,10 @@ toc_title: Cloud
## Alibaba Cloud {#alibaba-cloud} ## Alibaba Cloud {#alibaba-cloud}
Alibaba Cloud Managed Service for ClickHouse. [China Site](https://www.aliyun.com/product/clickhouse) (will be available at the international site in May 2021). Provides the following key features: [Alibaba Cloud Managed Service for ClickHouse](https://www.alibabacloud.com/product/clickhouse) provides the following key features:
- Highly reliable cloud disk storage engine based on [Alibaba Cloud Apsara](https://www.alibabacloud.com/product/apsara-stack) distributed system - Highly reliable cloud disk storage engine based on [Alibaba Cloud Apsara](https://www.alibabacloud.com/product/apsara-stack) distributed system
- Expand capacity on-demand without manual data migration - Expand capacity on demand without manual data migration
- Support single-node, single-replica, multi-node, and multi-replica architectures, and support hot and cold data tiering - Support single-node, single-replica, multi-node, and multi-replica architectures, and support hot and cold data tiering
- Support access allow-list, one-key recovery, multi-layer network security protection, cloud disk encryption - Support access allow-list, one-key recovery, multi-layer network security protection, cloud disk encryption
- Seamless integration with cloud log systems, databases, and data application tools - Seamless integration with cloud log systems, databases, and data application tools

View File

@ -124,4 +124,11 @@ Reboot.
To check if it's working, you can use the `ulimit -n` command. To check if it's working, you can use the `ulimit -n` command.
## Run ClickHouse server:
```
cd ClickHouse
./build/programs/clickhouse-server --config-file ./programs/server/config.xml
```
[Original article](https://clickhouse.tech/docs/en/development/build_osx/) <!--hide--> [Original article](https://clickhouse.tech/docs/en/development/build_osx/) <!--hide-->

View File

@ -139,6 +139,7 @@ The following settings can be specified in configuration file for given endpoint
- `endpoint` — Specifies prefix of an endpoint. Mandatory. - `endpoint` — Specifies prefix of an endpoint. Mandatory.
- `access_key_id` and `secret_access_key` — Specifies credentials to use with given endpoint. Optional. - `access_key_id` and `secret_access_key` — Specifies credentials to use with given endpoint. Optional.
- `region` — Specifies S3 region name. Optional.
- `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and Amazon EC2 metadata for given endpoint. Optional, default value is `false`. - `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and Amazon EC2 metadata for given endpoint. Optional, default value is `false`.
- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`. - `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`.
- `header` — Adds the specified HTTP header to a request to the given endpoint. Optional, can be specified multiple times. - `header` — Adds the specified HTTP header to a request to the given endpoint. Optional, can be specified multiple times.
@ -152,6 +153,7 @@ The following settings can be specified in configuration file for given endpoint
<endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint> <endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint>
<!-- <access_key_id>ACCESS_KEY_ID</access_key_id> --> <!-- <access_key_id>ACCESS_KEY_ID</access_key_id> -->
<!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> --> <!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> -->
<!-- <region>us-west-1</region> -->
<!-- <use_environment_credentials>false</use_environment_credentials> --> <!-- <use_environment_credentials>false</use_environment_credentials> -->
<!-- <use_insecure_imds_request>false</use_insecure_imds_request> --> <!-- <use_insecure_imds_request>false</use_insecure_imds_request> -->
<!-- <header>Authorization: Bearer SOME-TOKEN</header> --> <!-- <header>Authorization: Bearer SOME-TOKEN</header> -->

View File

@ -739,6 +739,7 @@ Configuration markup:
<endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint> <endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
<access_key_id>your_access_key_id</access_key_id> <access_key_id>your_access_key_id</access_key_id>
<secret_access_key>your_secret_access_key</secret_access_key> <secret_access_key>your_secret_access_key</secret_access_key>
<region></region>
<server_side_encryption_customer_key_base64>your_base64_encoded_customer_key</server_side_encryption_customer_key_base64> <server_side_encryption_customer_key_base64>your_base64_encoded_customer_key</server_side_encryption_customer_key_base64>
<proxy> <proxy>
<uri>http://proxy1</uri> <uri>http://proxy1</uri>
@ -764,6 +765,7 @@ Required parameters:
- `secret_access_key` — S3 secret access key. - `secret_access_key` — S3 secret access key.
Optional parameters: Optional parameters:
- `region` — S3 region name.
- `use_environment_credentials` — Reads AWS credentials from the Environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`. - `use_environment_credentials` — Reads AWS credentials from the Environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`.
- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`. - `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`.
- `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL. - `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL.

View File

@ -21,120 +21,121 @@ echo https://transtats.bts.gov/PREZIP/On_Time_Reporting_Carrier_On_Time_Performa
Creating a table: Creating a table:
``` sql ``` sql
CREATE TABLE `ontime` ( CREATE TABLE `ontime`
`Year` UInt16, (
`Quarter` UInt8, `Year` UInt16,
`Month` UInt8, `Quarter` UInt8,
`DayofMonth` UInt8, `Month` UInt8,
`DayOfWeek` UInt8, `DayofMonth` UInt8,
`FlightDate` Date, `DayOfWeek` UInt8,
`UniqueCarrier` FixedString(7), `FlightDate` Date,
`AirlineID` Int32, `Reporting_Airline` String,
`Carrier` FixedString(2), `DOT_ID_Reporting_Airline` Int32,
`TailNum` String, `IATA_CODE_Reporting_Airline` String,
`FlightNum` String, `Tail_Number` Int32,
`OriginAirportID` Int32, `Flight_Number_Reporting_Airline` String,
`OriginAirportSeqID` Int32, `OriginAirportID` Int32,
`OriginCityMarketID` Int32, `OriginAirportSeqID` Int32,
`Origin` FixedString(5), `OriginCityMarketID` Int32,
`OriginCityName` String, `Origin` FixedString(5),
`OriginState` FixedString(2), `OriginCityName` String,
`OriginStateFips` String, `OriginState` FixedString(2),
`OriginStateName` String, `OriginStateFips` String,
`OriginWac` Int32, `OriginStateName` String,
`DestAirportID` Int32, `OriginWac` Int32,
`DestAirportSeqID` Int32, `DestAirportID` Int32,
`DestCityMarketID` Int32, `DestAirportSeqID` Int32,
`Dest` FixedString(5), `DestCityMarketID` Int32,
`DestCityName` String, `Dest` FixedString(5),
`DestState` FixedString(2), `DestCityName` String,
`DestStateFips` String, `DestState` FixedString(2),
`DestStateName` String, `DestStateFips` String,
`DestWac` Int32, `DestStateName` String,
`CRSDepTime` Int32, `DestWac` Int32,
`DepTime` Int32, `CRSDepTime` Int32,
`DepDelay` Int32, `DepTime` Int32,
`DepDelayMinutes` Int32, `DepDelay` Int32,
`DepDel15` Int32, `DepDelayMinutes` Int32,
`DepartureDelayGroups` String, `DepDel15` Int32,
`DepTimeBlk` String, `DepartureDelayGroups` String,
`TaxiOut` Int32, `DepTimeBlk` String,
`WheelsOff` Int32, `TaxiOut` Int32,
`WheelsOn` Int32, `WheelsOff` Int32,
`TaxiIn` Int32, `WheelsOn` Int32,
`CRSArrTime` Int32, `TaxiIn` Int32,
`ArrTime` Int32, `CRSArrTime` Int32,
`ArrDelay` Int32, `ArrTime` Int32,
`ArrDelayMinutes` Int32, `ArrDelay` Int32,
`ArrDel15` Int32, `ArrDelayMinutes` Int32,
`ArrivalDelayGroups` Int32, `ArrDel15` Int32,
`ArrTimeBlk` String, `ArrivalDelayGroups` Int32,
`Cancelled` UInt8, `ArrTimeBlk` String,
`CancellationCode` FixedString(1), `Cancelled` UInt8,
`Diverted` UInt8, `CancellationCode` FixedString(1),
`CRSElapsedTime` Int32, `Diverted` UInt8,
`ActualElapsedTime` Int32, `CRSElapsedTime` Int32,
`AirTime` Int32, `ActualElapsedTime` Int32,
`Flights` Int32, `AirTime` Nullable(Int32),
`Distance` Int32, `Flights` Int32,
`DistanceGroup` UInt8, `Distance` Int32,
`CarrierDelay` Int32, `DistanceGroup` UInt8,
`WeatherDelay` Int32, `CarrierDelay` Int32,
`NASDelay` Int32, `WeatherDelay` Int32,
`SecurityDelay` Int32, `NASDelay` Int32,
`LateAircraftDelay` Int32, `SecurityDelay` Int32,
`FirstDepTime` String, `LateAircraftDelay` Int32,
`TotalAddGTime` String, `FirstDepTime` String,
`LongestAddGTime` String, `TotalAddGTime` String,
`DivAirportLandings` String, `LongestAddGTime` String,
`DivReachedDest` String, `DivAirportLandings` String,
`DivActualElapsedTime` String, `DivReachedDest` String,
`DivArrDelay` String, `DivActualElapsedTime` String,
`DivDistance` String, `DivArrDelay` String,
`Div1Airport` String, `DivDistance` String,
`Div1AirportID` Int32, `Div1Airport` String,
`Div1AirportSeqID` Int32, `Div1AirportID` Int32,
`Div1WheelsOn` String, `Div1AirportSeqID` Int32,
`Div1TotalGTime` String, `Div1WheelsOn` String,
`Div1LongestGTime` String, `Div1TotalGTime` String,
`Div1WheelsOff` String, `Div1LongestGTime` String,
`Div1TailNum` String, `Div1WheelsOff` String,
`Div2Airport` String, `Div1TailNum` String,
`Div2AirportID` Int32, `Div2Airport` String,
`Div2AirportSeqID` Int32, `Div2AirportID` Int32,
`Div2WheelsOn` String, `Div2AirportSeqID` Int32,
`Div2TotalGTime` String, `Div2WheelsOn` String,
`Div2LongestGTime` String, `Div2TotalGTime` String,
`Div2WheelsOff` String, `Div2LongestGTime` String,
`Div2TailNum` String, `Div2WheelsOff` String,
`Div3Airport` String, `Div2TailNum` String,
`Div3AirportID` Int32, `Div3Airport` String,
`Div3AirportSeqID` Int32, `Div3AirportID` Int32,
`Div3WheelsOn` String, `Div3AirportSeqID` Int32,
`Div3TotalGTime` String, `Div3WheelsOn` String,
`Div3LongestGTime` String, `Div3TotalGTime` String,
`Div3WheelsOff` String, `Div3LongestGTime` String,
`Div3TailNum` String, `Div3WheelsOff` String,
`Div4Airport` String, `Div3TailNum` String,
`Div4AirportID` Int32, `Div4Airport` String,
`Div4AirportSeqID` Int32, `Div4AirportID` Int32,
`Div4WheelsOn` String, `Div4AirportSeqID` Int32,
`Div4TotalGTime` String, `Div4WheelsOn` String,
`Div4LongestGTime` String, `Div4TotalGTime` String,
`Div4WheelsOff` String, `Div4LongestGTime` String,
`Div4TailNum` String, `Div4WheelsOff` String,
`Div5Airport` String, `Div4TailNum` String,
`Div5AirportID` Int32, `Div5Airport` String,
`Div5AirportSeqID` Int32, `Div5AirportID` Int32,
`Div5WheelsOn` String, `Div5AirportSeqID` Int32,
`Div5TotalGTime` String, `Div5WheelsOn` String,
`Div5LongestGTime` String, `Div5TotalGTime` String,
`Div5WheelsOff` String, `Div5LongestGTime` String,
`Div5TailNum` String `Div5WheelsOff` String,
`Div5TailNum` String
) ENGINE = MergeTree ) ENGINE = MergeTree
PARTITION BY Year PARTITION BY Year
ORDER BY (Carrier, FlightDate) ORDER BY (IATA_CODE_Reporting_Airline, FlightDate)
SETTINGS index_granularity = 8192; SETTINGS index_granularity = 8192;
``` ```
Loading data with multiple threads: Loading data with multiple threads:
@ -206,7 +207,7 @@ LIMIT 10;
Q4. The number of delays by carrier for 2007 Q4. The number of delays by carrier for 2007
``` sql ``` sql
SELECT Carrier, count(*) SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*)
FROM ontime FROM ontime
WHERE DepDelay>10 AND Year=2007 WHERE DepDelay>10 AND Year=2007
GROUP BY Carrier GROUP BY Carrier
@ -220,29 +221,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3
FROM FROM
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c count(*) AS c
FROM ontime FROM ontime
WHERE DepDelay>10 WHERE DepDelay>10
AND Year=2007 AND Year=2007
GROUP BY Carrier GROUP BY Carrier
) ) q
JOIN JOIN
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c2 count(*) AS c2
FROM ontime FROM ontime
WHERE Year=2007 WHERE Year=2007
GROUP BY Carrier GROUP BY Carrier
) USING Carrier ) qq USING Carrier
ORDER BY c3 DESC; ORDER BY c3 DESC;
``` ```
Better version of the same query: Better version of the same query:
``` sql ``` sql
SELECT Carrier, avg(DepDelay>10)*100 AS c3 SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3
FROM ontime FROM ontime
WHERE Year=2007 WHERE Year=2007
GROUP BY Carrier GROUP BY Carrier
@ -256,29 +257,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3
FROM FROM
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c count(*) AS c
FROM ontime FROM ontime
WHERE DepDelay>10 WHERE DepDelay>10
AND Year>=2000 AND Year<=2008 AND Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
) ) q
JOIN JOIN
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c2 count(*) AS c2
FROM ontime FROM ontime
WHERE Year>=2000 AND Year<=2008 WHERE Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
) USING Carrier ) qq USING Carrier
ORDER BY c3 DESC; ORDER BY c3 DESC;
``` ```
Better version of the same query: Better version of the same query:
``` sql ``` sql
SELECT Carrier, avg(DepDelay>10)*100 AS c3 SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3
FROM ontime FROM ontime
WHERE Year>=2000 AND Year<=2008 WHERE Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
@ -297,7 +298,7 @@ FROM
from ontime from ontime
WHERE DepDelay>10 WHERE DepDelay>10
GROUP BY Year GROUP BY Year
) ) q
JOIN JOIN
( (
select select
@ -305,7 +306,7 @@ JOIN
count(*) as c2 count(*) as c2
from ontime from ontime
GROUP BY Year GROUP BY Year
) USING (Year) ) qq USING (Year)
ORDER BY Year; ORDER BY Year;
``` ```
@ -340,7 +341,7 @@ Q10.
``` sql ``` sql
SELECT SELECT
min(Year), max(Year), Carrier, count(*) AS cnt, min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt,
sum(ArrDelayMinutes>30) AS flights_delayed, sum(ArrDelayMinutes>30) AS flights_delayed,
round(sum(ArrDelayMinutes>30)/count(*),2) AS rate round(sum(ArrDelayMinutes>30)/count(*),2) AS rate
FROM ontime FROM ontime

View File

@ -102,7 +102,9 @@ For non-Linux operating systems and for AArch64 CPU arhitecture, ClickHouse buil
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse` - [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse` - [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data. To run `clickhouse server`, you have to additionally download [server](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml) and [users](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/users.xml) configuration files from GitHub. After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data.
Run `sudo ./clickhouse install` if you want to install clickhouse system-wide (along with the needed configuration files and user setup). After that, run the `clickhouse start` command to start the clickhouse-server and `clickhouse-client` to connect to it.
These builds are not recommended for use in production environments because they are less thoroughly tested, but you can do so at your own risk. They also have only a subset of ClickHouse features available. These builds are not recommended for use in production environments because they are less thoroughly tested, but you can do so at your own risk. They also have only a subset of ClickHouse features available.

View File

@ -101,6 +101,9 @@ Privileges can be granted to a role by the [GRANT](../sql-reference/statements/g
Row policy is a filter that defines which of the rows are available to a user or a role. Row policy contains filters for one particular table, as well as a list of roles and/or users which should use this row policy. Row policy is a filter that defines which of the rows are available to a user or a role. Row policy contains filters for one particular table, as well as a list of roles and/or users which should use this row policy.
!!! note "Warning"
Row policies make sense only for users with read-only access. If a user can modify a table or copy partitions between tables, the restrictions of row policies are defeated.
Management queries: Management queries:
- [CREATE ROW POLICY](../sql-reference/statements/create/row-policy.md) - [CREATE ROW POLICY](../sql-reference/statements/create/row-policy.md)

View File

@ -430,7 +430,7 @@ Keys for syslog:
Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise. Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- format Message format. Possible values: `bsd` and `syslog.` - format Message format. Possible values: `bsd` and `syslog.`
## send_crash_reports {#server_configuration_parameters-logger} ## send_crash_reports {#server_configuration_parameters-send_crash_reports}
Settings for opt-in sending crash reports to the ClickHouse core developers team via [Sentry](https://sentry.io). Settings for opt-in sending crash reports to the ClickHouse core developers team via [Sentry](https://sentry.io).
Enabling it, especially in pre-production environments, is highly appreciated. Enabling it, especially in pre-production environments, is highly appreciated.

View File

@ -143,6 +143,16 @@ Possible values:
Default value: 0. Default value: 0.
## http_max_uri_size {#http-max-uri-size}
Sets the maximum URI length of an HTTP request.
Possible values:
- Positive integer.
Default value: 1048576.
## send_progress_in_http_headers {#settings-send_progress_in_http_headers} ## send_progress_in_http_headers {#settings-send_progress_in_http_headers}
Enables or disables `X-ClickHouse-Progress` HTTP response headers in `clickhouse-server` responses. Enables or disables `X-ClickHouse-Progress` HTTP response headers in `clickhouse-server` responses.

View File

@ -21,6 +21,7 @@ Columns:
- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary. - `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot. - `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache. - `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — The percentage of uses for which the value was found.
- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary. - `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary.
- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). - `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
- `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary. - `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary.
@ -60,4 +61,4 @@ SELECT * FROM system.dictionaries
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘ └──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘
``` ```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries) <!--hide--> [Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries) <!--hide-->

View File

@ -18,6 +18,10 @@ Columns:
- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Size of compressed data in local files, in bytes. - `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Size of compressed data in local files, in bytes.
- `broken_data_files` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of files that have been marked as broken (due to an error).
- `broken_data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Size of compressed data in broken files, in bytes.
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any). - `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).
**Example** **Example**

View File

@ -12,6 +12,9 @@ The result depends on the order of running the query, and is nondeterministic.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function. When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
!!! note "Note"
Using `quantileTDigestWeighted` [is not recommended for tiny data sets](https://github.com/tdunning/t-digest/issues/167#issuecomment-828650275) and can lead to significant error. In this case, consider using [`quantileTDigest`](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) instead.
**Syntax** **Syntax**
``` sql ``` sql

View File

@ -38,3 +38,4 @@ We recommend using this function in almost all scenarios.
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64) - [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)

View File

@ -49,3 +49,4 @@ Compared to the [uniq](../../../sql-reference/aggregate-functions/reference/uniq
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64) - [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)

View File

@ -23,3 +23,4 @@ The function takes a variable number of parameters. Parameters can be `Tuple`, `
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) - [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqcombined) - [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqcombined)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqhll12) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqhll12)
- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)

View File

@ -37,3 +37,4 @@ We don't recommend using this function. In most cases, use the [uniq](../../..
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) - [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined) - [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)

View File

@ -0,0 +1,39 @@
---
toc_priority: 195
---
# uniqThetaSketch {#agg_function-uniqthetasketch}
Calculates the approximate number of different argument values, using the [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html).
``` sql
uniqThetaSketch(x[, ...])
```
**Arguments**
The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
**Returned value**
- A [UInt64](../../../sql-reference/data-types/int-uint.md)-type number.
**Implementation details**
Function:
- Calculates a hash for all parameters in the aggregate, then uses it in calculations.
- Uses the [KMV](https://datasketches.apache.org/docs/Theta/InverseEstimate.html) algorithm to approximate the number of different argument values.
4096 (2^12) 64-bit sketches are used. The size of the state is about 41 KB.
- The relative error is 3.125% (95% confidence); see the [relative error table](https://datasketches.apache.org/docs/Theta/ThetaErrorTable.html) for details.
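As a rough sanity check of the accuracy figures above, the estimate can be compared against the exact count; the table and column names below are placeholders:
``` sql
SELECT
    uniqThetaSketch(user_id) AS approx_uniq,
    uniqExact(user_id) AS exact_uniq
FROM visits;
```
With the default sketch size, `approx_uniq` is expected to stay within roughly 3.125% of `exact_uniq` at 95% confidence.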
**See Also**
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)

View File

@ -224,7 +224,7 @@ assumeNotNull(x)
**Returned values** **Returned values**
- The original value from the non-`Nullable` type, if it is not `NULL`. - The original value from the non-`Nullable` type, if it is not `NULL`.
- The default value for the non-`Nullable` type if the original value was `NULL`. - An implementation-specific result if the original value was `NULL`.
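A minimal way to observe the `NULL` case is to feed the function a `NULL` literal cast to a `Nullable` type; whatever comes back is implementation-specific, as noted above:
``` sql
SELECT assumeNotNull(CAST(NULL, 'Nullable(UInt8)'));
```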
**Example** **Example**

View File

@ -7,6 +7,9 @@ toc_title: ROW POLICY
Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table. Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.
!!! note "Warning"
Row policies make sense only for users with read-only access. If a user can modify a table or copy partitions between tables, the restrictions imposed by row policies are defeated.
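For illustration, a policy of the following shape limits what a read-only audience can see; the database, table, filter condition, and role names are placeholders, not part of the official syntax description below:
``` sql
CREATE ROW POLICY region_filter ON mydb.visits
    FOR SELECT USING region = 'EU'
    TO readonly_role;
```
A user who only has `SELECT` on `mydb.visits` then sees the `EU` rows; a user who can also modify the table or copy partitions out of it can still work around the filter, which is what the warning above is about.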
Syntax: Syntax:
``` sql ``` sql

View File

@ -47,9 +47,9 @@ Union
### EXPLAIN AST {#explain-ast} ### EXPLAIN AST {#explain-ast}
Dump query AST. Dump query AST. Supports all types of queries, not only `SELECT`.
Example: Examples:
```sql ```sql
EXPLAIN AST SELECT 1; EXPLAIN AST SELECT 1;
@ -63,6 +63,22 @@ SelectWithUnionQuery (children 1)
Literal UInt64_1 Literal UInt64_1
``` ```
```sql
EXPLAIN AST ALTER TABLE t1 DELETE WHERE date = today();
```
```sql
explain
AlterQuery t1 (children 1)
ExpressionList (children 1)
AlterCommand 27 (children 1)
Function equals (children 1)
ExpressionList (children 2)
Identifier date
Function today (children 1)
ExpressionList
```
### EXPLAIN SYNTAX {#explain-syntax} ### EXPLAIN SYNTAX {#explain-syntax}
Returns query after syntax optimizations. Returns query after syntax optimizations.
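A minimal illustration (the exact rewritten text depends on the server version and the optimizations that are enabled):
```sql
EXPLAIN SYNTAX SELECT number FROM numbers(10) WHERE number IN (1, 2, 3);
```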

View File

@ -22,7 +22,7 @@ toc_title: "\u30AF\u30E9\u30A6\u30C9"
## Alibaba Cloud {#alibaba-cloud} ## Alibaba Cloud {#alibaba-cloud}
ClickHouseのためのAlibaba Cloudの管理サービス [中国サイト](https://www.aliyun.com/product/clickhouse) (2021年5月に国際サイトで利用可能になります) 次の主な機能を提供します: [ClickHouseのためのAlibaba Cloudの管理サービス](https://www.alibabacloud.com/product/clickhouse) 次の主な機能を提供します:
- Alibaba Cloud Apsara分散システムをベースにした信頼性の高いクラウドディスクストレージエンジン - Alibaba Cloud Apsara分散システムをベースにした信頼性の高いクラウドディスクストレージエンジン
- 手動でのデータ移行を必要とせずに、オン・デマンドで容量を拡張 - 手動でのデータ移行を必要とせずに、オン・デマンドで容量を拡張

View File

@ -29,126 +29,127 @@ done
テーブルの作成: テーブルの作成:
``` sql ``` sql
CREATE TABLE `ontime` ( CREATE TABLE `ontime`
`Year` UInt16, (
`Quarter` UInt8, `Year` UInt16,
`Month` UInt8, `Quarter` UInt8,
`DayofMonth` UInt8, `Month` UInt8,
`DayOfWeek` UInt8, `DayofMonth` UInt8,
`FlightDate` Date, `DayOfWeek` UInt8,
`UniqueCarrier` FixedString(7), `FlightDate` Date,
`AirlineID` Int32, `Reporting_Airline` String,
`Carrier` FixedString(2), `DOT_ID_Reporting_Airline` Int32,
`TailNum` String, `IATA_CODE_Reporting_Airline` String,
`FlightNum` String, `Tail_Number` Int32,
`OriginAirportID` Int32, `Flight_Number_Reporting_Airline` String,
`OriginAirportSeqID` Int32, `OriginAirportID` Int32,
`OriginCityMarketID` Int32, `OriginAirportSeqID` Int32,
`Origin` FixedString(5), `OriginCityMarketID` Int32,
`OriginCityName` String, `Origin` FixedString(5),
`OriginState` FixedString(2), `OriginCityName` String,
`OriginStateFips` String, `OriginState` FixedString(2),
`OriginStateName` String, `OriginStateFips` String,
`OriginWac` Int32, `OriginStateName` String,
`DestAirportID` Int32, `OriginWac` Int32,
`DestAirportSeqID` Int32, `DestAirportID` Int32,
`DestCityMarketID` Int32, `DestAirportSeqID` Int32,
`Dest` FixedString(5), `DestCityMarketID` Int32,
`DestCityName` String, `Dest` FixedString(5),
`DestState` FixedString(2), `DestCityName` String,
`DestStateFips` String, `DestState` FixedString(2),
`DestStateName` String, `DestStateFips` String,
`DestWac` Int32, `DestStateName` String,
`CRSDepTime` Int32, `DestWac` Int32,
`DepTime` Int32, `CRSDepTime` Int32,
`DepDelay` Int32, `DepTime` Int32,
`DepDelayMinutes` Int32, `DepDelay` Int32,
`DepDel15` Int32, `DepDelayMinutes` Int32,
`DepartureDelayGroups` String, `DepDel15` Int32,
`DepTimeBlk` String, `DepartureDelayGroups` String,
`TaxiOut` Int32, `DepTimeBlk` String,
`WheelsOff` Int32, `TaxiOut` Int32,
`WheelsOn` Int32, `WheelsOff` Int32,
`TaxiIn` Int32, `WheelsOn` Int32,
`CRSArrTime` Int32, `TaxiIn` Int32,
`ArrTime` Int32, `CRSArrTime` Int32,
`ArrDelay` Int32, `ArrTime` Int32,
`ArrDelayMinutes` Int32, `ArrDelay` Int32,
`ArrDel15` Int32, `ArrDelayMinutes` Int32,
`ArrivalDelayGroups` Int32, `ArrDel15` Int32,
`ArrTimeBlk` String, `ArrivalDelayGroups` Int32,
`Cancelled` UInt8, `ArrTimeBlk` String,
`CancellationCode` FixedString(1), `Cancelled` UInt8,
`Diverted` UInt8, `CancellationCode` FixedString(1),
`CRSElapsedTime` Int32, `Diverted` UInt8,
`ActualElapsedTime` Int32, `CRSElapsedTime` Int32,
`AirTime` Int32, `ActualElapsedTime` Int32,
`Flights` Int32, `AirTime` Nullable(Int32),
`Distance` Int32, `Flights` Int32,
`DistanceGroup` UInt8, `Distance` Int32,
`CarrierDelay` Int32, `DistanceGroup` UInt8,
`WeatherDelay` Int32, `CarrierDelay` Int32,
`NASDelay` Int32, `WeatherDelay` Int32,
`SecurityDelay` Int32, `NASDelay` Int32,
`LateAircraftDelay` Int32, `SecurityDelay` Int32,
`FirstDepTime` String, `LateAircraftDelay` Int32,
`TotalAddGTime` String, `FirstDepTime` String,
`LongestAddGTime` String, `TotalAddGTime` String,
`DivAirportLandings` String, `LongestAddGTime` String,
`DivReachedDest` String, `DivAirportLandings` String,
`DivActualElapsedTime` String, `DivReachedDest` String,
`DivArrDelay` String, `DivActualElapsedTime` String,
`DivDistance` String, `DivArrDelay` String,
`Div1Airport` String, `DivDistance` String,
`Div1AirportID` Int32, `Div1Airport` String,
`Div1AirportSeqID` Int32, `Div1AirportID` Int32,
`Div1WheelsOn` String, `Div1AirportSeqID` Int32,
`Div1TotalGTime` String, `Div1WheelsOn` String,
`Div1LongestGTime` String, `Div1TotalGTime` String,
`Div1WheelsOff` String, `Div1LongestGTime` String,
`Div1TailNum` String, `Div1WheelsOff` String,
`Div2Airport` String, `Div1TailNum` String,
`Div2AirportID` Int32, `Div2Airport` String,
`Div2AirportSeqID` Int32, `Div2AirportID` Int32,
`Div2WheelsOn` String, `Div2AirportSeqID` Int32,
`Div2TotalGTime` String, `Div2WheelsOn` String,
`Div2LongestGTime` String, `Div2TotalGTime` String,
`Div2WheelsOff` String, `Div2LongestGTime` String,
`Div2TailNum` String, `Div2WheelsOff` String,
`Div3Airport` String, `Div2TailNum` String,
`Div3AirportID` Int32, `Div3Airport` String,
`Div3AirportSeqID` Int32, `Div3AirportID` Int32,
`Div3WheelsOn` String, `Div3AirportSeqID` Int32,
`Div3TotalGTime` String, `Div3WheelsOn` String,
`Div3LongestGTime` String, `Div3TotalGTime` String,
`Div3WheelsOff` String, `Div3LongestGTime` String,
`Div3TailNum` String, `Div3WheelsOff` String,
`Div4Airport` String, `Div3TailNum` String,
`Div4AirportID` Int32, `Div4Airport` String,
`Div4AirportSeqID` Int32, `Div4AirportID` Int32,
`Div4WheelsOn` String, `Div4AirportSeqID` Int32,
`Div4TotalGTime` String, `Div4WheelsOn` String,
`Div4LongestGTime` String, `Div4TotalGTime` String,
`Div4WheelsOff` String, `Div4LongestGTime` String,
`Div4TailNum` String, `Div4WheelsOff` String,
`Div5Airport` String, `Div4TailNum` String,
`Div5AirportID` Int32, `Div5Airport` String,
`Div5AirportSeqID` Int32, `Div5AirportID` Int32,
`Div5WheelsOn` String, `Div5AirportSeqID` Int32,
`Div5TotalGTime` String, `Div5WheelsOn` String,
`Div5LongestGTime` String, `Div5TotalGTime` String,
`Div5WheelsOff` String, `Div5LongestGTime` String,
`Div5TailNum` String `Div5WheelsOff` String,
`Div5TailNum` String
) ENGINE = MergeTree ) ENGINE = MergeTree
PARTITION BY Year PARTITION BY Year
ORDER BY (Carrier, FlightDate) ORDER BY (IATA_CODE_Reporting_Airline, FlightDate)
SETTINGS index_granularity = 8192; SETTINGS index_granularity = 8192;
``` ```
データのロード: データのロード:
``` bash ``` bash
$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done ls -1 *.zip | xargs -I{} -P $(nproc) bash -c "echo {}; unzip -cq {} '*.csv' | sed 's/\.00//g' | clickhouse-client --input_format_with_names_use_header=0 --query='INSERT INTO ontime FORMAT CSVWithNames'"
``` ```
## パーティション済みデータのダウンロード {#download-of-prepared-partitions} ## パーティション済みデータのダウンロード {#download-of-prepared-partitions}
@ -212,10 +213,10 @@ LIMIT 10;
Q4. 2007年のキャリア別の遅延の数 Q4. 2007年のキャリア別の遅延の数
``` sql ``` sql
SELECT Carrier, count(*) SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*)
FROM ontime FROM ontime
WHERE DepDelay>10 AND Year=2007 WHERE DepDelay>10 AND Year=2007
GROUP BY Carrier GROUP BY IATA_CODE_Reporting_Airline
ORDER BY count(*) DESC; ORDER BY count(*) DESC;
``` ```
@ -226,32 +227,32 @@ SELECT Carrier, c, c2, c*100/c2 as c3
FROM FROM
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c count(*) AS c
FROM ontime FROM ontime
WHERE DepDelay>10 WHERE DepDelay>10
AND Year=2007 AND Year=2007
GROUP BY Carrier GROUP BY Carrier
) ) q
JOIN JOIN
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c2 count(*) AS c2
FROM ontime FROM ontime
WHERE Year=2007 WHERE Year=2007
GROUP BY Carrier GROUP BY Carrier
) USING Carrier ) qq USING Carrier
ORDER BY c3 DESC; ORDER BY c3 DESC;
``` ```
同じクエリのより良いバージョン: 同じクエリのより良いバージョン:
``` sql ``` sql
SELECT Carrier, avg(DepDelay>10)*100 AS c3 SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3
FROM ontime FROM ontime
WHERE Year=2007 WHERE Year=2007
GROUP BY Carrier GROUP BY IATA_CODE_Reporting_Airline
ORDER BY c3 DESC ORDER BY c3 DESC
``` ```
@ -262,29 +263,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3
FROM FROM
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c count(*) AS c
FROM ontime FROM ontime
WHERE DepDelay>10 WHERE DepDelay>10
AND Year>=2000 AND Year<=2008 AND Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
) ) q
JOIN JOIN
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c2 count(*) AS c2
FROM ontime FROM ontime
WHERE Year>=2000 AND Year<=2008 WHERE Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
) USING Carrier ) qq USING Carrier
ORDER BY c3 DESC; ORDER BY c3 DESC;
``` ```
同じクエリのより良いバージョン: 同じクエリのより良いバージョン:
``` sql ``` sql
SELECT Carrier, avg(DepDelay>10)*100 AS c3 SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3
FROM ontime FROM ontime
WHERE Year>=2000 AND Year<=2008 WHERE Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
@ -303,7 +304,7 @@ FROM
from ontime from ontime
WHERE DepDelay>10 WHERE DepDelay>10
GROUP BY Year GROUP BY Year
) ) q
JOIN JOIN
( (
select select
@ -311,7 +312,7 @@ JOIN
count(*) as c2 count(*) as c2
from ontime from ontime
GROUP BY Year GROUP BY Year
) USING (Year) ) qq USING (Year)
ORDER BY Year; ORDER BY Year;
``` ```
@ -346,7 +347,7 @@ Q10.
``` sql ``` sql
SELECT SELECT
min(Year), max(Year), Carrier, count(*) AS cnt, min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt,
sum(ArrDelayMinutes>30) AS flights_delayed, sum(ArrDelayMinutes>30) AS flights_delayed,
round(sum(ArrDelayMinutes>30)/count(*),2) AS rate round(sum(ArrDelayMinutes>30)/count(*),2) AS rate
FROM ontime FROM ontime

View File

@ -82,6 +82,7 @@ SELECT * FROM s3_engine_table LIMIT 2;
Необязательные настройки: Необязательные настройки:
- `access_key_id` и `secret_access_key` — указывают учетные данные для использования с данной точкой приема запроса. - `access_key_id` и `secret_access_key` — указывают учетные данные для использования с данной точкой приема запроса.
- `region` — название региона S3.
- `use_environment_credentials` — если `true`, S3-клиент будет пытаться получить учетные данные из переменных среды и метаданных Amazon EC2 для данной точки приема запроса. Значение по умолчанию - `false`. - `use_environment_credentials` — если `true`, S3-клиент будет пытаться получить учетные данные из переменных среды и метаданных Amazon EC2 для данной точки приема запроса. Значение по умолчанию - `false`.
- `header` — добавляет указанный HTTP-заголовок к запросу на заданную точку приема запроса. Может быть определен несколько раз. - `header` — добавляет указанный HTTP-заголовок к запросу на заданную точку приема запроса. Может быть определен несколько раз.
- `server_side_encryption_customer_key_base64` — устанавливает необходимые заголовки для доступа к объектам S3 с шифрованием SSE-C. - `server_side_encryption_customer_key_base64` — устанавливает необходимые заголовки для доступа к объектам S3 с шифрованием SSE-C.
@ -94,6 +95,7 @@ SELECT * FROM s3_engine_table LIMIT 2;
<endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint> <endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint>
<!-- <access_key_id>ACCESS_KEY_ID</access_key_id> --> <!-- <access_key_id>ACCESS_KEY_ID</access_key_id> -->
<!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> --> <!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> -->
<!-- <region>us-west-1</region> -->
<!-- <use_environment_credentials>false</use_environment_credentials> --> <!-- <use_environment_credentials>false</use_environment_credentials> -->
<!-- <header>Authorization: Bearer SOME-TOKEN</header> --> <!-- <header>Authorization: Bearer SOME-TOKEN</header> -->
<!-- <server_side_encryption_customer_key_base64>BASE64-ENCODED-KEY</server_side_encryption_customer_key_base64> --> <!-- <server_side_encryption_customer_key_base64>BASE64-ENCODED-KEY</server_side_encryption_customer_key_base64> -->

View File

@ -727,6 +727,7 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
<endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint> <endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
<access_key_id>your_access_key_id</access_key_id> <access_key_id>your_access_key_id</access_key_id>
<secret_access_key>your_secret_access_key</secret_access_key> <secret_access_key>your_secret_access_key</secret_access_key>
<region></region>
<proxy> <proxy>
<uri>http://proxy1</uri> <uri>http://proxy1</uri>
<uri>http://proxy2</uri> <uri>http://proxy2</uri>
@ -753,6 +754,7 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
Необязательные параметры: Необязательные параметры:
- `region` — название региона S3.
- `use_environment_credentials` — признак, нужно ли считывать учетные данные AWS из сетевого окружения, а также из переменных окружения `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` и `AWS_SESSION_TOKEN`, если они есть. Значение по умолчанию: `false`. - `use_environment_credentials` — признак, нужно ли считывать учетные данные AWS из сетевого окружения, а также из переменных окружения `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` и `AWS_SESSION_TOKEN`, если они есть. Значение по умолчанию: `false`.
- `use_insecure_imds_request` — признак, нужно ли использовать менее безопасное соединение при выполнении запроса к IMDS при получении учётных данных из метаданных Amazon EC2. Значение по умолчанию: `false`. - `use_insecure_imds_request` — признак, нужно ли использовать менее безопасное соединение при выполнении запроса к IMDS при получении учётных данных из метаданных Amazon EC2. Значение по умолчанию: `false`.
- `proxy` — конфигурация прокси-сервера для конечной точки S3. Каждый элемент `uri` внутри блока `proxy` должен содержать URL прокси-сервера. - `proxy` — конфигурация прокси-сервера для конечной точки S3. Каждый элемент `uri` внутри блока `proxy` должен содержать URL прокси-сервера.

View File

@ -27,126 +27,127 @@ done
Создание таблицы: Создание таблицы:
``` sql ``` sql
CREATE TABLE `ontime` ( CREATE TABLE `ontime`
`Year` UInt16, (
`Quarter` UInt8, `Year` UInt16,
`Month` UInt8, `Quarter` UInt8,
`DayofMonth` UInt8, `Month` UInt8,
`DayOfWeek` UInt8, `DayofMonth` UInt8,
`FlightDate` Date, `DayOfWeek` UInt8,
`UniqueCarrier` FixedString(7), `FlightDate` Date,
`AirlineID` Int32, `Reporting_Airline` String,
`Carrier` FixedString(2), `DOT_ID_Reporting_Airline` Int32,
`TailNum` String, `IATA_CODE_Reporting_Airline` String,
`FlightNum` String, `Tail_Number` Int32,
`OriginAirportID` Int32, `Flight_Number_Reporting_Airline` String,
`OriginAirportSeqID` Int32, `OriginAirportID` Int32,
`OriginCityMarketID` Int32, `OriginAirportSeqID` Int32,
`Origin` FixedString(5), `OriginCityMarketID` Int32,
`OriginCityName` String, `Origin` FixedString(5),
`OriginState` FixedString(2), `OriginCityName` String,
`OriginStateFips` String, `OriginState` FixedString(2),
`OriginStateName` String, `OriginStateFips` String,
`OriginWac` Int32, `OriginStateName` String,
`DestAirportID` Int32, `OriginWac` Int32,
`DestAirportSeqID` Int32, `DestAirportID` Int32,
`DestCityMarketID` Int32, `DestAirportSeqID` Int32,
`Dest` FixedString(5), `DestCityMarketID` Int32,
`DestCityName` String, `Dest` FixedString(5),
`DestState` FixedString(2), `DestCityName` String,
`DestStateFips` String, `DestState` FixedString(2),
`DestStateName` String, `DestStateFips` String,
`DestWac` Int32, `DestStateName` String,
`CRSDepTime` Int32, `DestWac` Int32,
`DepTime` Int32, `CRSDepTime` Int32,
`DepDelay` Int32, `DepTime` Int32,
`DepDelayMinutes` Int32, `DepDelay` Int32,
`DepDel15` Int32, `DepDelayMinutes` Int32,
`DepartureDelayGroups` String, `DepDel15` Int32,
`DepTimeBlk` String, `DepartureDelayGroups` String,
`TaxiOut` Int32, `DepTimeBlk` String,
`WheelsOff` Int32, `TaxiOut` Int32,
`WheelsOn` Int32, `WheelsOff` Int32,
`TaxiIn` Int32, `WheelsOn` Int32,
`CRSArrTime` Int32, `TaxiIn` Int32,
`ArrTime` Int32, `CRSArrTime` Int32,
`ArrDelay` Int32, `ArrTime` Int32,
`ArrDelayMinutes` Int32, `ArrDelay` Int32,
`ArrDel15` Int32, `ArrDelayMinutes` Int32,
`ArrivalDelayGroups` Int32, `ArrDel15` Int32,
`ArrTimeBlk` String, `ArrivalDelayGroups` Int32,
`Cancelled` UInt8, `ArrTimeBlk` String,
`CancellationCode` FixedString(1), `Cancelled` UInt8,
`Diverted` UInt8, `CancellationCode` FixedString(1),
`CRSElapsedTime` Int32, `Diverted` UInt8,
`ActualElapsedTime` Int32, `CRSElapsedTime` Int32,
`AirTime` Int32, `ActualElapsedTime` Int32,
`Flights` Int32, `AirTime` Nullable(Int32),
`Distance` Int32, `Flights` Int32,
`DistanceGroup` UInt8, `Distance` Int32,
`CarrierDelay` Int32, `DistanceGroup` UInt8,
`WeatherDelay` Int32, `CarrierDelay` Int32,
`NASDelay` Int32, `WeatherDelay` Int32,
`SecurityDelay` Int32, `NASDelay` Int32,
`LateAircraftDelay` Int32, `SecurityDelay` Int32,
`FirstDepTime` String, `LateAircraftDelay` Int32,
`TotalAddGTime` String, `FirstDepTime` String,
`LongestAddGTime` String, `TotalAddGTime` String,
`DivAirportLandings` String, `LongestAddGTime` String,
`DivReachedDest` String, `DivAirportLandings` String,
`DivActualElapsedTime` String, `DivReachedDest` String,
`DivArrDelay` String, `DivActualElapsedTime` String,
`DivDistance` String, `DivArrDelay` String,
`Div1Airport` String, `DivDistance` String,
`Div1AirportID` Int32, `Div1Airport` String,
`Div1AirportSeqID` Int32, `Div1AirportID` Int32,
`Div1WheelsOn` String, `Div1AirportSeqID` Int32,
`Div1TotalGTime` String, `Div1WheelsOn` String,
`Div1LongestGTime` String, `Div1TotalGTime` String,
`Div1WheelsOff` String, `Div1LongestGTime` String,
`Div1TailNum` String, `Div1WheelsOff` String,
`Div2Airport` String, `Div1TailNum` String,
`Div2AirportID` Int32, `Div2Airport` String,
`Div2AirportSeqID` Int32, `Div2AirportID` Int32,
`Div2WheelsOn` String, `Div2AirportSeqID` Int32,
`Div2TotalGTime` String, `Div2WheelsOn` String,
`Div2LongestGTime` String, `Div2TotalGTime` String,
`Div2WheelsOff` String, `Div2LongestGTime` String,
`Div2TailNum` String, `Div2WheelsOff` String,
`Div3Airport` String, `Div2TailNum` String,
`Div3AirportID` Int32, `Div3Airport` String,
`Div3AirportSeqID` Int32, `Div3AirportID` Int32,
`Div3WheelsOn` String, `Div3AirportSeqID` Int32,
`Div3TotalGTime` String, `Div3WheelsOn` String,
`Div3LongestGTime` String, `Div3TotalGTime` String,
`Div3WheelsOff` String, `Div3LongestGTime` String,
`Div3TailNum` String, `Div3WheelsOff` String,
`Div4Airport` String, `Div3TailNum` String,
`Div4AirportID` Int32, `Div4Airport` String,
`Div4AirportSeqID` Int32, `Div4AirportID` Int32,
`Div4WheelsOn` String, `Div4AirportSeqID` Int32,
`Div4TotalGTime` String, `Div4WheelsOn` String,
`Div4LongestGTime` String, `Div4TotalGTime` String,
`Div4WheelsOff` String, `Div4LongestGTime` String,
`Div4TailNum` String, `Div4WheelsOff` String,
`Div5Airport` String, `Div4TailNum` String,
`Div5AirportID` Int32, `Div5Airport` String,
`Div5AirportSeqID` Int32, `Div5AirportID` Int32,
`Div5WheelsOn` String, `Div5AirportSeqID` Int32,
`Div5TotalGTime` String, `Div5WheelsOn` String,
`Div5LongestGTime` String, `Div5TotalGTime` String,
`Div5WheelsOff` String, `Div5LongestGTime` String,
`Div5TailNum` String `Div5WheelsOff` String,
`Div5TailNum` String
) ENGINE = MergeTree ) ENGINE = MergeTree
PARTITION BY Year PARTITION BY Year
ORDER BY (Carrier, FlightDate) ORDER BY (IATA_CODE_Reporting_Airline, FlightDate)
SETTINGS index_granularity = 8192; SETTINGS index_granularity = 8192;
``` ```
Загрузка данных: Загрузка данных:
``` bash ``` bash
$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done ls -1 *.zip | xargs -I{} -P $(nproc) bash -c "echo {}; unzip -cq {} '*.csv' | sed 's/\.00//g' | clickhouse-client --input_format_with_names_use_header=0 --query='INSERT INTO ontime FORMAT CSVWithNames'"
``` ```
## Скачивание готовых партиций {#skachivanie-gotovykh-partitsii} ## Скачивание готовых партиций {#skachivanie-gotovykh-partitsii}
@ -211,7 +212,7 @@ LIMIT 10;
Q4. Количество задержек по перевозчикам за 2007 год Q4. Количество задержек по перевозчикам за 2007 год
``` sql ``` sql
SELECT Carrier, count(*) SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*)
FROM ontime FROM ontime
WHERE DepDelay>10 AND Year=2007 WHERE DepDelay>10 AND Year=2007
GROUP BY Carrier GROUP BY Carrier
@ -225,29 +226,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3
FROM FROM
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c count(*) AS c
FROM ontime FROM ontime
WHERE DepDelay>10 WHERE DepDelay>10
AND Year=2007 AND Year=2007
GROUP BY Carrier GROUP BY Carrier
) ) q
JOIN JOIN
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c2 count(*) AS c2
FROM ontime FROM ontime
WHERE Year=2007 WHERE Year=2007
GROUP BY Carrier GROUP BY Carrier
) USING Carrier ) qq USING Carrier
ORDER BY c3 DESC; ORDER BY c3 DESC;
``` ```
Более оптимальная версия того же запроса: Более оптимальная версия того же запроса:
``` sql ``` sql
SELECT Carrier, avg(DepDelay>10)*100 AS c3 SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3
FROM ontime FROM ontime
WHERE Year=2007 WHERE Year=2007
GROUP BY Carrier GROUP BY Carrier
@ -261,29 +262,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3
FROM FROM
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c count(*) AS c
FROM ontime FROM ontime
WHERE DepDelay>10 WHERE DepDelay>10
AND Year>=2000 AND Year<=2008 AND Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
) ) q
JOIN JOIN
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c2 count(*) AS c2
FROM ontime FROM ontime
WHERE Year>=2000 AND Year<=2008 WHERE Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
) USING Carrier ) qq USING Carrier
ORDER BY c3 DESC; ORDER BY c3 DESC;
``` ```
Более оптимальная версия того же запроса: Более оптимальная версия того же запроса:
``` sql ``` sql
SELECT Carrier, avg(DepDelay>10)*100 AS c3 SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3
FROM ontime FROM ontime
WHERE Year>=2000 AND Year<=2008 WHERE Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
@ -302,7 +303,7 @@ FROM
from ontime from ontime
WHERE DepDelay>10 WHERE DepDelay>10
GROUP BY Year GROUP BY Year
) ) q
JOIN JOIN
( (
select select
@ -310,7 +311,7 @@ JOIN
count(*) as c2 count(*) as c2
from ontime from ontime
GROUP BY Year GROUP BY Year
) USING (Year) ) qq USING (Year)
ORDER BY Year; ORDER BY Year;
``` ```
@ -346,7 +347,7 @@ Q10.
``` sql ``` sql
SELECT SELECT
min(Year), max(Year), Carrier, count(*) AS cnt, min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt,
sum(ArrDelayMinutes>30) AS flights_delayed, sum(ArrDelayMinutes>30) AS flights_delayed,
round(sum(ArrDelayMinutes>30)/count(*),2) AS rate round(sum(ArrDelayMinutes>30)/count(*),2) AS rate
FROM ontime FROM ontime

View File

@ -415,7 +415,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
Значения по умолчанию: при указанном `address` - `LOG_USER`, иначе - `LOG_DAEMON` Значения по умолчанию: при указанном `address` - `LOG_USER`, иначе - `LOG_DAEMON`
- format - формат сообщений. Возможные значения - `bsd` и `syslog` - format - формат сообщений. Возможные значения - `bsd` и `syslog`
## send_crash_reports {#server_configuration_parameters-logger} ## send_crash_reports {#server_configuration_parameters-send_crash_reports}
Настройки для отправки сообщений о сбоях в команду разработчиков ядра ClickHouse через [Sentry](https://sentry.io). Настройки для отправки сообщений о сбоях в команду разработчиков ядра ClickHouse через [Sentry](https://sentry.io).
Включение этих настроек, особенно в pre-production среде, может дать очень ценную информацию и поможет развитию ClickHouse. Включение этих настроек, особенно в pre-production среде, может дать очень ценную информацию и поможет развитию ClickHouse.

View File

@ -119,6 +119,16 @@ ClickHouse применяет настройку в тех случаях, ко
Значение по умолчанию: 0. Значение по умолчанию: 0.
## http_max_uri_size {#http-max-uri-size}
Устанавливает максимальную длину URI в HTTP-запросе.
Возможные значения:
- Положительное целое.
Значение по умолчанию: 1048576.
## send_progress_in_http_headers {#settings-send_progress_in_http_headers} ## send_progress_in_http_headers {#settings-send_progress_in_http_headers}
Включает или отключает HTTP-заголовки `X-ClickHouse-Progress` в ответах `clickhouse-server`. Включает или отключает HTTP-заголовки `X-ClickHouse-Progress` в ответах `clickhouse-server`.

View File

@ -1,248 +0,0 @@
---
toc_priority: 58
toc_title: "Советы по эксплуатации"
---
# Советы по эксплуатации {#sovety-po-ekspluatatsii}
## CPU Scaling Governor {#cpu-scaling-governor}
Всегда используйте `performance` scaling governor. `ondemand` scaling governor работает намного хуже при постоянно высоком спросе.
``` bash
$ echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
```
## Ограничение CPU {#ogranichenie-cpu}
Процессоры могут перегреваться. С помощью `dmesg` можно увидеть, если тактовая частота процессора была ограничена из-за перегрева.
Также ограничение может устанавливаться снаружи на уровне дата-центра. С помощью `turbostat` можно за этим наблюдать под нагрузкой.
## Оперативная память {#operativnaia-pamiat}
Для небольших объёмов данных (до ~200 Гб в сжатом виде) лучше всего использовать столько памяти не меньше, чем объём данных.
Для больших объёмов данных, при выполнении интерактивных (онлайн) запросов, стоит использовать разумный объём оперативной памяти (128 Гб или более) для того, чтобы горячее подмножество данных поместилось в кеше страниц.
Даже для объёмов данных в ~50 Тб на сервер, использование 128 Гб оперативной памяти намного лучше для производительности выполнения запросов, чем 64 Гб.
Не выключайте overcommit. Значение `cat /proc/sys/vm/overcommit_memory` должно быть 0 or 1. Выполните:
``` bash
$ echo 0 | sudo tee /proc/sys/vm/overcommit_memory
```
## Huge Pages {#huge-pages}
Механизм прозрачных huge pages нужно отключить. Он мешает работе аллокаторов памяти, что приводит к значительной деградации производительности.
``` bash
$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
```
С помощью `perf top` можно наблюдать за временем, проведенном в ядре операционной системы для управления памятью.
Постоянные huge pages так же не нужно аллоцировать.
## Подсистема хранения {#podsistema-khraneniia}
Если ваш бюджет позволяет использовать SSD, используйте SSD.
В противном случае используйте HDD. SATA HDDs 7200 RPM подойдут.
Предпочитайте много серверов с локальными жесткими дисками вместо меньшего числа серверов с подключенными дисковыми полками.
Но для хранения архивов с редкими запросами полки всё же подходят.
## RAID {#raid}
При использовании HDD можно объединить их RAID-10, RAID-5, RAID-6 или RAID-50.
Лучше использовать программный RAID в Linux (`mdadm`). Лучше не использовать LVM.
При создании RAID-10, нужно выбрать `far` расположение.
Если бюджет позволяет, лучше выбрать RAID-10.
На более чем 4 дисках вместо RAID-5 нужно использовать RAID-6 (предпочтительнее) или RAID-50.
При использовании RAID-5, RAID-6 или RAID-50, нужно всегда увеличивать stripe_cache_size, так как значение по умолчанию выбрано не самым удачным образом.
``` bash
$ echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size
```
Точное число стоит вычислять из числа устройств и размер блока по формуле: `2 * num_devices * chunk_size_in_bytes / 4096`.
Размер блока в 1024 Кб подходит для всех конфигураций RAID.
Никогда не указывайте слишком маленький или слишком большой размер блока.
На SSD можно использовать RAID-0.
Вне зависимости от использования RAID, всегда используйте репликацию для безопасности данных.
Включите NCQ с длинной очередью. Для HDD стоит выбрать планировщик CFQ, а для SSD — noop. Не стоит уменьшать настройку readahead.
На HDD стоит включать кеш записи.
## Файловая система {#failovaia-sistema}
Ext4 самый проверенный вариант. Укажите опции монтирования `noatime,nobarrier`.
XFS также подходит, но не так тщательно протестирована в сочетании с ClickHouse.
Большинство других файловых систем также должны нормально работать. Файловые системы с отложенной аллокацией работают лучше.
## Ядро Linux {#iadro-linux}
Не используйте слишком старое ядро Linux.
## Сеть {#set}
При использовании IPv6, стоит увеличить размер кеша маршрутов.
Ядра Linux до 3.2 имели массу проблем в реализации IPv6.
Предпочитайте как минимум 10 Гбит сеть. 1 Гбит также будет работать, но намного хуже для починки реплик с десятками терабайт данных или для обработки распределенных запросов с большим объёмом промежуточных данных.
## ZooKeeper {#zookeeper}
Вероятно вы уже используете ZooKeeper для других целей. Можно использовать ту же инсталляцию ZooKeeper, если она не сильно перегружена.
Лучше использовать свежую версию ZooKeeper, как минимум 3.4.9. Версия в стабильных дистрибутивах Linux может быть устаревшей.
Никогда не используете написанные вручную скрипты для переноса данных между разными ZooKeeper кластерами, потому что результат будет некорректный для sequential нод. Никогда не используйте утилиту «zkcopy», по той же причине: https://github.com/ksprojects/zkcopy/issues/15
Если вы хотите разделить существующий ZooKeeper кластер на два, правильный способ - увеличить количество его реплик, а затем переконфигурировать его как два независимых кластера.
Не запускайте ZooKeeper на тех же серверах, что и ClickHouse. Потому что ZooKeeper очень чувствителен к задержкам, а ClickHouse может использовать все доступные системные ресурсы.
С настройками по умолчанию, ZooKeeper является бомбой замедленного действия:
> Сервер ZooKeeper не будет удалять файлы со старыми снепшоты и логами при использовании конфигурации по умолчанию (см. autopurge), это является ответственностью оператора.
Эту бомбу нужно обезвредить.
Далее описана конфигурация ZooKeeper (3.5.1), используемая в боевом окружении Яндекс.Метрики на момент 20 мая 2017 года:
zoo.cfg:
``` bash
# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=30000
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=10
maxClientCnxns=2000
maxSessionTimeout=60000000
# the directory where the snapshot is stored.
dataDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/data
# Place the dataLogDir to a separate physical disc for better performance
dataLogDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/logs
autopurge.snapRetainCount=10
autopurge.purgeInterval=1
# To avoid seeks ZooKeeper allocates space in the transaction log file in
# blocks of preAllocSize kilobytes. The default block size is 64M. One reason
# for changing the size of the blocks is to reduce the block size if snapshots
# are taken more often. (Also, see snapCount).
preAllocSize=131072
# Clients can submit requests faster than ZooKeeper can process them,
# especially if there are a lot of clients. To prevent ZooKeeper from running
# out of memory due to queued requests, ZooKeeper will throttle clients so that
# there is no more than globalOutstandingLimit outstanding requests in the
# system. The default limit is 1,000.ZooKeeper logs transactions to a
# transaction log. After snapCount transactions are written to a log file a
# snapshot is started and a new transaction log file is started. The default
# snapCount is 10,000.
snapCount=3000000
# If this option is defined, requests will be will logged to a trace file named
# traceFile.year.month.day.
#traceFile=
# Leader accepts client connections. Default value is "yes". The leader machine
# coordinates updates. For higher update throughput at thes slight expense of
# read throughput the leader can be configured to not accept clients and focus
# on coordination.
leaderServes=yes
standaloneEnabled=false
dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/zoo.cfg.dynamic
```
Версия Java:
``` text
Java(TM) SE Runtime Environment (build 1.8.0_25-b17)
Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode)
```
Параметры JVM:
``` bash
NAME=zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}
ZOOCFGDIR=/etc/$NAME/conf
# TODO this is really ugly
# How to find out, which jars are needed?
# seems, that log4j requires the log4j.properties file to be in the classpath
CLASSPATH="$ZOOCFGDIR:/usr/build/classes:/usr/build/lib/*.jar:/usr/share/zookeeper/zookeeper-3.5.1-metrika.jar:/usr/share/zookeeper/slf4j-log4j12-1.7.5.jar:/usr/share/zookeeper/slf4j-api-1.7.5.jar:/usr/share/zookeeper/servlet-api-2.5-20081211.jar:/usr/share/zookeeper/netty-3.7.0.Final.jar:/usr/share/zookeeper/log4j-1.2.16.jar:/usr/share/zookeeper/jline-2.11.jar:/usr/share/zookeeper/jetty-util-6.1.26.jar:/usr/share/zookeeper/jetty-6.1.26.jar:/usr/share/zookeeper/javacc.jar:/usr/share/zookeeper/jackson-mapper-asl-1.9.11.jar:/usr/share/zookeeper/jackson-core-asl-1.9.11.jar:/usr/share/zookeeper/commons-cli-1.2.jar:/usr/src/java/lib/*.jar:/usr/etc/zookeeper"
ZOOCFG="$ZOOCFGDIR/zoo.cfg"
ZOO_LOG_DIR=/var/log/$NAME
USER=zookeeper
GROUP=zookeeper
PIDDIR=/var/run/$NAME
PIDFILE=$PIDDIR/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
JAVA=/usr/bin/java
ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
JMXLOCALONLY=false
JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '{{' }} '}}' }} \
-Xmx{{ '{{' }} cluster.get('xmx','1G') {{ '{{' }} '}}' }} \
-Xloggc:/var/log/$NAME/zookeeper-gc.log \
-XX:+UseGCLogFileRotation \
-XX:NumberOfGCLogFiles=16 \
-XX:GCLogFileSize=16M \
-verbose:gc \
-XX:+PrintGCTimeStamps \
-XX:+PrintGCDateStamps \
-XX:+PrintGCDetails
-XX:+PrintTenuringDistribution \
-XX:+PrintGCApplicationStoppedTime \
-XX:+PrintGCApplicationConcurrentTime \
-XX:+PrintSafepointStatistics \
-XX:+UseParNewGC \
-XX:+UseConcMarkSweepGC \
-XX:+CMSParallelRemarkEnabled"
```
Salt init:
``` text
description "zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }} centralized coordination service"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
limit nofile 8192 8192
pre-start script
[ -r "/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/environment" ] || exit 0
. /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/environment
[ -d $ZOO_LOG_DIR ] || mkdir -p $ZOO_LOG_DIR
chown $USER:$GROUP $ZOO_LOG_DIR
end script
script
. /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/environment
[ -r /etc/default/zookeeper ] && . /etc/default/zookeeper
if [ -z "$JMXDISABLE" ]; then
JAVA_OPTS="$JAVA_OPTS -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY"
fi
exec start-stop-daemon --start -c $USER --exec $JAVA --name zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }} \
-- -cp $CLASSPATH $JAVA_OPTS -Dzookeeper.log.dir=${ZOO_LOG_DIR} \
-Dzookeeper.root.logger=${ZOO_LOG4J_PROP} $ZOOMAIN $ZOOCFG
end script
```

docs/ru/operations/tips.md Symbolic link
View File

@ -0,0 +1 @@
../../en/operations/tips.md

View File

@ -12,6 +12,9 @@ toc_priority: 208
Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса. Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.
!!! note "Примечание"
Использование `quantileTDigestWeighted` [не рекомендуется для небольших наборов данных](https://github.com/tdunning/t-digest/issues/167#issuecomment-828650275) и может привести к значительной ошибке. Рассмотрите возможность использования [`quantileTDigest`](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) в таких случаях.
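As an illustration of the `quantiles` recommendation above, several levels can be computed in a single pass over the data; the column and table names are placeholders:
``` sql
SELECT quantiles(0.5, 0.9, 0.99)(response_time_ms) FROM requests;
```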
**Синтаксис** **Синтаксис**
``` sql ``` sql

View File

@ -224,7 +224,7 @@ assumeNotNull(x)
**Возвращаемые значения** **Возвращаемые значения**
- Исходное значение с не `Nullable` типом, если оно — не `NULL`. - Исходное значение с не `Nullable` типом, если оно — не `NULL`.
- Значение по умолчанию для не `Nullable` типа, если исходное значение — `NULL`. - Неспецифицированный результат, зависящий от реализации, если исходное значение — `NULL`.
**Пример** **Пример**

View File

@ -9,8 +9,9 @@ toc_title: "Манипуляции с индексами"
Добавить или удалить индекс можно с помощью операций Добавить или удалить индекс можно с помощью операций
``` sql ``` sql
ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value [AFTER name] ALTER TABLE [db.]name ADD INDEX name expression TYPE type GRANULARITY value [AFTER name]
ALTER TABLE [db].name DROP INDEX name ALTER TABLE [db.]name DROP INDEX name
ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
``` ```
Поддерживается только таблицами семейства `*MergeTree`. Поддерживается только таблицами семейства `*MergeTree`.
@ -18,6 +19,7 @@ ALTER TABLE [db].name DROP INDEX name
Команда `ADD INDEX` добавляет описание индексов в метаданные, а `DROP INDEX` удаляет индекс из метаданных и стирает файлы индекса с диска, поэтому они легковесные и работают мгновенно. Команда `ADD INDEX` добавляет описание индексов в метаданные, а `DROP INDEX` удаляет индекс из метаданных и стирает файлы индекса с диска, поэтому они легковесные и работают мгновенно.
Если индекс появился в метаданных, то он начнет считаться в последующих слияниях и записях в таблицу, а не сразу после выполнения операции `ALTER`. Если индекс появился в метаданных, то он начнет считаться в последующих слияниях и записях в таблицу, а не сразу после выполнения операции `ALTER`.
`MATERIALIZE INDEX` - перестраивает индекс в указанной партиции. Реализовано как мутация.
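For example, a hypothetical rebuild of an existing index for one partition might look like this; the table name, index name, and partition value are placeholders (the partition value must match the table's partitioning key), and the statement is executed as a mutation, as noted above:
``` sql
ALTER TABLE mydb.visits MATERIALIZE INDEX idx_user IN PARTITION 202104;
```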
Запрос на изменение индексов реплицируется, сохраняя новые метаданные в ZooKeeper и применяя изменения на всех репликах. Запрос на изменение индексов реплицируется, сохраняя новые метаданные в ZooKeeper и применяя изменения на всех репликах.

View File

@ -47,9 +47,9 @@ Union
### EXPLAIN AST {#explain-ast} ### EXPLAIN AST {#explain-ast}
Дамп AST запроса. Дамп AST запроса. Поддерживает все типы запросов, не только `SELECT`.
Пример: Примеры:
```sql ```sql
EXPLAIN AST SELECT 1; EXPLAIN AST SELECT 1;
@ -63,6 +63,22 @@ SelectWithUnionQuery (children 1)
Literal UInt64_1 Literal UInt64_1
``` ```
```sql
EXPLAIN AST ALTER TABLE t1 DELETE WHERE date = today();
```
```sql
explain
AlterQuery t1 (children 1)
ExpressionList (children 1)
AlterCommand 27 (children 1)
Function equals (children 1)
ExpressionList (children 2)
Identifier date
Function today (children 1)
ExpressionList
```
### EXPLAIN SYNTAX {#explain-syntax} ### EXPLAIN SYNTAX {#explain-syntax}
Возвращает текст запроса после применения синтаксических оптимизаций. Возвращает текст запроса после применения синтаксических оптимизаций.

View File

@ -31,7 +31,7 @@ toc_title: 云
## 阿里云 {#alibaba-cloud} ## 阿里云 {#alibaba-cloud}
阿里云的 ClickHouse 托管服务 [中国站](https://www.aliyun.com/product/clickhouse) (国际站于2021年5月初开放) 提供以下主要功能: [阿里云的 ClickHouse 托管服务](https://www.alibabacloud.com/zh/product/clickhouse) 提供以下主要功能:
- 基于阿里飞天分布式系统的高可靠云盘存储引擎 - 基于阿里飞天分布式系统的高可靠云盘存储引擎
- 按需扩容,无需手动进行数据搬迁 - 按需扩容,无需手动进行数据搬迁

View File

@ -29,126 +29,127 @@ done
创建表结构: 创建表结构:
``` sql ``` sql
CREATE TABLE `ontime` ( CREATE TABLE `ontime`
`Year` UInt16, (
`Quarter` UInt8, `Year` UInt16,
`Month` UInt8, `Quarter` UInt8,
`DayofMonth` UInt8, `Month` UInt8,
`DayOfWeek` UInt8, `DayofMonth` UInt8,
`FlightDate` Date, `DayOfWeek` UInt8,
`UniqueCarrier` FixedString(7), `FlightDate` Date,
`AirlineID` Int32, `Reporting_Airline` String,
`Carrier` FixedString(2), `DOT_ID_Reporting_Airline` Int32,
`TailNum` String, `IATA_CODE_Reporting_Airline` String,
`FlightNum` String, `Tail_Number` Int32,
`OriginAirportID` Int32, `Flight_Number_Reporting_Airline` String,
`OriginAirportSeqID` Int32, `OriginAirportID` Int32,
`OriginCityMarketID` Int32, `OriginAirportSeqID` Int32,
`Origin` FixedString(5), `OriginCityMarketID` Int32,
`OriginCityName` String, `Origin` FixedString(5),
`OriginState` FixedString(2), `OriginCityName` String,
`OriginStateFips` String, `OriginState` FixedString(2),
`OriginStateName` String, `OriginStateFips` String,
`OriginWac` Int32, `OriginStateName` String,
`DestAirportID` Int32, `OriginWac` Int32,
`DestAirportSeqID` Int32, `DestAirportID` Int32,
`DestCityMarketID` Int32, `DestAirportSeqID` Int32,
`Dest` FixedString(5), `DestCityMarketID` Int32,
`DestCityName` String, `Dest` FixedString(5),
`DestState` FixedString(2), `DestCityName` String,
`DestStateFips` String, `DestState` FixedString(2),
`DestStateName` String, `DestStateFips` String,
`DestWac` Int32, `DestStateName` String,
`CRSDepTime` Int32, `DestWac` Int32,
`DepTime` Int32, `CRSDepTime` Int32,
`DepDelay` Int32, `DepTime` Int32,
`DepDelayMinutes` Int32, `DepDelay` Int32,
`DepDel15` Int32, `DepDelayMinutes` Int32,
`DepartureDelayGroups` String, `DepDel15` Int32,
`DepTimeBlk` String, `DepartureDelayGroups` String,
`TaxiOut` Int32, `DepTimeBlk` String,
`WheelsOff` Int32, `TaxiOut` Int32,
`WheelsOn` Int32, `WheelsOff` Int32,
`TaxiIn` Int32, `WheelsOn` Int32,
`CRSArrTime` Int32, `TaxiIn` Int32,
`ArrTime` Int32, `CRSArrTime` Int32,
`ArrDelay` Int32, `ArrTime` Int32,
`ArrDelayMinutes` Int32, `ArrDelay` Int32,
`ArrDel15` Int32, `ArrDelayMinutes` Int32,
`ArrivalDelayGroups` Int32, `ArrDel15` Int32,
`ArrTimeBlk` String, `ArrivalDelayGroups` Int32,
`Cancelled` UInt8, `ArrTimeBlk` String,
`CancellationCode` FixedString(1), `Cancelled` UInt8,
`Diverted` UInt8, `CancellationCode` FixedString(1),
`CRSElapsedTime` Int32, `Diverted` UInt8,
`ActualElapsedTime` Int32, `CRSElapsedTime` Int32,
`AirTime` Int32, `ActualElapsedTime` Int32,
`Flights` Int32, `AirTime` Nullable(Int32),
`Distance` Int32, `Flights` Int32,
`DistanceGroup` UInt8, `Distance` Int32,
`CarrierDelay` Int32, `DistanceGroup` UInt8,
`WeatherDelay` Int32, `CarrierDelay` Int32,
`NASDelay` Int32, `WeatherDelay` Int32,
`SecurityDelay` Int32, `NASDelay` Int32,
`LateAircraftDelay` Int32, `SecurityDelay` Int32,
`FirstDepTime` String, `LateAircraftDelay` Int32,
`TotalAddGTime` String, `FirstDepTime` String,
`LongestAddGTime` String, `TotalAddGTime` String,
`DivAirportLandings` String, `LongestAddGTime` String,
`DivReachedDest` String, `DivAirportLandings` String,
`DivActualElapsedTime` String, `DivReachedDest` String,
`DivArrDelay` String, `DivActualElapsedTime` String,
`DivDistance` String, `DivArrDelay` String,
`Div1Airport` String, `DivDistance` String,
`Div1AirportID` Int32, `Div1Airport` String,
`Div1AirportSeqID` Int32, `Div1AirportID` Int32,
`Div1WheelsOn` String, `Div1AirportSeqID` Int32,
`Div1TotalGTime` String, `Div1WheelsOn` String,
`Div1LongestGTime` String, `Div1TotalGTime` String,
`Div1WheelsOff` String, `Div1LongestGTime` String,
`Div1TailNum` String, `Div1WheelsOff` String,
`Div2Airport` String, `Div1TailNum` String,
`Div2AirportID` Int32, `Div2Airport` String,
`Div2AirportSeqID` Int32, `Div2AirportID` Int32,
`Div2WheelsOn` String, `Div2AirportSeqID` Int32,
`Div2TotalGTime` String, `Div2WheelsOn` String,
`Div2LongestGTime` String, `Div2TotalGTime` String,
`Div2WheelsOff` String, `Div2LongestGTime` String,
`Div2TailNum` String, `Div2WheelsOff` String,
`Div3Airport` String, `Div2TailNum` String,
`Div3AirportID` Int32, `Div3Airport` String,
`Div3AirportSeqID` Int32, `Div3AirportID` Int32,
`Div3WheelsOn` String, `Div3AirportSeqID` Int32,
`Div3TotalGTime` String, `Div3WheelsOn` String,
`Div3LongestGTime` String, `Div3TotalGTime` String,
`Div3WheelsOff` String, `Div3LongestGTime` String,
`Div3TailNum` String, `Div3WheelsOff` String,
`Div4Airport` String, `Div3TailNum` String,
`Div4AirportID` Int32, `Div4Airport` String,
`Div4AirportSeqID` Int32, `Div4AirportID` Int32,
`Div4WheelsOn` String, `Div4AirportSeqID` Int32,
`Div4TotalGTime` String, `Div4WheelsOn` String,
`Div4LongestGTime` String, `Div4TotalGTime` String,
`Div4WheelsOff` String, `Div4LongestGTime` String,
`Div4TailNum` String, `Div4WheelsOff` String,
`Div5Airport` String, `Div4TailNum` String,
`Div5AirportID` Int32, `Div5Airport` String,
`Div5AirportSeqID` Int32, `Div5AirportID` Int32,
`Div5WheelsOn` String, `Div5AirportSeqID` Int32,
`Div5TotalGTime` String, `Div5WheelsOn` String,
`Div5LongestGTime` String, `Div5TotalGTime` String,
`Div5WheelsOff` String, `Div5LongestGTime` String,
`Div5TailNum` String `Div5WheelsOff` String,
`Div5TailNum` String
) ENGINE = MergeTree ) ENGINE = MergeTree
PARTITION BY Year PARTITION BY Year
ORDER BY (Carrier, FlightDate) ORDER BY (IATA_CODE_Reporting_Airline, FlightDate)
SETTINGS index_granularity = 8192; SETTINGS index_granularity = 8192;
``` ```
加载数据: 加载数据:
``` bash ``` bash
$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done ls -1 *.zip | xargs -I{} -P $(nproc) bash -c "echo {}; unzip -cq {} '*.csv' | sed 's/\.00//g' | clickhouse-client --input_format_with_names_use_header=0 --query='INSERT INTO ontime FORMAT CSVWithNames'"
``` ```
## 下载预处理好的分区数据 {#xia-zai-yu-chu-li-hao-de-fen-qu-shu-ju} ## 下载预处理好的分区数据 {#xia-zai-yu-chu-li-hao-de-fen-qu-shu-ju}
@ -212,7 +213,7 @@ LIMIT 10;
Q4. 查询2007年各航空公司延误超过10分钟以上的次数 Q4. 查询2007年各航空公司延误超过10分钟以上的次数
``` sql ``` sql
SELECT Carrier, count(*) SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*)
FROM ontime FROM ontime
WHERE DepDelay>10 AND Year=2007 WHERE DepDelay>10 AND Year=2007
GROUP BY Carrier GROUP BY Carrier
@ -226,29 +227,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3
FROM FROM
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c count(*) AS c
FROM ontime FROM ontime
WHERE DepDelay>10 WHERE DepDelay>10
AND Year=2007 AND Year=2007
GROUP BY Carrier GROUP BY Carrier
) ) q
JOIN JOIN
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c2 count(*) AS c2
FROM ontime FROM ontime
WHERE Year=2007 WHERE Year=2007
GROUP BY Carrier GROUP BY Carrier
) USING Carrier ) qq USING Carrier
ORDER BY c3 DESC; ORDER BY c3 DESC;
``` ```
更好的查询版本: 更好的查询版本:
``` sql ``` sql
SELECT Carrier, avg(DepDelay>10)*100 AS c3 SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3
FROM ontime FROM ontime
WHERE Year=2007 WHERE Year=2007
GROUP BY Carrier GROUP BY Carrier
@ -262,29 +263,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3
FROM FROM
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c count(*) AS c
FROM ontime FROM ontime
WHERE DepDelay>10 WHERE DepDelay>10
AND Year>=2000 AND Year<=2008 AND Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
) ) q
JOIN JOIN
( (
SELECT SELECT
Carrier, IATA_CODE_Reporting_Airline AS Carrier,
count(*) AS c2 count(*) AS c2
FROM ontime FROM ontime
WHERE Year>=2000 AND Year<=2008 WHERE Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
) USING Carrier ) qq USING Carrier
ORDER BY c3 DESC; ORDER BY c3 DESC;
``` ```
更好的查询版本: 更好的查询版本:
``` sql ``` sql
SELECT Carrier, avg(DepDelay>10)*100 AS c3 SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3
FROM ontime FROM ontime
WHERE Year>=2000 AND Year<=2008 WHERE Year>=2000 AND Year<=2008
GROUP BY Carrier GROUP BY Carrier
@ -303,7 +304,7 @@ FROM
from ontime from ontime
WHERE DepDelay>10 WHERE DepDelay>10
GROUP BY Year GROUP BY Year
) ) q
JOIN JOIN
( (
select select
@ -311,7 +312,7 @@ JOIN
count(*) as c2 count(*) as c2
from ontime from ontime
GROUP BY Year GROUP BY Year
) USING (Year) ) qq USING (Year)
ORDER BY Year; ORDER BY Year;
``` ```
@ -346,7 +347,7 @@ Q10.
``` sql ``` sql
SELECT SELECT
min(Year), max(Year), Carrier, count(*) AS cnt, min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt,
sum(ArrDelayMinutes>30) AS flights_delayed, sum(ArrDelayMinutes>30) AS flights_delayed,
round(sum(ArrDelayMinutes>30)/count(*),2) AS rate round(sum(ArrDelayMinutes>30)/count(*),2) AS rate
FROM ontime FROM ontime

View File

@ -23,19 +23,9 @@ function _complete_for_clickhouse_entrypoint_bin()
fi fi
util="${words[1]}" util="${words[1]}"
case "$prev" in if _complete_for_clickhouse_generic_bin_impl "$prev"; then
-C|--config-file|--config) COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd" "$util")" -- "$cur") )
return fi
;;
# Argh... This looks like a bash bug...
# Redirections are passed to the completion function
# although it is managed by the shell directly...
'<'|'>'|'>>'|[12]'>'|[12]'>>')
return
;;
esac
COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd" "$util")" -- "$cur") )
return 0 return 0
} }

View File

@ -15,6 +15,13 @@ shopt -s extglob
export _CLICKHOUSE_COMPLETION_LOADED=1 export _CLICKHOUSE_COMPLETION_LOADED=1
CLICKHOUSE_QueryProcessingStage=(
complete
fetch_columns
with_mergeable_state
with_mergeable_state_after_aggregation
)
function _clickhouse_bin_exist() function _clickhouse_bin_exist()
{ [ -x "$1" ] || command -v "$1" >& /dev/null; } { [ -x "$1" ] || command -v "$1" >& /dev/null; }
@ -30,6 +37,33 @@ function _clickhouse_get_options()
"$@" --help 2>&1 | awk -F '[ ,=<>]' '{ for (i=1; i <= NF; ++i) { if (substr($i, 0, 1) == "-" && length($i) > 1) print $i; } }' | sort -u "$@" --help 2>&1 | awk -F '[ ,=<>]' '{ for (i=1; i <= NF; ++i) { if (substr($i, 0, 1) == "-" && length($i) > 1) print $i; } }' | sort -u
} }
function _complete_for_clickhouse_generic_bin_impl()
{
local prev=$1 && shift
case "$prev" in
-C|--config-file|--config)
return 1
;;
--stage)
COMPREPLY=( $(compgen -W "${CLICKHOUSE_QueryProcessingStage[*]}" -- "$cur") )
return 1
;;
--host)
COMPREPLY=( $(compgen -A hostname -- "$cur") )
return 1
;;
# Argh... This looks like a bash bug...
# Redirections are passed to the completion function
# although it is managed by the shell directly...
'<'|'>'|'>>'|[12]'>'|[12]'>>')
return 1
;;
esac
return 0
}
function _complete_for_clickhouse_generic_bin() function _complete_for_clickhouse_generic_bin()
{ {
local cur prev local cur prev
@ -39,19 +73,9 @@ function _complete_for_clickhouse_generic_bin()
COMPREPLY=() COMPREPLY=()
_get_comp_words_by_ref cur prev _get_comp_words_by_ref cur prev
case "$prev" in if _complete_for_clickhouse_generic_bin_impl "$prev"; then
-C|--config-file|--config) COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd")" -- "$cur") )
return fi
;;
# Argh... This looks like a bash bug...
# Redirections are passed to the completion function
# although it is managed by the shell directly...
'<'|'>'|'>>'|[12]'>'|[12]'>>')
return
;;
esac
COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd")" -- "$cur") )
return 0 return 0
} }

View File

@ -1461,18 +1461,25 @@ private:
// when `lambda()` function gets substituted into a wrong place. // when `lambda()` function gets substituted into a wrong place.
// To avoid dealing with these cases, run the check only for the // To avoid dealing with these cases, run the check only for the
// queries we were able to successfully execute. // queries we were able to successfully execute.
// The final caveat is that sometimes WITH queries are not executed, // Another caveat is that sometimes WITH queries are not executed,
// if they are not referenced by the main SELECT, so they can still // if they are not referenced by the main SELECT, so they can still
// have the aforementioned problems. Disable this check for such // have the aforementioned problems. Disable this check for such
// queries, for lack of a better solution. // queries, for lack of a better solution.
if (!have_error && queryHasWithClause(parsed_query.get())) // There is also a problem that fuzzer substitutes positive Int64
// literals or Decimal literals, which are then parsed back as
// UInt64, and suddenly duplicate alias substitution starts or stops
// working (ASTWithAlias::formatImpl) or something like that.
// So we compare not even the first and second formatting of the
// query, but second and third.
// If you have to add any more workarounds to this check, just remove
// it altogether, it's not so useful.
if (!have_error && !queryHasWithClause(parsed_query.get()))
{ {
ASTPtr parsed_formatted_query; ASTPtr ast_2;
try try
{ {
const auto * tmp_pos = query_to_send.c_str(); const auto * tmp_pos = query_to_send.c_str();
parsed_formatted_query = parseQuery(tmp_pos, ast_2 = parseQuery(tmp_pos, tmp_pos + query_to_send.size(),
tmp_pos + query_to_send.size(),
false /* allow_multi_statements */); false /* allow_multi_statements */);
} }
catch (Exception & e) catch (Exception & e)
@ -1483,25 +1490,31 @@ private:
} }
} }
if (parsed_formatted_query) if (ast_2)
{ {
const auto formatted_twice const auto text_2 = ast_2->formatForErrorMessage();
= parsed_formatted_query->formatForErrorMessage(); const auto * tmp_pos = text_2.c_str();
const auto ast_3 = parseQuery(tmp_pos, tmp_pos + text_2.size(),
false /* allow_multi_statements */);
const auto text_3 = ast_3->formatForErrorMessage();
if (formatted_twice != query_to_send) if (text_3 != text_2)
{ {
fmt::print(stderr, "The query formatting is broken.\n"); fmt::print(stderr, "The query formatting is broken.\n");
printChangedSettings(); printChangedSettings();
fmt::print(stderr, "Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", fmt::print(stderr,
formatted_twice, query_to_send); "Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n",
text_3, text_2);
fmt::print(stderr, "In more detail:\n"); fmt::print(stderr, "In more detail:\n");
fmt::print(stderr, "AST-1:\n'{}'\n", parsed_query->dumpTree()); fmt::print(stderr, "AST-1 (generated by fuzzer):\n'{}'\n", parsed_query->dumpTree());
fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", query_to_send); fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", query_to_send);
fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", parsed_formatted_query->dumpTree()); fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", ast_2->dumpTree());
fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", formatted_twice); fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", text_2);
fmt::print(stderr, "Text-1 must be equal to Text-2, but it is not.\n"); fmt::print(stderr, "AST-3 (Text-2 parsed):\n'{}'\n", ast_3->dumpTree());
fmt::print(stderr, "Text-3 (AST-3 formatted):\n'{}'\n", text_3);
fmt::print(stderr, "Text-3 must be equal to Text-2, but it is not.\n");
exit(1); exit(1);
} }
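The comments above only describe the round-trip check in prose, so here is a minimal, self-contained sketch of the same idea outside ClickHouse. The toyParse/toyFormat helpers are stand-ins invented for the example (the real client uses parseQuery and IAST::formatForErrorMessage, which are not reproduced here); only the shape of the check is the same: format the query, parse the result back, format again, and require that the second and third formattings agree, so a one-off change introduced by the first round trip does not trip the check.

#include <cctype>
#include <cstdio>
#include <string>

// Toy stand-ins for the real parser/formatter (assumptions, not ClickHouse APIs):
// "parsing" keeps the text, "formatting" collapses runs of whitespace.
struct Ast { std::string text; };

static Ast toyParse(const std::string & query) { return Ast{query}; }

static std::string toyFormat(const Ast & ast)
{
    std::string out;
    bool prev_space = false;
    for (char c : ast.text)
    {
        bool space = std::isspace(static_cast<unsigned char>(c)) != 0;
        if (space && prev_space)
            continue;
        out += space ? ' ' : c;
        prev_space = space;
    }
    return out;
}

// The check from the comments above: format, re-parse, format again, and require
// Text-2 == Text-3. Text-1 is deliberately not compared, so a change made by the
// first round trip (e.g. a literal re-parsed as another type) is tolerated.
static bool formattingIsStable(const std::string & text_1)
{
    const std::string text_2 = toyFormat(toyParse(text_1));
    const std::string text_3 = toyFormat(toyParse(text_2));
    if (text_2 != text_3)
    {
        std::fprintf(stderr, "Not idempotent:\n'%s'\n'%s'\n", text_2.c_str(), text_3.c_str());
        return false;
    }
    return true;
}

int main()
{
    // The first formatting normalizes spacing; the second and third then agree.
    std::printf("%d\n", formattingIsStable("SELECT  1   +\t2") ? 1 : 0);   // prints 1
}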
@ -1518,6 +1531,11 @@ private:
server_exception.reset(); server_exception.reset();
client_exception.reset(); client_exception.reset();
have_error = false; have_error = false;
// We have to reinitialize connection after errors, because it
// might have gotten into a wrong state and we'll get false
// positives about "Unknown packet from server".
connection->forceConnected(connection_parameters.timeouts);
} }
else if (ast_to_process->formatForErrorMessage().size() > 500) else if (ast_to_process->formatForErrorMessage().size() > 500)
{ {

View File

@ -27,6 +27,7 @@
#include <Parsers/formatAST.h> #include <Parsers/formatAST.h>
#include <Parsers/parseQuery.h> #include <Parsers/parseQuery.h>
namespace DB namespace DB
{ {

View File

@ -4,11 +4,14 @@
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include <pcg-random/pcg_random.hpp>
#include <Common/randomSeed.h> #include <Common/randomSeed.h>
#include <Common/Stopwatch.h> #include <Common/Stopwatch.h>
#include <Core/Field.h> #include <Core/Field.h>
#include <Parsers/IAST.h> #include <Parsers/IAST.h>
namespace DB namespace DB
{ {
@ -50,7 +53,7 @@ struct QueryFuzzer
// Some debug fields for detecting problematic ASTs with loops. // Some debug fields for detecting problematic ASTs with loops.
// These are reset for each fuzzMain call. // These are reset for each fuzzMain call.
std::unordered_set<const IAST *> debug_visited_nodes; std::unordered_set<const IAST *> debug_visited_nodes;
ASTPtr * debug_top_ast; ASTPtr * debug_top_ast = nullptr;
// This is the only function you have to call -- it will modify the passed // This is the only function you have to call -- it will modify the passed

View File

@ -774,7 +774,7 @@ UInt128 diffHash(const CommitDiff & file_changes)
} }
UInt128 hash_of_diff; UInt128 hash_of_diff;
hasher.get128(hash_of_diff.low, hash_of_diff.high); hasher.get128(hash_of_diff.items[0], hash_of_diff.items[1]);
return hash_of_diff; return hash_of_diff;
} }

View File

@ -844,8 +844,8 @@ namespace
fmt::print("The pidof command returned unusual output.\n"); fmt::print("The pidof command returned unusual output.\n");
} }
WriteBufferFromFileDescriptor stderr(STDERR_FILENO); WriteBufferFromFileDescriptor std_err(STDERR_FILENO);
copyData(sh->err, stderr); copyData(sh->err, std_err);
sh->tryWait(); sh->tryWait();
} }

View File

@ -23,6 +23,8 @@ public:
SharedLibraryHandler(const SharedLibraryHandler & other); SharedLibraryHandler(const SharedLibraryHandler & other);
SharedLibraryHandler & operator=(const SharedLibraryHandler & other) = delete;
~SharedLibraryHandler(); ~SharedLibraryHandler();
BlockInputStreamPtr loadAll(); BlockInputStreamPtr loadAll();

View File

@ -365,16 +365,20 @@ static void transformFixedString(const UInt8 * src, UInt8 * dst, size_t size, UI
} }
} }
static void transformUUID(const UInt128 & src, UInt128 & dst, UInt64 seed) static void transformUUID(const UUID & src_uuid, UUID & dst_uuid, UInt64 seed)
{ {
const UInt128 & src = src_uuid.toUnderType();
UInt128 & dst = dst_uuid.toUnderType();
SipHash hash; SipHash hash;
hash.update(seed); hash.update(seed);
hash.update(reinterpret_cast<const char *>(&src), sizeof(UInt128)); hash.update(reinterpret_cast<const char *>(&src), sizeof(UUID));
/// Saving version and variant from an old UUID /// Saving version and variant from an old UUID
hash.get128(reinterpret_cast<char *>(&dst)); hash.get128(reinterpret_cast<char *>(&dst));
dst.high = (dst.high & 0x1fffffffffffffffull) | (src.high & 0xe000000000000000ull);
dst.low = (dst.low & 0xffffffffffff0fffull) | (src.low & 0x000000000000f000ull); dst.items[1] = (dst.items[1] & 0x1fffffffffffffffull) | (src.items[1] & 0xe000000000000000ull);
dst.items[0] = (dst.items[0] & 0xffffffffffff0fffull) | (src.items[0] & 0x000000000000f000ull);
} }
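As a rough illustration of the masking in transformUUID above, the following self-contained sketch keeps the same two 64-bit masks. UInt128Toy, transformUuidToy and the splitmix64 mixer are assumptions made for the example (the real code hashes with keyed SipHash and works on the UUID's underlying UInt128); only the bit bookkeeping mirrors the diff: every bit is replaced by hash output except the RFC 4122 version nibble and variant bits, which are copied back from the source.

#include <cstdint>
#include <cstdio>

// A 128-bit value as two 64-bit halves, mirroring the items[0]/items[1] layout above.
struct UInt128Toy { uint64_t items[2]; };

// Stand-in mixer (assumption): splitmix64 instead of the keyed SipHash used in the
// real code; the point here is only the bit bookkeeping, not the hash itself.
static uint64_t splitmix64(uint64_t x)
{
    x += 0x9e3779b97f4a7c15ull;
    x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ull;
    x = (x ^ (x >> 27)) * 0x94d049bb133111ebull;
    return x ^ (x >> 31);
}

// Pseudonymize a UUID: replace all bits with hash output, then copy back the
// version nibble and variant bits so the result is still a valid UUID of the
// same kind as the input.
static UInt128Toy transformUuidToy(const UInt128Toy & src, uint64_t seed)
{
    UInt128Toy dst;
    dst.items[0] = splitmix64(src.items[0] ^ seed);
    dst.items[1] = splitmix64(src.items[1] ^ ~seed);

    // The same masks as in the diff: keep the variant (top bits of one half)
    // and the version (one nibble of the other half) from the source UUID.
    dst.items[1] = (dst.items[1] & 0x1fffffffffffffffull) | (src.items[1] & 0xe000000000000000ull);
    dst.items[0] = (dst.items[0] & 0xffffffffffff0fffull) | (src.items[0] & 0x000000000000f000ull);
    return dst;
}

int main()
{
    UInt128Toy uuid{{0x0000000000004000ull, 0x8000000000000000ull}};  // version 4, RFC variant
    UInt128Toy anon = transformUuidToy(uuid, 42);
    std::printf("version nibble kept: %d, variant bits kept: %d\n",
                (anon.items[0] & 0x000000000000f000ull) == 0x4000ull,
                (anon.items[1] >> 61) == 0x4ull);
}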
class FixedStringModel : public IModel class FixedStringModel : public IModel
@ -426,10 +430,10 @@ public:
ColumnPtr generate(const IColumn & column) override ColumnPtr generate(const IColumn & column) override
{ {
const ColumnUInt128 & src_column = assert_cast<const ColumnUInt128 &>(column); const ColumnUUID & src_column = assert_cast<const ColumnUUID &>(column);
const auto & src_data = src_column.getData(); const auto & src_data = src_column.getData();
auto res_column = ColumnUInt128::create(); auto res_column = ColumnUUID::create();
auto & res_data = res_column->getData(); auto & res_data = res_column->getData();
res_data.resize(src_data.size()); res_data.resize(src_data.size());

View File

@ -109,7 +109,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
validateODBCConnectionString(connection_string), validateODBCConnectionString(connection_string),
getContext()->getSettingsRef().odbc_bridge_connection_pool_size); getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
nanodbc::catalog catalog(*connection); nanodbc::catalog catalog(connection->get());
std::string catalog_name; std::string catalog_name;
/// In XDBC tables it is allowed to pass either database_name or schema_name in table definition, but not both of them. /// In XDBC tables it is allowed to pass either database_name or schema_name in table definition, but not both of them.

View File

@ -46,7 +46,7 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ
validateODBCConnectionString(connection_string), validateODBCConnectionString(connection_string),
getContext()->getSettingsRef().odbc_bridge_connection_pool_size); getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
auto identifier = getIdentifierQuote(*connection); auto identifier = getIdentifierQuote(connection->get());
WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout);
try try

View File

@ -18,13 +18,10 @@
#include <Processors/Formats/InputStreamFromInputFormat.h> #include <Processors/Formats/InputStreamFromInputFormat.h>
#include <common/logger_useful.h> #include <common/logger_useful.h>
#include <Server/HTTP/HTMLForm.h> #include <Server/HTTP/HTMLForm.h>
#include "ODBCConnectionFactory.h"
#include <mutex> #include <mutex>
#include <memory> #include <memory>
#include <nanodbc/nanodbc.h>
namespace DB namespace DB
{ {
@ -133,12 +130,12 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
auto quoting_style = IdentifierQuotingStyle::None; auto quoting_style = IdentifierQuotingStyle::None;
#if USE_ODBC #if USE_ODBC
quoting_style = getQuotingStyle(*connection); quoting_style = getQuotingStyle(connection->get());
#endif #endif
auto & read_buf = request.getStream(); auto & read_buf = request.getStream();
auto input_format = FormatFactory::instance().getInput(format, read_buf, *sample_block, getContext(), max_block_size); auto input_format = FormatFactory::instance().getInput(format, read_buf, *sample_block, getContext(), max_block_size);
auto input_stream = std::make_shared<InputStreamFromInputFormat>(input_format); auto input_stream = std::make_shared<InputStreamFromInputFormat>(input_format);
ODBCBlockOutputStream output_stream(*connection, db_name, table_name, *sample_block, getContext(), quoting_style); ODBCBlockOutputStream output_stream(std::move(connection), db_name, table_name, *sample_block, getContext(), quoting_style);
copyData(*input_stream, output_stream); copyData(*input_stream, output_stream);
writeStringBinary("Ok.", out); writeStringBinary("Ok.", out);
} }
@ -148,7 +145,7 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
LOG_TRACE(log, "Query: {}", query); LOG_TRACE(log, "Query: {}", query);
BlockOutputStreamPtr writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, out, *sample_block, getContext()); BlockOutputStreamPtr writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, out, *sample_block, getContext());
ODBCBlockInputStream inp(*connection, query, *sample_block, max_block_size); ODBCBlockInputStream inp(std::move(connection), query, *sample_block, max_block_size);
copyData(inp, *writer); copyData(inp, *writer);
} }
} }

View File

@ -21,14 +21,13 @@ namespace ErrorCodes
ODBCBlockInputStream::ODBCBlockInputStream( ODBCBlockInputStream::ODBCBlockInputStream(
nanodbc::connection & connection_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_) nanodbc::ConnectionHolderPtr connection, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_)
: log(&Poco::Logger::get("ODBCBlockInputStream")) : log(&Poco::Logger::get("ODBCBlockInputStream"))
, max_block_size{max_block_size_} , max_block_size{max_block_size_}
, connection(connection_)
, query(query_str) , query(query_str)
{ {
description.init(sample_block); description.init(sample_block);
result = execute(connection, NANODBC_TEXT(query)); result = execute(connection->get(), NANODBC_TEXT(query));
} }

View File

@ -4,7 +4,7 @@
#include <Core/Block.h> #include <Core/Block.h>
#include <DataStreams/IBlockInputStream.h> #include <DataStreams/IBlockInputStream.h>
#include <Core/ExternalResultDescription.h> #include <Core/ExternalResultDescription.h>
#include <nanodbc/nanodbc.h> #include "ODBCConnectionFactory.h"
namespace DB namespace DB
@ -13,7 +13,7 @@ namespace DB
class ODBCBlockInputStream final : public IBlockInputStream class ODBCBlockInputStream final : public IBlockInputStream
{ {
public: public:
ODBCBlockInputStream(nanodbc::connection & connection_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_); ODBCBlockInputStream(nanodbc::ConnectionHolderPtr connection, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_);
String getName() const override { return "ODBC"; } String getName() const override { return "ODBC"; }
@ -36,7 +36,6 @@ private:
const UInt64 max_block_size; const UInt64 max_block_size;
ExternalResultDescription description; ExternalResultDescription description;
nanodbc::connection & connection;
nanodbc::result result; nanodbc::result result;
String query; String query;
bool finished = false; bool finished = false;

View File

@ -1,5 +1,6 @@
#include "ODBCBlockOutputStream.h" #include "ODBCBlockOutputStream.h"
#include <Common/hex.h>
#include <common/logger_useful.h> #include <common/logger_useful.h>
#include <Core/Field.h> #include <Core/Field.h>
#include <common/LocalDate.h> #include <common/LocalDate.h>
@ -37,17 +38,16 @@ namespace
query.IAST::format(settings); query.IAST::format(settings);
return buf.str(); return buf.str();
} }
} }
ODBCBlockOutputStream::ODBCBlockOutputStream(nanodbc::connection & connection_, ODBCBlockOutputStream::ODBCBlockOutputStream(nanodbc::ConnectionHolderPtr connection_,
const std::string & remote_database_name_, const std::string & remote_database_name_,
const std::string & remote_table_name_, const std::string & remote_table_name_,
const Block & sample_block_, const Block & sample_block_,
ContextPtr local_context_, ContextPtr local_context_,
IdentifierQuotingStyle quoting_) IdentifierQuotingStyle quoting_)
: log(&Poco::Logger::get("ODBCBlockOutputStream")) : log(&Poco::Logger::get("ODBCBlockOutputStream"))
, connection(connection_) , connection(std::move(connection_))
, db_name(remote_database_name_) , db_name(remote_database_name_)
, table_name(remote_table_name_) , table_name(remote_table_name_)
, sample_block(sample_block_) , sample_block(sample_block_)
@ -69,7 +69,7 @@ void ODBCBlockOutputStream::write(const Block & block)
writer->write(block); writer->write(block);
std::string query = getInsertQuery(db_name, table_name, block.getColumnsWithTypeAndName(), quoting) + values_buf.str(); std::string query = getInsertQuery(db_name, table_name, block.getColumnsWithTypeAndName(), quoting) + values_buf.str();
execute(connection, query); execute(connection->get(), query);
} }
} }

View File

@ -5,7 +5,7 @@
#include <Core/ExternalResultDescription.h> #include <Core/ExternalResultDescription.h>
#include <Parsers/IdentifierQuotingStyle.h> #include <Parsers/IdentifierQuotingStyle.h>
#include <Interpreters/Context_fwd.h> #include <Interpreters/Context_fwd.h>
#include <nanodbc/nanodbc.h> #include "ODBCConnectionFactory.h"
namespace DB namespace DB
@ -16,7 +16,7 @@ class ODBCBlockOutputStream : public IBlockOutputStream
public: public:
ODBCBlockOutputStream( ODBCBlockOutputStream(
nanodbc::connection & connection_, nanodbc::ConnectionHolderPtr connection_,
const std::string & remote_database_name_, const std::string & remote_database_name_,
const std::string & remote_table_name_, const std::string & remote_table_name_,
const Block & sample_block_, const Block & sample_block_,
@ -29,7 +29,7 @@ public:
private: private:
Poco::Logger * log; Poco::Logger * log;
nanodbc::connection & connection; nanodbc::ConnectionHolderPtr connection;
std::string db_name; std::string db_name;
std::string table_name; std::string table_name;
Block sample_block; Block sample_block;

View File

@ -6,53 +6,51 @@
#include <common/BorrowedObjectPool.h> #include <common/BorrowedObjectPool.h>
#include <unordered_map> #include <unordered_map>
namespace DB
{
namespace ErrorCodes
{
extern const int NO_FREE_CONNECTION;
}
}
namespace nanodbc namespace nanodbc
{ {
static constexpr inline auto ODBC_CONNECT_TIMEOUT = 100; using ConnectionPtr = std::unique_ptr<nanodbc::connection>;
using ConnectionPtr = std::shared_ptr<nanodbc::connection>;
using Pool = BorrowedObjectPool<ConnectionPtr>; using Pool = BorrowedObjectPool<ConnectionPtr>;
using PoolPtr = std::shared_ptr<Pool>; using PoolPtr = std::shared_ptr<Pool>;
class ConnectionHolder class ConnectionHolder
{ {
public: public:
ConnectionHolder(const std::string & connection_string_, PoolPtr pool_) : connection_string(connection_string_), pool(pool_) {} ConnectionHolder(PoolPtr pool_, ConnectionPtr connection_) : pool(pool_), connection(std::move(connection_)) {}
~ConnectionHolder() ConnectionHolder(const ConnectionHolder & other) = delete;
~ConnectionHolder() { pool->returnObject(std::move(connection)); }
nanodbc::connection & get() const
{ {
if (connection) assert(connection != nullptr);
pool->returnObject(std::move(connection));
}
nanodbc::connection & operator*()
{
if (!connection)
{
pool->borrowObject(connection, [&]()
{
return std::make_shared<nanodbc::connection>(connection_string, ODBC_CONNECT_TIMEOUT);
});
}
return *connection; return *connection;
} }
private: private:
std::string connection_string;
PoolPtr pool; PoolPtr pool;
ConnectionPtr connection; ConnectionPtr connection;
}; };
using ConnectionHolderPtr = std::unique_ptr<ConnectionHolder>;
} }
namespace DB namespace DB
{ {
static constexpr inline auto ODBC_CONNECT_TIMEOUT = 100;
static constexpr inline auto ODBC_POOL_WAIT_TIMEOUT = 10000;
class ODBCConnectionFactory final : private boost::noncopyable class ODBCConnectionFactory final : private boost::noncopyable
{ {
public: public:
@ -62,14 +60,32 @@ public:
return ret; return ret;
} }
nanodbc::ConnectionHolder get(const std::string & connection_string, size_t pool_size) nanodbc::ConnectionHolderPtr get(const std::string & connection_string, size_t pool_size)
{ {
std::lock_guard lock(mutex); std::lock_guard lock(mutex);
if (!factory.count(connection_string)) if (!factory.count(connection_string))
factory.emplace(std::make_pair(connection_string, std::make_shared<nanodbc::Pool>(pool_size))); factory.emplace(std::make_pair(connection_string, std::make_shared<nanodbc::Pool>(pool_size)));
return nanodbc::ConnectionHolder(connection_string, factory[connection_string]); auto & pool = factory[connection_string];
nanodbc::ConnectionPtr connection;
auto connection_available = pool->tryBorrowObject(connection, []() { return nullptr; }, ODBC_POOL_WAIT_TIMEOUT);
if (!connection_available)
throw Exception("Unable to fetch connection within the timeout", ErrorCodes::NO_FREE_CONNECTION);
try
{
if (!connection || !connection->connected())
connection = std::make_unique<nanodbc::connection>(connection_string, ODBC_CONNECT_TIMEOUT);
}
catch (...)
{
pool->returnObject(std::move(connection));
throw;
}
return std::make_unique<nanodbc::ConnectionHolder>(factory[connection_string], std::move(connection));
} }
private: private:
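The new factory above borrows a slot from a per-connection-string pool with a wait timeout, reconnects lazily, and wraps the result in an RAII holder that returns the slot on destruction. Below is a self-contained sketch of that shape; ToyConnection, ToyPool and acquire() are assumptions standing in for nanodbc::connection, BorrowedObjectPool and ODBCConnectionFactory::get(), so this is only an outline of the ownership and error-path behaviour, not the bridge's actual implementation. Handing the slot back in the catch block keeps the pool's accounting correct even when establishing the connection fails.

#include <cassert>
#include <chrono>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <string>
#include <vector>

// Toy "connection" and a tiny bounded pool, invented for this sketch.
struct ToyConnection
{
    explicit ToyConnection(std::string dsn_) : dsn(std::move(dsn_)) {}
    bool connected() const { return true; }
    std::string dsn;
};
using ConnectionPtr = std::unique_ptr<ToyConnection>;

class ToyPool
{
public:
    explicit ToyPool(size_t size) : free_slots(size) {}

    // Wait up to timeout_ms for a slot; the returned pointer may be null, meaning
    // "you own a slot but must create the connection yourself".
    bool tryBorrow(ConnectionPtr & out, int timeout_ms)
    {
        std::unique_lock lock(mutex);
        if (!cv.wait_for(lock, std::chrono::milliseconds(timeout_ms), [&] { return free_slots > 0 || !idle.empty(); }))
            return false;
        if (!idle.empty()) { out = std::move(idle.back()); idle.pop_back(); }
        else { --free_slots; out = nullptr; }
        return true;
    }

    void giveBack(ConnectionPtr connection)
    {
        std::lock_guard lock(mutex);
        if (connection) idle.push_back(std::move(connection));
        else ++free_slots;
        cv.notify_one();
    }

private:
    std::mutex mutex;
    std::condition_variable cv;
    std::vector<ConnectionPtr> idle;
    size_t free_slots;
};
using PoolPtr = std::shared_ptr<ToyPool>;

// RAII holder, mirroring the holder in the diff: owns a live connection for its
// lifetime and hands it back to the pool in the destructor.
class ConnectionHolder
{
public:
    ConnectionHolder(PoolPtr pool_, ConnectionPtr connection_) : pool(std::move(pool_)), connection(std::move(connection_)) {}
    ConnectionHolder(const ConnectionHolder &) = delete;
    ~ConnectionHolder() { pool->giveBack(std::move(connection)); }

    ToyConnection & get() const { assert(connection != nullptr); return *connection; }

private:
    PoolPtr pool;
    ConnectionPtr connection;
};

// Factory step, as in the diff: borrow (with timeout), reconnect if needed,
// and wrap the result so the slot is always returned.
std::unique_ptr<ConnectionHolder> acquire(const PoolPtr & pool, const std::string & dsn, int timeout_ms = 10000)
{
    ConnectionPtr connection;
    if (!pool->tryBorrow(connection, timeout_ms))
        throw std::runtime_error("Unable to fetch connection within the timeout");
    try
    {
        if (!connection || !connection->connected())
            connection = std::make_unique<ToyConnection>(dsn);
    }
    catch (...)
    {
        pool->giveBack(std::move(connection));  // do not leak the pool slot
        throw;
    }
    return std::make_unique<ConnectionHolder>(pool, std::move(connection));
}

int main()
{
    auto pool = std::make_shared<ToyPool>(2);
    auto holder = acquire(pool, "DSN=example");  // slot released back to the pool at scope exit
    return holder->get().connected() ? 0 : 1;
}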

View File

@ -53,7 +53,7 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer
validateODBCConnectionString(connection_string), validateODBCConnectionString(connection_string),
getContext()->getSettingsRef().odbc_bridge_connection_pool_size); getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
bool result = isSchemaAllowed(*connection); bool result = isSchemaAllowed(connection->get());
WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout);
try try

View File

@ -3,7 +3,6 @@
#if USE_ODBC #if USE_ODBC
#include <common/logger_useful.h> #include <common/logger_useful.h>
#include <nanodbc/nanodbc.h>
#include <sql.h> #include <sql.h>
#include <sqlext.h> #include <sqlext.h>

View File

@ -13,6 +13,7 @@
#include <Poco/Net/HTTPServer.h> #include <Poco/Net/HTTPServer.h>
#include <Poco/Net/NetException.h> #include <Poco/Net/NetException.h>
#include <Poco/Util/HelpFormatter.h> #include <Poco/Util/HelpFormatter.h>
#include <Poco/Environment.h>
#include <ext/scope_guard.h> #include <ext/scope_guard.h>
#include <common/defines.h> #include <common/defines.h>
#include <common/logger_useful.h> #include <common/logger_useful.h>
@ -385,6 +386,11 @@ void Server::initialize(Poco::Util::Application & self)
{ {
BaseDaemon::initialize(self); BaseDaemon::initialize(self);
logger().information("starting up"); logger().information("starting up");
LOG_INFO(&logger(), "OS Name = {}, OS Version = {}, OS Architecture = {}",
Poco::Environment::osName(),
Poco::Environment::osVersion(),
Poco::Environment::osArchitecture());
} }
std::string Server::getDefaultCorePath() const std::string Server::getDefaultCorePath() const
@ -879,7 +885,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
global_context->setMMappedFileCache(mmap_cache_size); global_context->setMMappedFileCache(mmap_cache_size);
#if USE_EMBEDDED_COMPILER #if USE_EMBEDDED_COMPILER
size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", 500); constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 1024;
size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", compiled_expression_cache_size_default);
CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size); CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size);
#endif #endif

View File

@ -329,6 +329,8 @@
--> -->
<mmap_cache_size>1000</mmap_cache_size> <mmap_cache_size>1000</mmap_cache_size>
<!-- Cache size for compiled expressions.-->
<compiled_expression_cache_size>1073741824</compiled_expression_cache_size>
<!-- Path to data directory, with trailing slash. --> <!-- Path to data directory, with trailing slash. -->
<path>/var/lib/clickhouse/</path> <path>/var/lib/clickhouse/</path>
@ -518,6 +520,33 @@
<!-- Reallocate memory for machine code ("text") using huge pages. Highly experimental. --> <!-- Reallocate memory for machine code ("text") using huge pages. Highly experimental. -->
<remap_executable>false</remap_executable> <remap_executable>false</remap_executable>
<![CDATA[
Uncomment below in order to use JDBC table engine and function.
To install and run JDBC bridge in background:
* [Debian/Ubuntu]
export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge
export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
clickhouse-jdbc-bridge &
* [CentOS/RHEL]
export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge
export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
clickhouse-jdbc-bridge &
Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
]]>
<!--
<jdbc_bridge>
<host>127.0.0.1</host>
<port>9019</port>
</jdbc_bridge>
-->
<!-- Configuration of clusters that could be used in Distributed tables. <!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.tech/docs/en/operations/table_engines/distributed/ https://clickhouse.tech/docs/en/operations/table_engines/distributed/
--> -->

View File

@ -133,6 +133,7 @@ enum class AccessType
M(SYSTEM_RELOAD_MODEL, "SYSTEM RELOAD MODELS, RELOAD MODEL, RELOAD MODELS", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_MODEL, "SYSTEM RELOAD MODELS, RELOAD MODEL, RELOAD MODELS", GLOBAL, SYSTEM_RELOAD) \
M(SYSTEM_RELOAD_EMBEDDED_DICTIONARIES, "RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM_RELOAD) /* implicitly enabled by the grant SYSTEM_RELOAD_DICTIONARY ON *.* */\ M(SYSTEM_RELOAD_EMBEDDED_DICTIONARIES, "RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM_RELOAD) /* implicitly enabled by the grant SYSTEM_RELOAD_DICTIONARY ON *.* */\
M(SYSTEM_RELOAD, "", GROUP, SYSTEM) \ M(SYSTEM_RELOAD, "", GROUP, SYSTEM) \
M(SYSTEM_RESTART_DISK, "SYSTEM RESTART DISK", GLOBAL, SYSTEM) \
M(SYSTEM_MERGES, "SYSTEM STOP MERGES, SYSTEM START MERGES, STOP_MERGES, START MERGES", TABLE, SYSTEM) \ M(SYSTEM_MERGES, "SYSTEM STOP MERGES, SYSTEM START MERGES, STOP_MERGES, START MERGES", TABLE, SYSTEM) \
M(SYSTEM_TTL_MERGES, "SYSTEM STOP TTL MERGES, SYSTEM START TTL MERGES, STOP TTL MERGES, START TTL MERGES", TABLE, SYSTEM) \ M(SYSTEM_TTL_MERGES, "SYSTEM STOP TTL MERGES, SYSTEM START TTL MERGES, STOP TTL MERGES, START TTL MERGES", TABLE, SYSTEM) \
M(SYSTEM_FETCHES, "SYSTEM STOP FETCHES, SYSTEM START FETCHES, STOP FETCHES, START FETCHES", TABLE, SYSTEM) \ M(SYSTEM_FETCHES, "SYSTEM STOP FETCHES, SYSTEM START FETCHES, STOP FETCHES, START FETCHES", TABLE, SYSTEM) \

View File

@ -355,8 +355,9 @@ String DiskAccessStorage::getStorageParamsJSON() const
std::lock_guard lock{mutex}; std::lock_guard lock{mutex};
Poco::JSON::Object json; Poco::JSON::Object json;
json.set("path", directory_path); json.set("path", directory_path);
if (readonly) bool readonly_loaded = readonly;
json.set("readonly", readonly.load()); if (readonly_loaded)
json.set("readonly", Poco::Dynamic::Var{true});
std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
oss.exceptions(std::ios::failbit); oss.exceptions(std::ios::failbit);
Poco::JSON::Stringifier::stringify(json, oss); Poco::JSON::Stringifier::stringify(json, oss);

View File

@ -77,7 +77,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str
if (enable_tls_lc_str == "starttls") if (enable_tls_lc_str == "starttls")
params.enable_tls = LDAPClient::Params::TLSEnable::YES_STARTTLS; params.enable_tls = LDAPClient::Params::TLSEnable::YES_STARTTLS;
else if (config.getBool(ldap_server_config + ".enable_tls")) else if (config.getBool(ldap_server_config + ".enable_tls"))
params.enable_tls = LDAPClient::Params::TLSEnable::YES; params.enable_tls = LDAPClient::Params::TLSEnable::YES; //-V1048
else else
params.enable_tls = LDAPClient::Params::TLSEnable::NO; params.enable_tls = LDAPClient::Params::TLSEnable::NO;
} }
@ -96,7 +96,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str
else if (tls_minimum_protocol_version_lc_str == "tls1.1") else if (tls_minimum_protocol_version_lc_str == "tls1.1")
params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_1; params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_1;
else if (tls_minimum_protocol_version_lc_str == "tls1.2") else if (tls_minimum_protocol_version_lc_str == "tls1.2")
params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; //-V1048
else else
throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS); throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS);
} }
@ -113,7 +113,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str
else if (tls_require_cert_lc_str == "try") else if (tls_require_cert_lc_str == "try")
params.tls_require_cert = LDAPClient::Params::TLSRequireCert::TRY; params.tls_require_cert = LDAPClient::Params::TLSRequireCert::TRY;
else if (tls_require_cert_lc_str == "demand") else if (tls_require_cert_lc_str == "demand")
params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; //-V1048
else else
throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS); throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS);
} }

View File

@ -136,7 +136,7 @@ GrantedRoles::Elements GrantedRoles::getElements() const
boost::range::set_difference(roles, roles_with_admin_option, std::back_inserter(element.ids)); boost::range::set_difference(roles, roles_with_admin_option, std::back_inserter(element.ids));
if (!element.empty()) if (!element.empty())
{ {
element.admin_option = false; element.admin_option = false; //-V1048
elements.emplace_back(std::move(element)); elements.emplace_back(std::move(element));
} }

View File

@ -1,7 +1,7 @@
#pragma once #pragma once
#include <Access/IAccessEntity.h> #include <Access/IAccessEntity.h>
#include <common/types.h> #include <Core/Types.h>
#include <Core/UUID.h> #include <Core/UUID.h>
#include <ext/scope_guard.h> #include <ext/scope_guard.h>
#include <functional> #include <functional>

Some files were not shown because too many files have changed in this diff.