fix conflict

feng lv 2021-06-25 08:06:06 +00:00
commit 228e06628a
919 changed files with 16416 additions and 10097 deletions

.gitmodules vendored

@ -103,7 +103,7 @@
url = https://github.com/ClickHouse-Extras/fastops
[submodule "contrib/orc"]
path = contrib/orc
-url = https://github.com/apache/orc
+url = https://github.com/ClickHouse-Extras/orc
[submodule "contrib/sparsehash-c11"]
path = contrib/sparsehash-c11
url = https://github.com/sparsehash/sparsehash-c11.git
@ -210,9 +210,6 @@
[submodule "contrib/fast_float"]
path = contrib/fast_float
url = https://github.com/fastfloat/fast_float
-[submodule "contrib/libpqxx"]
-path = contrib/libpqxx
-url = https://github.com/jtv/libpqxx
[submodule "contrib/libpq"]
path = contrib/libpq
url = https://github.com/ClickHouse-Extras/libpq
@ -231,3 +228,6 @@
[submodule "contrib/yaml-cpp"]
path = contrib/yaml-cpp
url = https://github.com/ClickHouse-Extras/yaml-cpp.git
+[submodule "contrib/libpqxx"]
+path = contrib/libpqxx
+url = https://github.com/ClickHouse-Extras/libpqxx.git


@ -2,8 +2,6 @@
#### Upgrade Notes
-* One bug has been found after release: [#25187](https://github.com/ClickHouse/ClickHouse/issues/25187).
-* Do not upgrade if you have partition key with `UUID`.
* `zstd` compression library is updated to v1.5.0. You may get messages about "checksum does not match" in replication. These messages are expected due to update of compression algorithm and you can ignore them. These messages are informational and do not indicate any kinds of undesired behaviour.
* The setting `compile_expressions` is enabled by default. Although it has been heavily tested on variety of scenarios, if you find some undesired behaviour on your servers, you can try turning this setting off.
* Values of `UUID` type cannot be compared with integer. For example, instead of writing `uuid != 0` type `uuid != '00000000-0000-0000-0000-000000000000'`.


@ -15,4 +15,4 @@ ClickHouse® is an open-source column-oriented database management system that a
* You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.
## Upcoming Events
-* [SF Bay Area ClickHouse Community Meetup (online)](https://www.meetup.com/San-Francisco-Bay-Area-ClickHouse-Meetup/events/278144089/) on 16 June 2021.
+* [China ClickHouse Community Meetup (online)](http://hdxu.cn/rhbfZ) on 26 June 2021.


@ -1,14 +1,22 @@
#include "IBridge.h"
-#include <IO/ReadHelpers.h>
#include <boost/program_options.hpp>
#include <Poco/Net/NetException.h>
#include <Poco/Util/HelpFormatter.h>
-#include <Common/StringUtils/StringUtils.h>
-#include <Formats/registerFormats.h>
#include <common/logger_useful.h>
+#include <common/range.h>
+#include <Common/StringUtils/StringUtils.h>
#include <Common/SensitiveDataMasker.h>
+#include <common/errnoToString.h>
+#include <IO/ReadHelpers.h>
+#include <Formats/registerFormats.h>
#include <Server/HTTP/HTTPServer.h>
+#include <IO/WriteBufferFromFile.h>
+#include <IO/WriteHelpers.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#if USE_ODBC
# include <Poco/Data/ODBC/Connector.h>
@ -163,6 +171,31 @@ void IBridge::initialize(Application & self)
max_server_connections = config().getUInt("max-server-connections", 1024);
keep_alive_timeout = config().getUInt64("keep-alive-timeout", 10);
+struct rlimit limit;
+const UInt64 gb = 1024 * 1024 * 1024;
+/// Set maximum RSS to 1 GiB.
+limit.rlim_max = limit.rlim_cur = gb;
+if (setrlimit(RLIMIT_RSS, &limit))
+LOG_WARNING(log, "Unable to set maximum RSS to 1GB: {} (current rlim_cur={}, rlim_max={})",
+errnoToString(errno), limit.rlim_cur, limit.rlim_max);
+if (!getrlimit(RLIMIT_RSS, &limit))
+LOG_INFO(log, "RSS limit: cur={}, max={}", limit.rlim_cur, limit.rlim_max);
+try
+{
+const auto oom_score = toString(config().getUInt64("bridge_oom_score", 500));
+WriteBufferFromFile buf("/proc/self/oom_score_adj");
+buf.write(oom_score.data(), oom_score.size());
+buf.close();
+LOG_INFO(log, "OOM score is set to {}", oom_score);
+}
+catch (const Exception & e)
+{
+LOG_WARNING(log, "Failed to set OOM score, error: {}", e.what());
+}
initializeTerminationAndSignalProcessing();
ServerApplication::initialize(self); // NOLINT
@ -214,7 +247,7 @@ int IBridge::main(const std::vector<std::string> & /*args*/)
server.stop();
-for (size_t count : ext::range(1, 6))
+for (size_t count : collections::range(1, 6))
{
if (server.currentConnections() == 0)
break;
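The added initialization caps the bridge's resident memory and raises its OOM score so the kernel prefers to kill the bridge rather than the server it talks to. A minimal standalone sketch of the same idea using only POSIX and the standard library (the 500 default is taken from the diff above; everything else is illustrative, not ClickHouse's actual implementation):

#include <sys/resource.h>
#include <cerrno>
#include <cstring>
#include <fstream>
#include <iostream>

int main()
{
    // Cap resident memory at 1 GiB (note: modern Linux kernels may ignore RLIMIT_RSS).
    rlimit limit{};
    limit.rlim_cur = limit.rlim_max = 1ULL << 30;
    if (setrlimit(RLIMIT_RSS, &limit))
        std::cerr << "setrlimit failed: " << std::strerror(errno) << '\n';

    // Make this process a preferred OOM-killer victim, like bridge_oom_score = 500 above.
    std::ofstream oom("/proc/self/oom_score_adj");
    oom << 500;
    if (!oom)
        std::cerr << "could not adjust oom_score_adj\n";
}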


@ -91,10 +91,12 @@ struct DecomposedFloat
/// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic.
+/// This function is generic, big integers (128, 256 bit) are supported as well.
/// Infinities are compared correctly. NaNs are treat similarly to infinities, so they can be less than all numbers.
/// (note that we need total order)
+/// Returns -1, 0 or 1.
template <typename Int>
-int compare(Int rhs)
+int compare(Int rhs) const
{
if (rhs == 0)
return sign();
@ -137,10 +139,11 @@ struct DecomposedFloat
if (normalized_exponent() >= static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
return is_negative() ? -1 : 1;
-using UInt = make_unsigned_t<Int>;
+using UInt = std::conditional_t<(sizeof(Int) > sizeof(typename Traits::UInt)), make_unsigned_t<Int>, typename Traits::UInt>;
UInt uint_rhs = rhs < 0 ? -rhs : rhs;
/// Smaller octave: abs(rhs) < abs(float)
+/// FYI, TIL: octave is also called "binade", https://en.wikipedia.org/wiki/Binade
if (uint_rhs < (static_cast<UInt>(1) << normalized_exponent()))
return is_negative() ? -1 : 1;
@ -154,11 +157,11 @@ struct DecomposedFloat
bool large_and_always_integer = normalized_exponent() >= static_cast<int16_t>(Traits::mantissa_bits);
-typename Traits::UInt a = large_and_always_integer
-? mantissa() << (normalized_exponent() - Traits::mantissa_bits)
-: mantissa() >> (Traits::mantissa_bits - normalized_exponent());
+UInt a = large_and_always_integer
+? static_cast<UInt>(mantissa()) << (normalized_exponent() - Traits::mantissa_bits)
+: static_cast<UInt>(mantissa()) >> (Traits::mantissa_bits - normalized_exponent());
-typename Traits::UInt b = uint_rhs - (static_cast<UInt>(1) << normalized_exponent());
+UInt b = uint_rhs - (static_cast<UInt>(1) << normalized_exponent());
if (a < b)
return is_negative() ? 1 : -1;
@ -175,37 +178,37 @@ struct DecomposedFloat
template <typename Int>
-bool equals(Int rhs)
+bool equals(Int rhs) const
{
return compare(rhs) == 0;
}
template <typename Int>
-bool notEquals(Int rhs)
+bool notEquals(Int rhs) const
{
return compare(rhs) != 0;
}
template <typename Int>
-bool less(Int rhs)
+bool less(Int rhs) const
{
return compare(rhs) < 0;
}
template <typename Int>
-bool greater(Int rhs)
+bool greater(Int rhs) const
{
return compare(rhs) > 0;
}
template <typename Int>
-bool lessOrEquals(Int rhs)
+bool lessOrEquals(Int rhs) const
{
return compare(rhs) <= 0;
}
template <typename Int>
-bool greaterOrEquals(Int rhs)
+bool greaterOrEquals(Int rhs) const
{
return compare(rhs) >= 0;
}


@ -1,6 +1,6 @@
#include <common/ReadlineLineReader.h>
#include <common/errnoToString.h>
-#include <ext/scope_guard.h>
+#include <common/scope_guard.h>
#include <errno.h>
#include <signal.h>


@ -3,7 +3,7 @@
#include <map>
#include <tuple>
#include <mutex>
-#include <ext/function_traits.h>
+#include <common/function_traits.h>
/** The simplest cache for a free function.
@ -32,10 +32,11 @@ public:
template <typename... Args>
Result operator() (Args &&... args)
{
+Key key{std::forward<Args>(args)...};
{
std::lock_guard lock(mutex);
-Key key{std::forward<Args>(args)...};
auto it = cache.find(key);
if (cache.end() != it)
@ -43,7 +44,7 @@ public:
}
/// The calculations themselves are not done under mutex.
-Result res = f(std::forward<Args>(args)...);
+Result res = std::apply(f, key);
{
std::lock_guard lock(mutex);
@ -57,11 +58,12 @@ public:
template <typename... Args>
void update(Args &&... args)
{
-Result res = f(std::forward<Args>(args)...);
+Key key{std::forward<Args>(args)...};
+Result res = std::apply(f, key);
{
std::lock_guard lock(mutex);
-Key key{std::forward<Args>(args)...};
cache[key] = std::move(res);
}
}
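The change above builds the key tuple once, outside the mutex, and reuses it both for the lookup and for invoking the function via std::apply. A minimal sketch of that pattern as a generic memoizer (not the class from the diff, just an illustration of the same design):

#include <iostream>
#include <map>
#include <mutex>
#include <tuple>

template <typename Result, typename... Args>
class SimpleCache
{
public:
    explicit SimpleCache(Result (*f_)(Args...)) : f(f_) {}

    Result operator()(Args... args)
    {
        std::tuple<Args...> key{args...};          // built once, outside the lock
        {
            std::lock_guard lock(mutex);
            if (auto it = cache.find(key); it != cache.end())
                return it->second;
        }
        Result res = std::apply(f, key);           // computed without holding the mutex
        std::lock_guard lock(mutex);
        return cache.emplace(key, std::move(res)).first->second;
    }

private:
    Result (*f)(Args...);
    std::map<std::tuple<Args...>, Result> cache;
    std::mutex mutex;
};

static int slow_add(int a, int b) { return a + b; }

int main()
{
    SimpleCache<int, int, int> cached_add(&slow_add);
    std::cout << cached_add(2, 3) << '\n';   // computed
    std::cout << cached_add(2, 3) << '\n';   // served from the cache
}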

base/common/arraySize.h Normal file

@ -0,0 +1,7 @@
#pragma once
#include <cstdlib>
/** \brief Returns number of elements in an automatic array. */
template <typename T, std::size_t N>
constexpr size_t arraySize(const T (&)[N]) noexcept { return N; }
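arraySize gives the element count of a built-in array from its type, at compile time. A one-liner usage sketch (the <common/arraySize.h> include path is an assumption based on the file's location under base/):

#include <common/arraySize.h>   // assumed include path for the header above
#include <iostream>

int main()
{
    int primes[] = {2, 3, 5, 7, 11};
    std::cout << arraySize(primes) << '\n';   // prints 5, derived from the array type
}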

base/common/bit_cast.h Normal file

@ -0,0 +1,27 @@
#pragma once
#include <string.h>
#include <algorithm>
#include <type_traits>
/** \brief Returns value `from` converted to type `To` while retaining bit representation.
* `To` and `From` must satisfy `CopyConstructible`.
*/
template <typename To, typename From>
std::decay_t<To> bit_cast(const From & from)
{
To res {};
memcpy(static_cast<void*>(&res), &from, std::min(sizeof(res), sizeof(from)));
return res;
}
/** \brief Returns value `from` converted to type `To` while retaining bit representation.
* `To` and `From` must satisfy `CopyConstructible`.
*/
template <typename To, typename From>
std::decay_t<To> safe_bit_cast(const From & from)
{
static_assert(sizeof(To) == sizeof(From), "bit cast on types of different width");
return bit_cast<To, From>(from);
}
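bit_cast copies min(sizeof(To), sizeof(From)) bytes into a zero-initialized result, while safe_bit_cast additionally requires the two types to have the same width. A small usage sketch (the <common/bit_cast.h> include path is an assumption based on the file's location under base/):

#include <common/bit_cast.h>   // assumed include path for the header above
#include <cstdint>
#include <cstdio>

int main()
{
    // Reinterpret the bits of a float as an unsigned 32-bit integer (0x3F800000 for 1.0f).
    float f = 1.0f;
    auto bits = bit_cast<uint32_t>(f);
    std::printf("%08X\n", bits);

    // safe_bit_cast requires equal widths; double -> uint32_t would fail the static_assert.
    auto same_width = safe_bit_cast<uint64_t>(1.0);   // 0x3FF0000000000000
    std::printf("%016llX\n", static_cast<unsigned long long>(same_width));
}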

base/common/chrono_io.h Normal file

@ -0,0 +1,46 @@
#pragma once
#include <chrono>
#include <string>
#include <sstream>
#include <cctz/time_zone.h>
inline std::string to_string(const std::time_t & time)
{
return cctz::format("%Y-%m-%d %H:%M:%S", std::chrono::system_clock::from_time_t(time), cctz::local_time_zone());
}
template <typename Clock, typename Duration = typename Clock::duration>
std::string to_string(const std::chrono::time_point<Clock, Duration> & tp)
{
// Don't use DateLUT because it shows weird characters for
// TimePoint::max(). I wish we could use C++20 format, but it's not
// there yet.
// return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp));
auto in_time_t = std::chrono::system_clock::to_time_t(tp);
return to_string(in_time_t);
}
template <typename Rep, typename Period = std::ratio<1>>
std::string to_string(const std::chrono::duration<Rep, Period> & duration)
{
auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(duration);
if (seconds_as_int == duration)
return std::to_string(seconds_as_int.count()) + "s";
auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(duration);
return std::to_string(seconds_as_double.count()) + "s";
}
template <typename Clock, typename Duration = typename Clock::duration>
std::ostream & operator<<(std::ostream & o, const std::chrono::time_point<Clock, Duration> & tp)
{
return o << to_string(tp);
}
template <typename Rep, typename Period = std::ratio<1>>
std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & duration)
{
return o << to_string(duration);
}
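These helpers format time_points through cctz and durations as a second count with an "s" suffix, falling back to a fractional count when the duration is not a whole number of seconds. A small usage sketch (the <common/chrono_io.h> include path is an assumption based on the file's location under base/):

#include <common/chrono_io.h>   // assumed include path for the header above
#include <chrono>
#include <iostream>

int main()
{
    using namespace std::chrono_literals;

    auto now = std::chrono::system_clock::now();
    std::cout << to_string(now) << '\n';      // e.g. "2021-06-25 08:06:06"

    std::cout << to_string(90s) << '\n';      // "90s" (whole seconds)
    std::cout << to_string(1500ms) << '\n';   // "1.500000s" (fractional seconds path)
}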

base/common/map.h Normal file

@ -0,0 +1,52 @@
#pragma once
#include <type_traits>
#include <boost/iterator/transform_iterator.hpp>
namespace collections
{
/// \brief Strip type off top level reference and cv-qualifiers thus allowing storage in containers
template <typename T>
using unqualified_t = std::remove_cv_t<std::remove_reference_t<T>>;
/** \brief Returns collection of the same container-type as the input collection,
* with each element transformed by the application of `mapper`.
*/
template <template <typename...> class Collection, typename... Params, typename Mapper>
auto map(const Collection<Params...> & collection, Mapper && mapper)
{
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return Collection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
}
/** \brief Returns collection of specified container-type,
* with each element transformed by the application of `mapper`.
* Allows conversion between different container-types, e.g. std::vector to std::list
*/
template <template <typename...> class ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, Mapper && mapper)
{
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return ResultCollection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
}
/** \brief Returns collection of specified type,
* with each element transformed by the application of `mapper`.
* Allows leveraging implicit conversion between the result of applying `mapper` and R::value_type.
*/
template <typename ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, Mapper && mapper)
{
return ResultCollection(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
}
}
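collections::map returns a container of the mapped values: in the same container template, in another container template, or in an explicitly named result type. A brief usage sketch (the <common/map.h> include path is an assumption based on the file's location under base/):

#include <common/map.h>   // assumed include path for the header above
#include <iostream>
#include <list>
#include <string>
#include <vector>

int main()
{
    std::vector<int> numbers{1, 2, 3};

    // Same container template: vector<int> -> vector<std::string>.
    auto strings = collections::map(numbers, [](int x) { return std::to_string(x * x); });

    // Different container template: vector<int> -> list<int>.
    auto doubled = collections::map<std::list>(numbers, [](int x) { return x * 2; });

    for (const auto & s : strings)
        std::cout << s << ' ';
    for (int x : doubled)
        std::cout << x << ' ';
    std::cout << '\n';
}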


@ -4,9 +4,9 @@
#include <boost/range/adaptor/transformed.hpp>
#include <type_traits>
-namespace ext
+namespace collections
{
namespace internal
{
template <typename ResultType, typename CountingType, typename BeginType, typename EndType>
@ -24,11 +24,11 @@ namespace internal
/// For loop adaptor which is used to iterate through a half-closed interval [begin, end).
/// The parameters `begin` and `end` can have any integral or enum types.
template <typename BeginType,
typename EndType,
typename = std::enable_if_t<
(std::is_integral_v<BeginType> || std::is_enum_v<BeginType>) &&
(std::is_integral_v<EndType> || std::is_enum_v<EndType>) &&
(!std::is_enum_v<BeginType> || !std::is_enum_v<EndType> || std::is_same_v<BeginType, EndType>), void>>
inline auto range(BeginType begin, EndType end)
{
if constexpr (std::is_integral_v<BeginType> && std::is_integral_v<EndType>)
@ -51,7 +51,7 @@ inline auto range(BeginType begin, EndType end)
/// The parameter `end` can have any integral or enum type.
/// The same as range(0, end).
template <typename Type,
typename = std::enable_if_t<std::is_integral_v<Type> || std::is_enum_v<Type>, void>>
inline auto range(Type end)
{
if constexpr (std::is_integral_v<Type>)
@ -59,4 +59,5 @@ inline auto range(Type end)
else
return internal::rangeImpl<Type, std::underlying_type_t<Type>>(0, end);
}
}
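The range adaptor, now under the collections namespace, iterates a half-closed interval [begin, end). A small usage sketch mirroring the IBridge change above (the <common/range.h> include path is an assumption based on the file's location under base/):

#include <common/range.h>   // assumed include path for the header above
#include <iostream>

int main()
{
    // Half-closed interval [1, 6): prints 1 2 3 4 5, as in the IBridge retry loop.
    for (size_t i : collections::range(1, 6))
        std::cout << i << ' ';

    // Single-argument form, the same as range(0, 3): prints 0 1 2.
    for (size_t i : collections::range(3))
        std::cout << i << ' ';
    std::cout << '\n';
}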


@ -4,9 +4,6 @@
#include <memory>
#include <utility>
-namespace ext
-{
template <class F>
class [[nodiscard]] basic_scope_guard
{
@ -105,10 +102,9 @@ using scope_guard = basic_scope_guard<std::function<void(void)>>;
template <class F>
inline basic_scope_guard<F> make_scope_guard(F && function_) { return std::forward<F>(function_); }
-}
#define SCOPE_EXIT_CONCAT(n, ...) \
-const auto scope_exit##n = ext::make_scope_guard([&] { __VA_ARGS__; })
+const auto scope_exit##n = make_scope_guard([&] { __VA_ARGS__; })
#define SCOPE_EXIT_FWD(n, ...) SCOPE_EXIT_CONCAT(n, __VA_ARGS__)
#define SCOPE_EXIT(...) SCOPE_EXIT_FWD(__LINE__, __VA_ARGS__)
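With the ext namespace gone, make_scope_guard and the SCOPE_EXIT macro are now unqualified. A small usage sketch (the <common/scope_guard.h> include path is an assumption based on the file's location under base/):

#include <common/scope_guard.h>   // assumed include path for the header above
#include <cstdio>

int main()
{
    std::FILE * f = std::fopen("/tmp/scope_guard_demo.txt", "w");
    if (!f)
        return 1;

    // Runs when the enclosing scope exits, whether normally or via an exception.
    SCOPE_EXIT({ std::fclose(f); std::puts("file closed"); });

    std::fputs("hello\n", f);
    return 0;
}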


@ -1,6 +1,6 @@
#pragma once
-#include <ext/scope_guard.h>
+#include <common/scope_guard.h>
#include <common/logger_useful.h>
#include <Common/MemoryTracker.h>


@ -2,8 +2,6 @@
#include <memory>
-namespace ext
-{
/** Allows to make std::shared_ptr from T with protected constructor.
 *
@ -36,4 +34,3 @@
template <typename T>
inline constexpr bool is_shared_ptr_v = is_shared_ptr<T>::value;
-}


@ -109,10 +109,7 @@
constexpr explicit operator bool() const noexcept;
-template <class T>
-using _integral_not_wide_integer_class = typename std::enable_if<std::is_arithmetic<T>::value, T>::type;
-template <class T, class = _integral_not_wide_integer_class<T>>
+template <typename T, typename = std::enable_if_t<std::is_arithmetic_v<T>, T>>
constexpr operator T() const noexcept;
constexpr operator long double() const noexcept;


@ -255,13 +255,13 @@
set_multiplier<double>(self, alpha);
self *= max_int;
-self += static_cast<uint64_t>(t - alpha * static_cast<T>(max_int)); // += b_i
+self += static_cast<uint64_t>(t - floor(alpha) * static_cast<T>(max_int)); // += b_i
}
-constexpr static void wide_integer_from_builtin(integer<Bits, Signed>& self, double rhs) noexcept
+constexpr static void wide_integer_from_builtin(integer<Bits, Signed> & self, double rhs) noexcept
{
constexpr int64_t max_int = std::numeric_limits<int64_t>::max();
-constexpr int64_t min_int = std::numeric_limits<int64_t>::min();
+constexpr int64_t min_int = std::numeric_limits<int64_t>::lowest();
/// There are values in int64 that have more than 53 significant bits (in terms of double
/// representation). Such values, being promoted to double, are rounded up or down. If they are rounded up,
@ -271,14 +271,14 @@ struct integer<Bits, Signed>::_impl
/// The necessary check here is that long double has enough significant (mantissa) bits to store the
/// int64_t max value precisely.
-//TODO Be compatible with Apple aarch64
+// TODO Be compatible with Apple aarch64
#if not (defined(__APPLE__) && defined(__aarch64__))
static_assert(LDBL_MANT_DIG >= 64,
-"On your system long double has less than 64 precision bits,"
+"On your system long double has less than 64 precision bits, "
"which may result in UB when initializing double from int64_t");
#endif
-if ((rhs > 0 && rhs < static_cast<long double>(max_int)) || (rhs < 0 && rhs > static_cast<long double>(min_int)))
+if (rhs > static_cast<long double>(min_int) && rhs < static_cast<long double>(max_int))
{
self = static_cast<int64_t>(rhs);
return;
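The comment explains why the conversion must range-check the double (as a long double) before taking the int64 fast path: int64 values wider than the 53-bit double mantissa get rounded when promoted. A tiny illustration of that rounding, independent of the wide-integer class:

#include <cstdint>
#include <iostream>
#include <limits>

int main()
{
    constexpr int64_t max_int = std::numeric_limits<int64_t>::max();   // 2^63 - 1

    // Promoting to double rounds up to exactly 2^63, one past the representable int64 range.
    std::cout.precision(20);
    std::cout << static_cast<double>(max_int) << '\n';                 // 9223372036854775808

    // Two different int64 values collapse to the same double.
    std::cout << (static_cast<double>(max_int) == static_cast<double>(max_int - 512)) << '\n';   // 1
}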


@ -21,7 +21,7 @@
#include <fstream>
#include <sstream>
#include <memory>
-#include <ext/scope_guard.h>
+#include <common/scope_guard.h>
#include <Poco/Observer.h>
#include <Poco/AutoPtr.h>


@ -1,30 +0,0 @@
#pragma once
#include <string.h>
#include <algorithm>
#include <type_traits>
namespace ext
{
/** \brief Returns value `from` converted to type `To` while retaining bit representation.
* `To` and `From` must satisfy `CopyConstructible`.
*/
template <typename To, typename From>
std::decay_t<To> bit_cast(const From & from)
{
To res {};
memcpy(static_cast<void*>(&res), &from, std::min(sizeof(res), sizeof(from)));
return res;
}
/** \brief Returns value `from` converted to type `To` while retaining bit representation.
* `To` and `From` must satisfy `CopyConstructible`.
*/
template <typename To, typename From>
std::decay_t<To> safe_bit_cast(const From & from)
{
static_assert(sizeof(To) == sizeof(From), "bit cast on types of different width");
return bit_cast<To, From>(from);
}
}


@ -1,49 +0,0 @@
#pragma once
#include <chrono>
#include <string>
#include <sstream>
#include <cctz/time_zone.h>
namespace ext
{
inline std::string to_string(const std::time_t & time)
{
return cctz::format("%Y-%m-%d %H:%M:%S", std::chrono::system_clock::from_time_t(time), cctz::local_time_zone());
}
template <typename Clock, typename Duration = typename Clock::duration>
std::string to_string(const std::chrono::time_point<Clock, Duration> & tp)
{
// Don't use DateLUT because it shows weird characters for
// TimePoint::max(). I wish we could use C++20 format, but it's not
// there yet.
// return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp));
auto in_time_t = std::chrono::system_clock::to_time_t(tp);
return to_string(in_time_t);
}
template <typename Rep, typename Period = std::ratio<1>>
std::string to_string(const std::chrono::duration<Rep, Period> & duration)
{
auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(duration);
if (seconds_as_int == duration)
return std::to_string(seconds_as_int.count()) + "s";
auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(duration);
return std::to_string(seconds_as_double.count()) + "s";
}
template <typename Clock, typename Duration = typename Clock::duration>
std::ostream & operator<<(std::ostream & o, const std::chrono::time_point<Clock, Duration> & tp)
{
return o << to_string(tp);
}
template <typename Rep, typename Period = std::ratio<1>>
std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & duration)
{
return o << to_string(duration);
}
}


@ -1,24 +0,0 @@
#pragma once
#include <iterator>
namespace ext
{
/** \brief Returns collection of specified container-type.
* Retains stored value_type, constructs resulting collection using iterator range. */
template <template <typename...> class ResultCollection, typename Collection>
auto collection_cast(const Collection & collection)
{
using value_type = typename Collection::value_type;
return ResultCollection<value_type>(std::begin(collection), std::end(collection));
}
/** \brief Returns collection of specified type.
* Performs implicit conversion of between source and result value_type, if available and required. */
template <typename ResultCollection, typename Collection>
auto collection_cast(const Collection & collection)
{
return ResultCollection(std::begin(collection), std::end(collection));
}
}


@ -1,60 +0,0 @@
#pragma once
#include <ext/size.h>
#include <type_traits>
#include <utility>
#include <iterator>
/** \brief Provides a wrapper view around a container, allowing to iterate over it's elements and indices.
* Allow writing code like shown below:
*
* std::vector<T> v = getVector();
* for (const std::pair<const std::size_t, T &> index_and_value : ext::enumerate(v))
* std::cout << "element " << index_and_value.first << " is " << index_and_value.second << std::endl;
*/
namespace ext
{
template <typename It> struct enumerate_iterator
{
using traits = typename std::iterator_traits<It>;
using iterator_category = typename traits::iterator_category;
using value_type = std::pair<const std::size_t, typename traits::value_type>;
using difference_type = typename traits::difference_type;
using reference = std::pair<const std::size_t, typename traits::reference>;
std::size_t idx;
It it;
enumerate_iterator(const std::size_t idx_, It it_) : idx{idx_}, it{it_} {}
auto operator*() const { return reference(idx, *it); }
bool operator!=(const enumerate_iterator & other) const { return it != other.it; }
enumerate_iterator & operator++() { return ++idx, ++it, *this; }
};
template <typename Collection> struct enumerate_wrapper
{
using underlying_iterator = decltype(std::begin(std::declval<Collection &>()));
using iterator = enumerate_iterator<underlying_iterator>;
Collection & collection;
enumerate_wrapper(Collection & collection_) : collection(collection_) {}
auto begin() { return iterator(0, std::begin(collection)); }
auto end() { return iterator(ext::size(collection), std::end(collection)); }
};
template <typename Collection> auto enumerate(Collection & collection)
{
return enumerate_wrapper<Collection>{collection};
}
template <typename Collection> auto enumerate(const Collection & collection)
{
return enumerate_wrapper<const Collection>{collection};
}
}


@ -1,24 +0,0 @@
#pragma once
#include <utility>
namespace ext
{
/// \brief Identity function for use with other algorithms as a pass-through.
class identity
{
/** \brief Function pointer type template for converting identity to a function pointer.
* Presumably useless, provided for completeness. */
template <typename T> using function_ptr_t = T &&(*)(T &&);
/** \brief Implementation of identity as a non-instance member function for taking function pointer. */
template <typename T> static T && invoke(T && t) { return std::forward<T>(t); }
public:
/** \brief Returns the value passed as a sole argument using perfect forwarding. */
template <typename T> T && operator()(T && t) const { return std::forward<T>(t); }
/** \brief Allows conversion of identity instance to a function pointer. */
template <typename T> operator function_ptr_t<T>() const { return &invoke; };
};
}


@ -1,43 +0,0 @@
#pragma once
#include <utility>
#include <type_traits>
#include <array>
/** \brief Produces std::array of specified size, containing copies of provided object.
* Copy is performed N-1 times, and the last element is being moved.
* This helper allows to initialize std::array in place.
*/
namespace ext
{
namespace detail
{
template<std::size_t size, typename T, std::size_t... indexes>
constexpr auto make_array_n_impl(T && value, std::index_sequence<indexes...>)
{
/// Comma is used to make N-1 copies of value
return std::array<std::decay_t<T>, size>{ (static_cast<void>(indexes), value)..., std::forward<T>(value) };
}
}
template<typename T>
constexpr auto make_array_n(std::integral_constant<std::size_t, 0>, T &&)
{
return std::array<std::decay_t<T>, 0>{};
}
template<std::size_t size, typename T>
constexpr auto make_array_n(std::integral_constant<std::size_t, size>, T && value)
{
return detail::make_array_n_impl<size>(std::forward<T>(value), std::make_index_sequence<size - 1>{});
}
template<std::size_t size, typename T>
constexpr auto make_array_n(T && value)
{
return make_array_n(std::integral_constant<std::size_t, size>{}, std::forward<T>(value));
}
}


@ -1,51 +0,0 @@
#pragma once
#include <type_traits>
#include <boost/iterator/transform_iterator.hpp>
namespace ext
{
/// \brief Strip type off top level reference and cv-qualifiers thus allowing storage in containers
template <typename T>
using unqualified_t = std::remove_cv_t<std::remove_reference_t<T>>;
/** \brief Returns collection of the same container-type as the input collection,
* with each element transformed by the application of `mapper`.
*/
template <template <typename...> class Collection, typename... Params, typename Mapper>
auto map(const Collection<Params...> & collection, Mapper && mapper)
{
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return Collection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
}
/** \brief Returns collection of specified container-type,
* with each element transformed by the application of `mapper`.
* Allows conversion between different container-types, e.g. std::vector to std::list
*/
template <template <typename...> class ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, Mapper && mapper)
{
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return ResultCollection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
}
/** \brief Returns collection of specified type,
* with each element transformed by the application of `mapper`.
* Allows leveraging implicit conversion between the result of applying `mapper` and R::value_type.
*/
template <typename ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, Mapper && mapper)
{
return ResultCollection(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
}
}


@ -1,25 +0,0 @@
#pragma once
#include <vector>
namespace ext
{
/// Moves all arguments starting from the second to the end of the vector.
/// For example, `push_back(vec, a1, a2, a3)` is a more compact way to write
/// `vec.push_back(a1); vec.push_back(a2); vec.push_back(a3);`
/// This function is like boost::range::push_back() but works for noncopyable types too.
template <typename T>
void push_back(std::vector<T> &)
{
}
template <typename T, typename FirstArg, typename... OtherArgs>
void push_back(std::vector<T> & vec, FirstArg && first, OtherArgs &&... other)
{
vec.reserve(vec.size() + sizeof...(other) + 1);
vec.emplace_back(std::move(first));
push_back(vec, std::move(other)...);
}
}


@ -1,14 +0,0 @@
#pragma once
#include <cstdlib>
namespace ext
{
/** \brief Returns number of elements in an automatic array. */
template <typename T, std::size_t N>
constexpr std::size_t size(const T (&)[N]) noexcept { return N; }
/** \brief Returns number of in a container providing size() member function. */
template <typename T> constexpr auto size(const T & t) { return t.size(); }
}


@ -1,27 +0,0 @@
#pragma once
namespace ext
{
template <typename T>
class unlock_guard
{
public:
unlock_guard(T & mutex_) : mutex(mutex_)
{
mutex.unlock();
}
~unlock_guard()
{
mutex.lock();
}
unlock_guard(const unlock_guard &) = delete;
unlock_guard & operator=(const unlock_guard &) = delete;
private:
T & mutex;
};
}


@ -8,13 +8,6 @@
extern "C" {
#endif
-#include <pthread.h>
-size_t __pthread_get_minstack(const pthread_attr_t * attr)
-{
-return 1048576; /// This is a guess. Don't sure it is correct.
-}
#include <signal.h>
#include <unistd.h>
#include <string.h>
@ -141,6 +134,8 @@ int __open_2(const char *path, int oflag)
}
+#include <pthread.h>
/// No-ops.
int pthread_setname_np(pthread_t thread, const char *name) { return 0; }
int pthread_getname_np(pthread_t thread, char *name, size_t len) { name[0] = '\0'; return 0; };


@ -2,7 +2,7 @@
#include <errmsg.h>
#include <mysql.h>
#else
-#include <mysql/errmsg.h>
+#include <mysql/errmsg.h> //Y_IGNORE
#include <mysql/mysql.h>
#endif

base/mysqlxx/ya.make Normal file

@ -0,0 +1,39 @@
# This file is generated automatically, do not edit. See 'ya.make.in' and use 'utils/generate-ya-make' to regenerate it.
LIBRARY()
OWNER(g:clickhouse)
CFLAGS(-g0)
PEERDIR(
contrib/restricted/boost/libs
contrib/libs/libmysql_r
contrib/libs/poco/Foundation
contrib/libs/poco/Util
)
ADDINCL(
GLOBAL clickhouse/base
clickhouse/base
contrib/libs/libmysql_r
)
NO_COMPILER_WARNINGS()
NO_UTIL()
SRCS(
Connection.cpp
Exception.cpp
Pool.cpp
PoolFactory.cpp
PoolWithFailover.cpp
Query.cpp
ResultBase.cpp
Row.cpp
UseQueryResult.cpp
Value.cpp
)
END()

base/mysqlxx/ya.make.in Normal file

@ -0,0 +1,28 @@
LIBRARY()
OWNER(g:clickhouse)
CFLAGS(-g0)
PEERDIR(
contrib/restricted/boost/libs
contrib/libs/libmysql_r
contrib/libs/poco/Foundation
contrib/libs/poco/Util
)
ADDINCL(
GLOBAL clickhouse/base
clickhouse/base
contrib/libs/libmysql_r
)
NO_COMPILER_WARNINGS()
NO_UTIL()
SRCS(
<? find . -name '*.cpp' | grep -v -F tests/ | grep -v -F examples | sed 's/^\.\// /' | sort ?>
)
END()


@ -4,6 +4,7 @@ RECURSE(
common
daemon
loggers
+mysqlxx
pcg-random
widechar_width
readpassphrase

contrib/NuRaft vendored

@ -1 +1 @@
-Subproject commit 2a1bf7d87b4a03561fc66fbb49cee8a288983c5d
+Subproject commit 976874b7aa7f422bf4ea595bb7d1166c617b1c26

contrib/arrow vendored

@ -1 +1 @@
-Subproject commit 616b3dc76a0c8450b4027ded8a78e9619d7c845f
+Subproject commit debf751a129bdda9ff4d1e895e08957ff77000a1


@ -188,6 +188,7 @@ set(ARROW_SRCS
"${LIBRARY_DIR}/array/util.cc"
"${LIBRARY_DIR}/array/validate.cc"
+"${LIBRARY_DIR}/compute/api_aggregate.cc"
"${LIBRARY_DIR}/compute/api_scalar.cc"
"${LIBRARY_DIR}/compute/api_vector.cc"
"${LIBRARY_DIR}/compute/cast.cc"
@ -198,8 +199,11 @@ set(ARROW_SRCS
"${LIBRARY_DIR}/compute/kernels/aggregate_basic.cc"
"${LIBRARY_DIR}/compute/kernels/aggregate_mode.cc"
+"${LIBRARY_DIR}/compute/kernels/aggregate_quantile.cc"
+"${LIBRARY_DIR}/compute/kernels/aggregate_tdigest.cc"
"${LIBRARY_DIR}/compute/kernels/aggregate_var_std.cc"
"${LIBRARY_DIR}/compute/kernels/codegen_internal.cc"
+"${LIBRARY_DIR}/compute/kernels/hash_aggregate.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_arithmetic.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_boolean.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_cast_boolean.cc"
@ -243,6 +247,7 @@ set(ARROW_SRCS
"${LIBRARY_DIR}/io/interfaces.cc"
"${LIBRARY_DIR}/io/memory.cc"
"${LIBRARY_DIR}/io/slow.cc"
+"${LIBRARY_DIR}/io/transform.cc"
"${LIBRARY_DIR}/tensor/coo_converter.cc"
"${LIBRARY_DIR}/tensor/csf_converter.cc"
@ -256,11 +261,8 @@ set(ARROW_SRCS
"${LIBRARY_DIR}/util/bitmap_builders.cc"
"${LIBRARY_DIR}/util/bitmap_ops.cc"
"${LIBRARY_DIR}/util/bpacking.cc"
+"${LIBRARY_DIR}/util/cancel.cc"
"${LIBRARY_DIR}/util/compression.cc"
-"${LIBRARY_DIR}/util/compression_lz4.cc"
-"${LIBRARY_DIR}/util/compression_snappy.cc"
-"${LIBRARY_DIR}/util/compression_zlib.cc"
-"${LIBRARY_DIR}/util/compression_zstd.cc"
"${LIBRARY_DIR}/util/cpu_info.cc"
"${LIBRARY_DIR}/util/decimal.cc"
"${LIBRARY_DIR}/util/delimiting.cc"
@ -268,13 +270,14 @@
"${LIBRARY_DIR}/util/future.cc"
"${LIBRARY_DIR}/util/int_util.cc"
"${LIBRARY_DIR}/util/io_util.cc"
-"${LIBRARY_DIR}/util/iterator.cc"
"${LIBRARY_DIR}/util/key_value_metadata.cc"
"${LIBRARY_DIR}/util/logging.cc"
"${LIBRARY_DIR}/util/memory.cc"
+"${LIBRARY_DIR}/util/mutex.cc"
"${LIBRARY_DIR}/util/string_builder.cc"
"${LIBRARY_DIR}/util/string.cc"
"${LIBRARY_DIR}/util/task_group.cc"
+"${LIBRARY_DIR}/util/tdigest.cc"
"${LIBRARY_DIR}/util/thread_pool.cc"
"${LIBRARY_DIR}/util/time.cc"
"${LIBRARY_DIR}/util/trie.cc"
@ -368,14 +371,14 @@ set(PARQUET_SRCS
"${LIBRARY_DIR}/column_reader.cc"
"${LIBRARY_DIR}/column_scanner.cc"
"${LIBRARY_DIR}/column_writer.cc"
-"${LIBRARY_DIR}/deprecated_io.cc"
"${LIBRARY_DIR}/encoding.cc"
-"${LIBRARY_DIR}/encryption.cc"
+"${LIBRARY_DIR}/encryption/encryption.cc"
-"${LIBRARY_DIR}/encryption_internal.cc"
+"${LIBRARY_DIR}/encryption/encryption_internal.cc"
+"${LIBRARY_DIR}/encryption/internal_file_decryptor.cc"
+"${LIBRARY_DIR}/encryption/internal_file_encryptor.cc"
+"${LIBRARY_DIR}/exception.cc"
"${LIBRARY_DIR}/file_reader.cc"
"${LIBRARY_DIR}/file_writer.cc"
-"${LIBRARY_DIR}/internal_file_decryptor.cc"
-"${LIBRARY_DIR}/internal_file_encryptor.cc"
"${LIBRARY_DIR}/level_conversion.cc"
"${LIBRARY_DIR}/level_comparison.cc"
"${LIBRARY_DIR}/metadata.cc"
@ -385,6 +388,8 @@
"${LIBRARY_DIR}/properties.cc"
"${LIBRARY_DIR}/schema.cc"
"${LIBRARY_DIR}/statistics.cc"
+"${LIBRARY_DIR}/stream_reader.cc"
+"${LIBRARY_DIR}/stream_writer.cc"
"${LIBRARY_DIR}/types.cc"
"${GEN_LIBRARY_DIR}/parquet_constants.cpp"

contrib/flatbuffers vendored

@ -1 +1 @@
-Subproject commit 22e3ffc66d2d7d72d1414390aa0f04ffd114a5a1
+Subproject commit eb3f827948241ce0e701516f16cd67324802bce9

contrib/libpqxx vendored

@ -1 +1 @@
-Subproject commit 58d2a028d1600225ac3a478d6b3a06ba2f0c01f6
+Subproject commit 357608d11b7a1961c3fb7db2ef9a5dbb2e87da77


@ -64,7 +64,7 @@ set (HDRS
add_library(libpqxx ${SRCS} ${HDRS})
target_link_libraries(libpqxx PUBLIC ${LIBPQ_LIBRARY})
-target_include_directories (libpqxx PRIVATE "${LIBRARY_DIR}/include")
+target_include_directories (libpqxx SYSTEM PRIVATE "${LIBRARY_DIR}/include")
# crutch
set(CM_CONFIG_H_IN "${LIBRARY_DIR}/include/pqxx/config.h.in")


@ -1,7 +1,7 @@
add_library(murmurhash
-src/murmurhash2.cpp
-src/murmurhash3.cpp
-include/murmurhash2.h
-include/murmurhash3.h)
+src/MurmurHash2.cpp
+src/MurmurHash3.cpp
+include/MurmurHash2.h
+include/MurmurHash3.h)
target_include_directories (murmurhash PUBLIC include)


@ -0,0 +1,49 @@
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#ifndef MURMURHASH2_H
#define MURMURHASH2_H
#include <stddef.h>
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
typedef unsigned __int64 uint64_t;
// Other compilers
#else // defined(_MSC_VER)
#include <stdint.h>
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
#ifdef __cplusplus
extern "C" {
#endif
uint32_t MurmurHash2 ( const void * key, size_t len, uint32_t seed );
uint64_t MurmurHash64A ( const void * key, size_t len, uint64_t seed );
uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed );
uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed );
uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed );
uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed );
#ifdef __cplusplus
}
#endif
//-----------------------------------------------------------------------------
#endif // _MURMURHASH2_H_
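The new header exposes the classic MurmurHash2 family with size_t lengths. A small usage sketch (the <MurmurHash2.h> include path is an assumption, matching the PUBLIC "include" directory configured in the murmurhash CMake change above):

#include <MurmurHash2.h>   // assumed include path for the header above
#include <cstdio>
#include <cstring>

int main()
{
    const char * key = "ClickHouse";
    const uint32_t seed = 0;

    uint32_t h32 = MurmurHash2(key, std::strlen(key), seed);
    uint64_t h64 = MurmurHash64A(key, std::strlen(key), seed);

    std::printf("32-bit: %08x\n", h32);
    std::printf("64-bit: %016llx\n", static_cast<unsigned long long>(h64));
}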


@ -2,7 +2,10 @@
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
-#pragma once
+#ifndef MURMURHASH3_H
+#define MURMURHASH3_H
+#include <stddef.h>
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
@ -23,20 +26,22 @@ typedef unsigned __int64 uint64_t;
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
#ifdef __cplusplus
extern "C" {
#endif
-//-----------------------------------------------------------------------------
-void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, void * out );
-void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out );
-void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out );
+void MurmurHash3_x86_32 ( const void * key, size_t len, uint32_t seed, void * out );
+void MurmurHash3_x86_128 ( const void * key, size_t len, uint32_t seed, void * out );
+void MurmurHash3_x64_128 ( const void * key, size_t len, uint32_t seed, void * out );
#ifdef __cplusplus
}
#endif
+//-----------------------------------------------------------------------------
+#endif // _MURMURHASH3_H_


@ -1,31 +0,0 @@
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#pragma once
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
typedef unsigned __int64 uint64_t;
// Other compilers
#else // defined(_MSC_VER)
#include <stdint.h>
#endif // !defined(_MSC_VER)
uint32_t MurmurHash2 (const void * key, int len, uint32_t seed);
uint64_t MurmurHash64A (const void * key, int len, uint64_t seed);
uint64_t MurmurHash64B (const void * key, int len, uint64_t seed);
uint32_t MurmurHash2A (const void * key, int len, uint32_t seed);
uint32_t MurmurHashNeutral2 (const void * key, int len, uint32_t seed);
uint32_t MurmurHashAligned2 (const void * key, int len, uint32_t seed);


@ -0,0 +1,523 @@
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - This code makes a few assumptions about how your machine behaves -
// 1. We can read a 4-byte value from any address without crashing
// 2. sizeof(int) == 4
// And it has a few limitations -
// 1. It will not work incrementally.
// 2. It will not produce the same results on little-endian and big-endian
// machines.
#include "MurmurHash2.h"
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER)
#define BIG_CONSTANT(x) (x)
// Other compilers
#else // defined(_MSC_VER)
#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
uint32_t MurmurHash2 ( const void * key, size_t len, uint32_t seed )
{
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
const uint32_t m = 0x5bd1e995;
const int r = 24;
// Initialize the hash to a 'random' value
uint32_t h = seed ^ len;
// Mix 4 bytes at a time into the hash
const unsigned char * data = (const unsigned char *)key;
while(len >= 4)
{
uint32_t k = *(uint32_t*)data;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
// Handle the last few bytes of the input array
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
// Do a few final mixes of the hash to ensure the last few
// bytes are well-incorporated.
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHash2, 64-bit versions, by Austin Appleby
// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment
// and endian-ness issues if used across multiple platforms.
// 64-bit hash for 64-bit platforms
uint64_t MurmurHash64A ( const void * key, size_t len, uint64_t seed )
{
const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
const int r = 47;
uint64_t h = seed ^ (len * m);
const uint64_t * data = (const uint64_t *)key;
const uint64_t * end = data + (len/8);
while(data != end)
{
uint64_t k = *data++;
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
const unsigned char * data2 = (const unsigned char*)data;
switch(len & 7)
{
case 7: h ^= uint64_t(data2[6]) << 48;
case 6: h ^= uint64_t(data2[5]) << 40;
case 5: h ^= uint64_t(data2[4]) << 32;
case 4: h ^= uint64_t(data2[3]) << 24;
case 3: h ^= uint64_t(data2[2]) << 16;
case 2: h ^= uint64_t(data2[1]) << 8;
case 1: h ^= uint64_t(data2[0]);
h *= m;
};
h ^= h >> r;
h *= m;
h ^= h >> r;
return h;
}
// 64-bit hash for 32-bit platforms
uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h1 = uint32_t(seed) ^ len;
uint32_t h2 = uint32_t(seed >> 32);
const uint32_t * data = (const uint32_t *)key;
while(len >= 8)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
uint32_t k2 = *data++;
k2 *= m; k2 ^= k2 >> r; k2 *= m;
h2 *= m; h2 ^= k2;
len -= 4;
}
if(len >= 4)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
}
switch(len)
{
case 3: h2 ^= ((unsigned char*)data)[2] << 16;
case 2: h2 ^= ((unsigned char*)data)[1] << 8;
case 1: h2 ^= ((unsigned char*)data)[0];
h2 *= m;
};
h1 ^= h2 >> 18; h1 *= m;
h2 ^= h1 >> 22; h2 *= m;
h1 ^= h2 >> 17; h1 *= m;
h2 ^= h1 >> 19; h2 *= m;
uint64_t h = h1;
h = (h << 32) | h2;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHash2A, by Austin Appleby
// This is a variant of MurmurHash2 modified to use the Merkle-Damgard
// construction. Bulk speed should be identical to Murmur2, small-key speed
// will be 10%-20% slower due to the added overhead at the end of the hash.
// This variant fixes a minor issue where null keys were more likely to
// collide with each other than expected, and also makes the function
// more amenable to incremental implementations.
#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t l = len;
const unsigned char * data = (const unsigned char *)key;
uint32_t h = seed;
while(len >= 4)
{
uint32_t k = *(uint32_t*)data;
mmix(h,k);
data += 4;
len -= 4;
}
uint32_t t = 0;
switch(len)
{
case 3: t ^= data[2] << 16;
case 2: t ^= data[1] << 8;
case 1: t ^= data[0];
};
mmix(h,t);
mmix(h,l);
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// CMurmurHash2A, by Austin Appleby
// This is a sample implementation of MurmurHash2A designed to work
// incrementally.
// Usage -
// CMurmurHash2A hasher
// hasher.Begin(seed);
// hasher.Add(data1,size1);
// hasher.Add(data2,size2);
// ...
// hasher.Add(dataN,sizeN);
// uint32_t hash = hasher.End()
class CMurmurHash2A
{
public:
void Begin ( uint32_t seed = 0 )
{
m_hash = seed;
m_tail = 0;
m_count = 0;
m_size = 0;
}
void Add ( const unsigned char * data, size_t len )
{
m_size += len;
MixTail(data,len);
while(len >= 4)
{
uint32_t k = *(uint32_t*)data;
mmix(m_hash,k);
data += 4;
len -= 4;
}
MixTail(data,len);
}
uint32_t End ( void )
{
mmix(m_hash,m_tail);
mmix(m_hash,m_size);
m_hash ^= m_hash >> 13;
m_hash *= m;
m_hash ^= m_hash >> 15;
return m_hash;
}
private:
static const uint32_t m = 0x5bd1e995;
static const int r = 24;
void MixTail ( const unsigned char * & data, size_t & len )
{
while( len && ((len<4) || m_count) )
{
m_tail |= (*data++) << (m_count * 8);
m_count++;
len--;
if(m_count == 4)
{
mmix(m_hash,m_tail);
m_tail = 0;
m_count = 0;
}
}
}
uint32_t m_hash;
uint32_t m_tail;
uint32_t m_count;
uint32_t m_size;
};
//-----------------------------------------------------------------------------
// MurmurHashNeutral2, by Austin Appleby
// Same as MurmurHash2, but endian- and alignment-neutral.
// Half the speed though, alas.
uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h = seed ^ len;
const unsigned char * data = (const unsigned char *)key;
while(len >= 4)
{
uint32_t k;
k = data[0];
k |= data[1] << 8;
k |= data[2] << 16;
k |= data[3] << 24;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHashAligned2, by Austin Appleby
// Same algorithm as MurmurHash2, but only does aligned reads - should be safer
// on certain platforms.
// Performance will be lower than MurmurHash2
#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
const unsigned char * data = (const unsigned char *)key;
uint32_t h = seed ^ len;
size_t align = (uint64_t)data & 3;
if(align && (len >= 4))
{
// Pre-load the temp registers
uint32_t t = 0, d = 0;
switch(align)
{
case 1: t |= data[2] << 16;
case 2: t |= data[1] << 8;
case 3: t |= data[0];
}
t <<= (8 * align);
data += 4-align;
len -= 4-align;
int sl = 8 * (4-align);
int sr = 8 * align;
// Mix
while(len >= 4)
{
d = *(uint32_t *)data;
t = (t >> sr) | (d << sl);
uint32_t k = t;
MIX(h,k,m);
t = d;
data += 4;
len -= 4;
}
// Handle leftover data in temp registers
d = 0;
if(len >= align)
{
switch(align)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
}
uint32_t k = (t >> sr) | (d << sl);
MIX(h,k,m);
data += align;
len -= align;
//----------
// Handle tail bytes
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
}
else
{
switch(len)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
case 0: h ^= (t >> sr) | (d << sl);
h *= m;
}
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
else
{
while(len >= 4)
{
uint32_t k = *(uint32_t *)data;
MIX(h,k,m);
data += 4;
len -= 4;
}
//----------
// Handle tail bytes
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
}
//-----------------------------------------------------------------------------


@ -1,3 +1,4 @@
+//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
@ -6,8 +7,8 @@
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
-#include "murmurhash3.h"
-#include <cstring>
+#include "MurmurHash3.h"
+#include <string.h>
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
@ -93,7 +94,7 @@ FORCE_INLINE uint64_t fmix64 ( uint64_t k )
//-----------------------------------------------------------------------------
-void MurmurHash3_x86_32 ( const void * key, int len,
+void MurmurHash3_x86_32 ( const void * key, size_t len,
uint32_t seed, void * out )
{
const uint8_t * data = (const uint8_t*)key;
@ -149,7 +150,7 @@ void MurmurHash3_x86_32 ( const void * key, int len,
//-----------------------------------------------------------------------------
-void MurmurHash3_x86_128 ( const void * key, const int len,
+void MurmurHash3_x86_128 ( const void * key, const size_t len,
uint32_t seed, void * out )
{
const uint8_t * data = (const uint8_t*)key;
@ -254,7 +255,7 @@ void MurmurHash3_x86_128 ( const void * key, const int len,
//-----------------------------------------------------------------------------
-void MurmurHash3_x64_128 ( const void * key, const int len,
+void MurmurHash3_x64_128 ( const void * key, const size_t len,
const uint32_t seed, void * out )
{
const uint8_t * data = (const uint8_t*)key;
@ -332,3 +333,6 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
((uint64_t*)out)[0] = h1;
((uint64_t*)out)[1] = h2;
}
+//-----------------------------------------------------------------------------


@ -1,423 +0,0 @@
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - This code makes a few assumptions about how your machine behaves -
// 1. We can read a 4-byte value from any address without crashing
// 2. sizeof(int) == 4
// And it has a few limitations -
// 1. It will not work incrementally.
// 2. It will not produce the same results on little-endian and big-endian
// machines.
#include "murmurhash2.h"
#include <cstring>
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER)
#define BIG_CONSTANT(x) (x)
// Other compilers
#else // defined(_MSC_VER)
#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)
uint32_t MurmurHash2(const void * key, int len, uint32_t seed)
{
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
const uint32_t m = 0x5bd1e995;
const int r = 24;
// Initialize the hash to a 'random' value
uint32_t h = seed ^ len;
// Mix 4 bytes at a time into the hash
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
while (len >= 4)
{
uint32_t k;
memcpy(&k, data, sizeof(k));
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
// Handle the last few bytes of the input array
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
// Do a few final mixes of the hash to ensure the last few
// bytes are well-incorporated.
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
// MurmurHash2, 64-bit versions, by Austin Appleby
// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment
// and endian-ness issues if used across multiple platforms.
// 64-bit hash for 64-bit platforms
uint64_t MurmurHash64A(const void * key, int len, uint64_t seed)
{
const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
const int r = 47;
uint64_t h = seed ^ (len * m);
const uint64_t * data = reinterpret_cast<const uint64_t *>(key);
const uint64_t * end = data + (len/8);
while (data != end)
{
uint64_t k = *data++;
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
const unsigned char * data2 = reinterpret_cast<const unsigned char *>(data);
switch (len & 7)
{
case 7: h ^= static_cast<uint64_t>(data2[6]) << 48;
case 6: h ^= static_cast<uint64_t>(data2[5]) << 40;
case 5: h ^= static_cast<uint64_t>(data2[4]) << 32;
case 4: h ^= static_cast<uint64_t>(data2[3]) << 24;
case 3: h ^= static_cast<uint64_t>(data2[2]) << 16;
case 2: h ^= static_cast<uint64_t>(data2[1]) << 8;
case 1: h ^= static_cast<uint64_t>(data2[0]);
h *= m;
};
h ^= h >> r;
h *= m;
h ^= h >> r;
return h;
}
// 64-bit hash for 32-bit platforms
uint64_t MurmurHash64B(const void * key, int len, uint64_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h1 = static_cast<uint32_t>(seed) ^ len;
uint32_t h2 = static_cast<uint32_t>(seed >> 32);
const uint32_t * data = reinterpret_cast<const uint32_t *>(key);
while (len >= 8)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
uint32_t k2 = *data++;
k2 *= m; k2 ^= k2 >> r; k2 *= m;
h2 *= m; h2 ^= k2;
len -= 4;
}
if (len >= 4)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
}
switch (len)
{
case 3: h2 ^= reinterpret_cast<const unsigned char *>(data)[2] << 16;
case 2: h2 ^= reinterpret_cast<const unsigned char *>(data)[1] << 8;
case 1: h2 ^= reinterpret_cast<const unsigned char *>(data)[0];
h2 *= m;
};
h1 ^= h2 >> 18; h1 *= m;
h2 ^= h1 >> 22; h2 *= m;
h1 ^= h2 >> 17; h1 *= m;
h2 ^= h1 >> 19; h2 *= m;
uint64_t h = h1;
h = (h << 32) | h2;
return h;
}
// MurmurHash2A, by Austin Appleby
// This is a variant of MurmurHash2 modified to use the Merkle-Damgard
// construction. Bulk speed should be identical to Murmur2, small-key speed
// will be 10%-20% slower due to the added overhead at the end of the hash.
// This variant fixes a minor issue where null keys were more likely to
// collide with each other than expected, and also makes the function
// more amenable to incremental implementations.
#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHash2A(const void * key, int len, uint32_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t l = len;
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
uint32_t h = seed;
while (len >= 4)
{
uint32_t k = *reinterpret_cast<const uint32_t *>(data);
mmix(h,k);
data += 4;
len -= 4;
}
uint32_t t = 0;
switch (len)
{
case 3: t ^= data[2] << 16;
case 2: t ^= data[1] << 8;
case 1: t ^= data[0];
};
mmix(h,t);
mmix(h,l);
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
// MurmurHashNeutral2, by Austin Appleby
// Same as MurmurHash2, but endian- and alignment-neutral.
// Half the speed though, alas.
uint32_t MurmurHashNeutral2(const void * key, int len, uint32_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h = seed ^ len;
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
while (len >= 4)
{
uint32_t k;
k = data[0];
k |= data[1] << 8;
k |= data[2] << 16;
k |= data[3] << 24;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHashAligned2, by Austin Appleby
// Same algorithm as MurmurHash2, but only does aligned reads - should be safer
// on certain platforms.
// Performance will be lower than MurmurHash2
#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHashAligned2(const void * key, int len, uint32_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
uint32_t h = seed ^ len;
int align = reinterpret_cast<uint64_t>(data) & 3;
if (align && (len >= 4))
{
// Pre-load the temp registers
uint32_t t = 0, d = 0;
switch (align)
{
case 1: t |= data[2] << 16;
case 2: t |= data[1] << 8;
case 3: t |= data[0];
}
t <<= (8 * align);
data += 4-align;
len -= 4-align;
int sl = 8 * (4-align);
int sr = 8 * align;
// Mix
while (len >= 4)
{
d = *(reinterpret_cast<const uint32_t *>(data));
t = (t >> sr) | (d << sl);
uint32_t k = t;
MIX(h,k,m);
t = d;
data += 4;
len -= 4;
}
// Handle leftover data in temp registers
d = 0;
if (len >= align)
{
switch (align)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
}
uint32_t k = (t >> sr) | (d << sl);
MIX(h,k,m);
data += align;
len -= align;
//----------
// Handle tail bytes
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
}
else
{
switch (len)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
case 0: h ^= (t >> sr) | (d << sl);
h *= m;
}
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
else
{
while (len >= 4)
{
uint32_t k = *reinterpret_cast<const uint32_t *>(data);
MIX(h,k,m);
data += 4;
len -= 4;
}
// Handle tail bytes
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
}

2
contrib/orc vendored

@ -1 +1 @@
Subproject commit 5981208e39447df84827f6a961d1da76bacb6078 Subproject commit 0a936f6bbdb9303308973073f8623b5a8d82eae1

2
contrib/replxx vendored

@ -1 +1 @@
Subproject commit 2b24f14594d7606792b92544bb112a6322ba34d7 Subproject commit c81be6c68b146f15f2096b7ef80e3f21fe27004c


@ -200,7 +200,7 @@ continue
# The server has died. # The server has died.
task_exit_code=210 task_exit_code=210
echo "failure" > status.txt echo "failure" > status.txt
if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt if ! grep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt
then then
echo "Lost connection to server. See the logs." > description.txt echo "Lost connection to server. See the logs." > description.txt
fi fi
@ -220,8 +220,8 @@ continue
# which is confusing. # which is confusing.
task_exit_code=$fuzzer_exit_code task_exit_code=$fuzzer_exit_code
echo "failure" > status.txt echo "failure" > status.txt
{ grep -o "Found error:.*" fuzzer.log \ { grep --text -o "Found error:.*" fuzzer.log \
|| grep -o "Exception.*" fuzzer.log \ || grep --text -o "Exception.*" fuzzer.log \
|| echo "Fuzzer failed ($fuzzer_exit_code). See the logs." ; } \ || echo "Fuzzer failed ($fuzzer_exit_code). See the logs." ; } \
| tail -1 > description.txt | tail -1 > description.txt
fi fi


@ -489,7 +489,7 @@ if args.report == 'main':
text = tableStart('Test Times') text = tableStart('Test Times')
text += tableHeader(columns, attrs) text += tableHeader(columns, attrs)
allowed_average_run_time = 1.6 # 30 seconds per test at 7 runs allowed_average_run_time = 3.75 # 60 seconds per test at (7 + 1) * 2 runs
for r in rows: for r in rows:
anchor = f'{currentTableAnchor()}.{r[0]}' anchor = f'{currentTableAnchor()}.{r[0]}'
total_runs = (int(r[7]) + 1) * 2 # one prewarm run, two servers total_runs = (int(r[7]) + 1) * 2 # one prewarm run, two servers


@ -112,12 +112,15 @@ timeout "$MAX_RUN_TIME" bash -c run_tests ||:
./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv ./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz ||: pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz ||:
mv /var/log/clickhouse-server/stderr.log /test_output/ ||: mv /var/log/clickhouse-server/stderr.log /test_output/ ||:
if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||: tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||:
fi fi
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||:
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||: pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||: pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||: mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:


@ -14,6 +14,8 @@ HUNG_SIGN = "Found hung queries in processlist"
NO_TASK_TIMEOUT_SIGN = "All tests have finished" NO_TASK_TIMEOUT_SIGN = "All tests have finished"
RETRIES_SIGN = "Some tests were restarted"
def process_test_log(log_path): def process_test_log(log_path):
total = 0 total = 0
skipped = 0 skipped = 0
@ -21,6 +23,7 @@ def process_test_log(log_path):
failed = 0 failed = 0
success = 0 success = 0
hung = False hung = False
retries = False
task_timeout = True task_timeout = True
test_results = [] test_results = []
with open(log_path, 'r') as test_file: with open(log_path, 'r') as test_file:
@ -30,6 +33,8 @@ def process_test_log(log_path):
task_timeout = False task_timeout = False
if HUNG_SIGN in line: if HUNG_SIGN in line:
hung = True hung = True
if RETRIES_SIGN in line:
retries = True
if any(sign in line for sign in (OK_SIGN, FAIL_SIGN, UNKNOWN_SIGN, SKIPPED_SIGN)): if any(sign in line for sign in (OK_SIGN, FAIL_SIGN, UNKNOWN_SIGN, SKIPPED_SIGN)):
test_name = line.split(' ')[2].split(':')[0] test_name = line.split(' ')[2].split(':')[0]
@ -57,7 +62,7 @@ def process_test_log(log_path):
else: else:
success += int(OK_SIGN in line) success += int(OK_SIGN in line)
test_results.append((test_name, "OK", test_time)) test_results.append((test_name, "OK", test_time))
return total, skipped, unknown, failed, success, hung, task_timeout, test_results return total, skipped, unknown, failed, success, hung, task_timeout, retries, test_results
def process_result(result_path): def process_result(result_path):
test_results = [] test_results = []
@ -73,7 +78,7 @@ def process_result(result_path):
state = "error" state = "error"
if result_path and os.path.exists(result_path): if result_path and os.path.exists(result_path):
total, skipped, unknown, failed, success, hung, task_timeout, test_results = process_test_log(result_path) total, skipped, unknown, failed, success, hung, task_timeout, retries, test_results = process_test_log(result_path)
is_flacky_check = 1 < int(os.environ.get('NUM_TRIES', 1)) is_flacky_check = 1 < int(os.environ.get('NUM_TRIES', 1))
# If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately) # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately)
# But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped. # But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped.
@ -83,9 +88,14 @@ def process_result(result_path):
if hung: if hung:
description = "Some queries hung, " description = "Some queries hung, "
state = "failure" state = "failure"
test_results.append(("Some queries hung", "FAIL", "0"))
elif task_timeout: elif task_timeout:
description = "Timeout, " description = "Timeout, "
state = "failure" state = "failure"
test_results.append(("Timeout", "FAIL", "0"))
elif retries:
description = "Some tests restarted, "
test_results.append(("Some tests restarted", "SKIPPED", "0"))
else: else:
description = "" description = ""


@ -103,6 +103,7 @@ timeout "$MAX_RUN_TIME" bash -c run_tests ||:
clickhouse-client -q "system flush logs" ||: clickhouse-client -q "system flush logs" ||:
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz & pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz & clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz &
clickhouse-client -q "select * from system.query_thread_log format TSVWithNamesAndTypes" | pigz > /test_output/query-thread-log.tsv.gz & clickhouse-client -q "select * from system.query_thread_log format TSVWithNamesAndTypes" | pigz > /test_output/query-thread-log.tsv.gz &
@ -140,6 +141,8 @@ tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_l
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||: tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||:
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||: pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||: pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||: mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:


@ -2,18 +2,16 @@
## TL; DR How to make ClickHouse compile and link faster? ## TL; DR How to make ClickHouse compile and link faster?
Developer only! This command will likely fulfill most of your needs. Run before calling `ninja`. Minimal ClickHouse build example:
```cmake ```bash
cmake .. \ cmake .. \
-DCMAKE_C_COMPILER=/bin/clang-10 \ -DCMAKE_C_COMPILER=$(which clang-11) \
-DCMAKE_CXX_COMPILER=/bin/clang++-10 \ -DCMAKE_CXX_COMPILER=$(which clang++-11) \
-DCMAKE_BUILD_TYPE=Debug \ -DCMAKE_BUILD_TYPE=Debug \
-DENABLE_CLICKHOUSE_ALL=OFF \ -DENABLE_CLICKHOUSE_ALL=OFF \
-DENABLE_CLICKHOUSE_SERVER=ON \ -DENABLE_CLICKHOUSE_SERVER=ON \
-DENABLE_CLICKHOUSE_CLIENT=ON \ -DENABLE_CLICKHOUSE_CLIENT=ON \
-DUSE_STATIC_LIBRARIES=OFF \
-DSPLIT_SHARED_LIBRARIES=ON \
-DENABLE_LIBRARIES=OFF \ -DENABLE_LIBRARIES=OFF \
-DUSE_UNWIND=ON \ -DUSE_UNWIND=ON \
-DENABLE_UTILS=OFF \ -DENABLE_UTILS=OFF \

0
docs/clean Normal file


@ -112,7 +112,7 @@ A hand-written recursive descent parser parses a query. For example, `ParserSele
Interpreters are responsible for creating the query execution pipeline from an `AST`. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, or the more sophisticated `InterpreterSelectQuery`. The query execution pipeline is a combination of block input or output streams. For example, the result of interpreting the `SELECT` query is the `IBlockInputStream` to read the result set from; the result of the INSERT query is the `IBlockOutputStream` to write data for insertion to, and the result of interpreting the `INSERT SELECT` query is the `IBlockInputStream` that returns an empty result set on the first read, but that copies data from `SELECT` to `INSERT` at the same time. Interpreters are responsible for creating the query execution pipeline from an `AST`. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, or the more sophisticated `InterpreterSelectQuery`. The query execution pipeline is a combination of block input or output streams. For example, the result of interpreting the `SELECT` query is the `IBlockInputStream` to read the result set from; the result of the INSERT query is the `IBlockOutputStream` to write data for insertion to, and the result of interpreting the `INSERT SELECT` query is the `IBlockInputStream` that returns an empty result set on the first read, but that copies data from `SELECT` to `INSERT` at the same time.
`InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are done. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted to separate classes to allow modular transformations or query. `InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are done. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted to separate classes to allow modular transformations of query.
## Functions {#functions} ## Functions {#functions}
@ -169,7 +169,7 @@ There is no global query plan for distributed query execution. Each node has its
`MergeTree` is a family of storage engines that supports indexing by primary key. The primary key can be an arbitrary tuple of columns or expressions. Data in a `MergeTree` table is stored in “parts”. Each part stores data in the primary key order, so data is ordered lexicographically by the primary key tuple. All the table columns are stored in separate `column.bin` files in these parts. The files consist of compressed blocks. Each block is usually from 64 KB to 1 MB of uncompressed data, depending on the average value size. The blocks consist of column values placed contiguously one after the other. Column values are in the same order for each column (the primary key defines the order), so when you iterate by many columns, you get values for the corresponding rows. `MergeTree` is a family of storage engines that supports indexing by primary key. The primary key can be an arbitrary tuple of columns or expressions. Data in a `MergeTree` table is stored in “parts”. Each part stores data in the primary key order, so data is ordered lexicographically by the primary key tuple. All the table columns are stored in separate `column.bin` files in these parts. The files consist of compressed blocks. Each block is usually from 64 KB to 1 MB of uncompressed data, depending on the average value size. The blocks consist of column values placed contiguously one after the other. Column values are in the same order for each column (the primary key defines the order), so when you iterate by many columns, you get values for the corresponding rows.
The primary key itself is “sparse”. It does not address every single row, but only some ranges of data. A separate `primary.idx` file has the value of the primary key for each N-th row, where N is called `index_granularity` (usually, N = 8192). Also, for each column, we have `column.mrk` files with “marks,” which are offsets to each N-th row in the data file. Each mark is a pair: the offset in the file to the beginning of the compressed block, and the offset in the decompressed block to the beginning of data. Usually, compressed blocks are aligned by marks, and the offset in the decompressed block is zero. Data for `primary.idx` always resides in memory, and data for `column.mrk` files is cached. The primary key itself is “sparse”. It does not address every single row, but only some ranges of data. A separate `primary.idx` file has the value of the primary key for each N-th row, where N is called `index_granularity` (usually, N = 8192). Also, for each column, we have `column.mrk` files with “marks”, which are offsets to each N-th row in the data file. Each mark is a pair: the offset in the file to the beginning of the compressed block, and the offset in the decompressed block to the beginning of data. Usually, compressed blocks are aligned by marks, and the offset in the decompressed block is zero. Data for `primary.idx` always resides in memory, and data for `column.mrk` files is cached.
When we are going to read something from a part in `MergeTree`, we look at `primary.idx` data and locate ranges that could contain requested data, then look at `column.mrk` data and calculate offsets for where to start reading those ranges. Because of sparseness, excess data may be read. ClickHouse is not suitable for a high load of simple point queries, because the entire range with `index_granularity` rows must be read for each key, and the entire compressed block must be decompressed for each column. We made the index sparse because we must be able to maintain trillions of rows per single server without noticeable memory consumption for the index. Also, because the primary key is sparse, it is not unique: it cannot check the existence of the key in the table at INSERT time. You could have many rows with the same key in a table. When we are going to read something from a part in `MergeTree`, we look at `primary.idx` data and locate ranges that could contain requested data, then look at `column.mrk` data and calculate offsets for where to start reading those ranges. Because of sparseness, excess data may be read. ClickHouse is not suitable for a high load of simple point queries, because the entire range with `index_granularity` rows must be read for each key, and the entire compressed block must be decompressed for each column. We made the index sparse because we must be able to maintain trillions of rows per single server without noticeable memory consumption for the index. Also, because the primary key is sparse, it is not unique: it cannot check the existence of the key in the table at INSERT time. You could have many rows with the same key in a table.
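As a hedged illustration of the relationship between rows, granules and marks, assuming a hypothetical table `mt_demo` and the standard `system.parts` columns:

``` sql
-- hypothetical table; index_granularity is the N described above
CREATE TABLE mt_demo (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key
SETTINGS index_granularity = 8192;

INSERT INTO mt_demo SELECT number, toString(number) FROM numbers(100000);

-- each active part should report roughly one mark per index_granularity rows
SELECT name, rows, marks FROM system.parts WHERE table = 'mt_demo' AND active;
```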


@ -126,7 +126,7 @@ Builds ClickHouse in various configurations for use in further steps. You have t
- **Compiler**: `gcc-9` or `clang-10` (or `clang-10-xx` for other architectures e.g. `clang-10-freebsd`). - **Compiler**: `gcc-9` or `clang-10` (or `clang-10-xx` for other architectures e.g. `clang-10-freebsd`).
- **Build type**: `Debug` or `RelWithDebInfo` (cmake). - **Build type**: `Debug` or `RelWithDebInfo` (cmake).
- **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan). - **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
- **Bundled**: `bundled` build uses system libraries, and `unbundled` build uses libraries from `contrib` folder. - **Bundled**: `bundled` build uses libraries from `contrib` folder, and `unbundled` build uses system libraries.
- **Splitted** `splitted` is a [split build](build.md#split-build) - **Splitted** `splitted` is a [split build](build.md#split-build)
- **Status**: `success` or `fail` - **Status**: `success` or `fail`
- **Build log**: link to the building and files copying log, useful when build failed. - **Build log**: link to the building and files copying log, useful when build failed.


@ -17,7 +17,7 @@ Main features:
- Partitions can be used if the [partitioning key](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) is specified. - Partitions can be used if the [partitioning key](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) is specified.
ClickHouse supports certain operations with partitions that are more effective than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query. ClickHouse supports certain operations with partitions that are more efficient than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query.
- Data replication support. - Data replication support.
@ -83,7 +83,7 @@ For a description of parameters, see the [CREATE query description](../../../sql
Expression must have one `Date` or `DateTime` column as a result. Example: Expression must have one `Date` or `DateTime` column as a result. Example:
`TTL date + INTERVAL 1 DAY` `TTL date + INTERVAL 1 DAY`
Type of the rule `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'|GROUP BY` specifies an action to be done with the part if the expression is satisfied (reaches current time): removal of expired rows, moving a part (if expression is satisfied for all rows in a part) to specified disk (`TO DISK 'xxx'`) or to volume (`TO VOLUME 'xxx'`), or aggregating values in expired rows. Default type of the rule is removal (`DELETE`). List of multiple rules can specified, but there should be no more than one `DELETE` rule. Type of the rule `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'|GROUP BY` specifies an action to be done with the part if the expression is satisfied (reaches current time): removal of expired rows, moving a part (if expression is satisfied for all rows in a part) to specified disk (`TO DISK 'xxx'`) or to volume (`TO VOLUME 'xxx'`), or aggregating values in expired rows. Default type of the rule is removal (`DELETE`). List of multiple rules can be specified, but there should be no more than one `DELETE` rule.
For more details, see [TTL for columns and tables](#table_engine-mergetree-ttl) For more details, see [TTL for columns and tables](#table_engine-mergetree-ttl)
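A hedged sketch combining several rule types in one table definition (the table, column, disk and volume names are assumptions and require a matching storage policy):

``` sql
CREATE TABLE ttl_demo
(
    d DateTime,
    key UInt64,
    value UInt64
)
ENGINE = MergeTree
ORDER BY key
TTL d + INTERVAL 1 DAY TO DISK 'hdd',      -- move parts older than a day to the 'hdd' disk
    d + INTERVAL 1 WEEK TO VOLUME 'slow',  -- then to the 'slow' volume
    d + INTERVAL 1 MONTH DELETE;           -- finally remove expired rows
```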
@ -474,7 +474,7 @@ With `WHERE` clause you may specify which of the expired rows to delete or aggre
`GROUP BY` expression must be a prefix of the table primary key. `GROUP BY` expression must be a prefix of the table primary key.
If a column is not part of the `GROUP BY` expression and is not set explicitely in the `SET` clause, in result row it contains an occasional value from the grouped rows (as if aggregate function `any` is applied to it). If a column is not part of the `GROUP BY` expression and is not set explicitly in the `SET` clause, in result row it contains an occasional value from the grouped rows (as if aggregate function `any` is applied to it).
**Examples** **Examples**
@ -695,7 +695,8 @@ PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy = 'moving_from_ssd_to_hdd' SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
``` ```
The `default` storage policy implies using only one volume, which consists of only one disk given in `<path>`. Once a table is created, its storage policy cannot be changed. The `default` storage policy implies using only one volume, which consists of only one disk given in `<path>`.
You can change the storage policy after table creation with the [ALTER TABLE ... MODIFY SETTING] query; the new policy should include all the old disks and volumes with the same names.
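For example, a minimal sketch reusing the `moving_from_ssd_to_hdd` policy from the example above (the table name is an assumption):

``` sql
-- the new policy must contain all disks and volumes of the old one, with the same names
ALTER TABLE example_table MODIFY SETTING storage_policy = 'moving_from_ssd_to_hdd';
```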
The number of threads performing background moves of data parts can be changed by [background_move_pool_size](../../../operations/settings/settings.md#background_move_pool_size) setting. The number of threads performing background moves of data parts can be changed by [background_move_pool_size](../../../operations/settings/settings.md#background_move_pool_size) setting.


@ -96,7 +96,7 @@ SELECT key, sum(value) FROM summtt GROUP BY key
When data are inserted into a table, they are saved as-is. ClickHouse merges the inserted parts of data periodically and this is when rows with the same primary key are summed and replaced with one for each resulting part of data. When data are inserted into a table, they are saved as-is. ClickHouse merges the inserted parts of data periodically and this is when rows with the same primary key are summed and replaced with one for each resulting part of data.
ClickHouse can merge the data parts so that different resulting parts of data cat consist rows with the same primary key, i.e. the summation will be incomplete. Therefore (`SELECT`) an aggregate function [sum()](../../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum) and `GROUP BY` clause should be used in a query as described in the example above. ClickHouse can merge the data parts so that different resulting parts of data can contain rows with the same primary key, i.e. the summation will be incomplete. Therefore, an aggregate function [sum()](../../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum) and a `GROUP BY` clause should be used in a `SELECT` query, as described in the example above.
### Common Rules for Summation {#common-rules-for-summation} ### Common Rules for Summation {#common-rules-for-summation}


@ -1249,10 +1249,13 @@ The table below shows supported data types and how they match ClickHouse [data t
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `STRING` | | `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `STRING` |
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `STRING` | | — | [FixedString](../sql-reference/data-types/fixedstring.md) | `STRING` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` | | `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
Arrays can be nested and can have a value of the `Nullable` type as an argument.
ClickHouse supports configurable precision of `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type. ClickHouse supports configurable precision of `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type.
Unsupported Parquet data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. Unsupported Parquet data types: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
Data types of ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [cast](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) the data to that data type which is set for the ClickHouse table column. Data types of ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [cast](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) the data to that data type which is set for the ClickHouse table column.
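A hedged sketch of reading a Parquet file directly with the `file` table function (the file name and the column structure are assumptions):

``` sql
-- the file is resolved relative to the server's user_files_path
SELECT *
FROM file('data.parquet', 'Parquet', 'id UInt64, name String, tags Array(String)')
LIMIT 10;
```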
@ -1276,7 +1279,55 @@ To exchange data with Hadoop, you can use [HDFS table engine](../engines/table-e
[Apache Arrow](https://arrow.apache.org/) comes with two built-in columnar storage formats. ClickHouse supports read and write operations for these formats. [Apache Arrow](https://arrow.apache.org/) comes with two built-in columnar storage formats. ClickHouse supports read and write operations for these formats.
`Arrow` is Apache Arrows “file mode” format. It is designed for in-memory random access. `Arrow` is Apache Arrows "file mode" format. It is designed for in-memory random access.
### Data Types Matching {#data_types-matching-arrow}
The table below shows supported data types and how they match ClickHouse [data types](../sql-reference/data-types/index.md) in `INSERT` and `SELECT` queries.
| Arrow data type (`INSERT`) | ClickHouse data type | Arrow data type (`SELECT`) |
|----------------------------|-----------------------------------------------------|----------------------------|
| `UINT8`, `BOOL` | [UInt8](../sql-reference/data-types/int-uint.md) | `UINT8` |
| `INT8` | [Int8](../sql-reference/data-types/int-uint.md) | `INT8` |
| `UINT16` | [UInt16](../sql-reference/data-types/int-uint.md) | `UINT16` |
| `INT16` | [Int16](../sql-reference/data-types/int-uint.md) | `INT16` |
| `UINT32` | [UInt32](../sql-reference/data-types/int-uint.md) | `UINT32` |
| `INT32` | [Int32](../sql-reference/data-types/int-uint.md) | `INT32` |
| `UINT64` | [UInt64](../sql-reference/data-types/int-uint.md) | `UINT64` |
| `INT64` | [Int64](../sql-reference/data-types/int-uint.md) | `INT64` |
| `FLOAT`, `HALF_FLOAT` | [Float32](../sql-reference/data-types/float.md) | `FLOAT32` |
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `FLOAT64` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `UTF8` |
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `UTF8` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `DECIMAL256` | [Decimal256](../sql-reference/data-types/decimal.md)| `DECIMAL256` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
Arrays can be nested and can have a value of the `Nullable` type as an argument.
ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the Arrow `DECIMAL` type as the ClickHouse `Decimal128` type.
Unsupported Arrow data types: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
The data types of ClickHouse table columns do not have to match the corresponding Arrow data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) the data to the data type set for the ClickHouse table column.
### Inserting Data {#inserting-data-arrow}
You can insert Arrow data from a file into a ClickHouse table with the following command:
``` bash
$ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT Arrow"
```
### Selecting Data {#selecting-data-arrow}
You can select data from a ClickHouse table and save it into a file in the Arrow format with the following command:
``` bash
$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filename.arrow}
```
## ArrowStream {#data-format-arrow-stream} ## ArrowStream {#data-format-arrow-stream}
@ -1306,7 +1357,9 @@ The table below shows supported data types and how they match ClickHouse [data t
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `TIMESTAMP` | | `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `TIMESTAMP` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` | | `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` | | `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `-` | [Array](../sql-reference/data-types/array.md) | `LIST` | | `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
Arrays can be nested and can have a value of the `Nullable` type as an argument.
ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type. ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.


@ -148,5 +148,11 @@ toc_title: Adopters
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) | | <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
| <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) | | <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) |
| <a href="https://www.tesla.com/" class="favicon">Tesla</a> | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) | | <a href="https://www.tesla.com/" class="favicon">Tesla</a> | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) |
| <a href="https://www.kgk-global.com/en/" class="favicon">KGK Global</a> | Vehicle monitoring | — | — | — | [Press release, June 2021](https://zoom.cnews.ru/news/item/530921) |
| <a href="https://www.bilibili.com/" class="favicon">BiliBili</a> | Video sharing | — | — | — | [Blog post, June 2021](https://chowdera.com/2021/06/20210622012241476b.html) |
| <a href="https://gigapipe.com/" class="favicon">Gigapipe</a> | Managed ClickHouse | Main product | — | — | [Official website](https://gigapipe.com/) |
| <a href="https://www.hydrolix.io/" class="favicon">Hydrolix</a> | Cloud data platform | Main product | — | — | [Documentation](https://docs.hydrolix.io/guide/query) |
| <a href="https://www.argedor.com/en/clickhouse/" class="favicon">Argedor</a> | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) |
| <a href="https://signoz.io/" class="favicon">SigNoz</a> | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) |
[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide--> [Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->


@ -191,10 +191,12 @@ Possible values:
Default value: 480. Default value: 480.
`fsync` is not called for new parts, so for some time new parts exist only in the server's RAM (OS cache). If the server is rebooted spontaneously, new parts can be lost or damaged. After merging several parts into a new part, ClickHouse marks the original parts as inactive and deletes them only after `old_parts_lifetime` seconds.
To protect the data in parts created by merges, source parts are not deleted immediately. After merging several parts into a new part, ClickHouse marks the original parts as inactive and deletes them only after `old_parts_lifetime` seconds.
Inactive parts are removed if they are not used by current queries, i.e. if the `refcount` of the part is zero. Inactive parts are removed if they are not used by current queries, i.e. if the `refcount` of the part is zero.
`fsync` is not called for new parts, so for some time new parts exist only in the server's RAM (OS cache). If the server is rebooted spontaneously, new parts can be lost or damaged.
To protect data, inactive parts are not deleted immediately.
During startup ClickHouse checks the integrity of the parts. During startup ClickHouse checks the integrity of the parts.
If the merged part is damaged ClickHouse returns the inactive parts to the active list, and later merges them again. Then the damaged part is renamed (the `broken_` prefix is added) and moved to the `detached` folder. If the merged part is damaged ClickHouse returns the inactive parts to the active list, and later merges them again. Then the damaged part is renamed (the `broken_` prefix is added) and moved to the `detached` folder.
If the merged part is not damaged, then the original inactive parts are renamed (the `ignored_` prefix is added) and moved to the `detached` folder. If the merged part is not damaged, then the original inactive parts are renamed (the `ignored_` prefix is added) and moved to the `detached` folder.
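A hedged sketch for checking the current value of `old_parts_lifetime` on a running server:

``` sql
SELECT name, value, description
FROM system.merge_tree_settings
WHERE name = 'old_parts_lifetime';
```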
@ -214,7 +216,7 @@ Default value: 161061273600 (150 GB).
The merge scheduler periodically analyzes the sizes and number of parts in partitions, and if there is enough free resources in the pool, it starts background merges. Merges occur until the total size of the source parts is less than `max_bytes_to_merge_at_max_space_in_pool`. The merge scheduler periodically analyzes the sizes and number of parts in partitions, and if there is enough free resources in the pool, it starts background merges. Merges occur until the total size of the source parts is less than `max_bytes_to_merge_at_max_space_in_pool`.
Merges initiated by `optimize final` ignore `max_bytes_to_merge_at_max_space_in_pool` and merge parts only taking into account available resources (free disk's space) until one part remains in the partition. Merges initiated by [OPTIMIZE FINAL](../../sql-reference/statements/optimize.md) ignore `max_bytes_to_merge_at_max_space_in_pool` and merge parts only taking into account available resources (free disk's space) until one part remains in the partition.
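For example (the table name is an assumption):

``` sql
-- merges down to one part per partition, ignoring max_bytes_to_merge_at_max_space_in_pool
OPTIMIZE TABLE example_table FINAL;
```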
## max_bytes_to_merge_at_min_space_in_pool {#max-bytes-to-merge-at-min-space-in-pool} ## max_bytes_to_merge_at_min_space_in_pool {#max-bytes-to-merge-at-min-space-in-pool}
@ -252,6 +254,7 @@ Possible values:
Default value: auto (number of CPU cores). Default value: auto (number of CPU cores).
During startup ClickHouse reads all parts of all tables (reads files with metadata of parts) to build a list of all parts in memory. In some systems with a large number of parts this process can take a long time, and this time might be shortened by increasing `max_part_loading_threads` (if this process is not CPU and disk I/O bound). During startup ClickHouse reads all parts of all tables (reads files with metadata of parts) to build a list of all parts in memory. In some systems with a large number of parts this process can take a long time, and this time might be shortened by increasing `max_part_loading_threads` (if this process is not CPU and disk I/O bound).
## max_partitions_to_read {#max-partitions-to-read} ## max_partitions_to_read {#max-partitions-to-read}
Limits the maximum number of partitions that can be accessed in one query. Limits the maximum number of partitions that can be accessed in one query.


@ -1591,6 +1591,18 @@ FORMAT PrettyCompactMonoBlock
Default value: 0 Default value: 0
## distributed_push_down_limit {#distributed-push-down-limit}
`LIMIT` will be applied on each shard separately. Usually you do not need to use it, since this is done automatically when possible, i.e. for a simple `SELECT ... FROM ... LIMIT` query.
Possible values:
- 0 - Disabled
- 1 - Enabled
!!! note "Note"
With this setting enabled, the result of the query may be inaccurate.
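A hedged sketch (the Distributed table name is an assumption):

``` sql
SET distributed_push_down_limit = 1;
-- LIMIT is applied on each shard before results are merged on the initiator
SELECT * FROM distributed_table LIMIT 10;
```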
## optimize_skip_unused_shards_limit {#optimize-skip-unused-shards-limit} ## optimize_skip_unused_shards_limit {#optimize-skip-unused-shards-limit}
Limit for number of sharding key values, turns off `optimize_skip_unused_shards` if the limit is reached. Limit for number of sharding key values, turns off `optimize_skip_unused_shards` if the limit is reached.
@ -2069,7 +2081,7 @@ Possible values:
- Any positive integer. - Any positive integer.
Default value: 16. Default value: 128.
## background_fetches_pool_size {#background_fetches_pool_size} ## background_fetches_pool_size {#background_fetches_pool_size}
@ -2549,17 +2561,6 @@ Result
└──────────────────────────┴───────┴───────────────────────────────────────────────────────┘ └──────────────────────────┴───────┴───────────────────────────────────────────────────────┘
``` ```
## allow_experimental_bigint_types {#allow_experimental_bigint_types}
Enables or disables integer values exceeding the range that is supported by the int data type.
Possible values:
- 1 — The bigint data type is enabled.
- 0 — The bigint data type is disabled.
Default value: `0`.
## persistent {#persistent} ## persistent {#persistent}
Disables persistency for the [Set](../../engines/table-engines/special/set.md#set) and [Join](../../engines/table-engines/special/join.md#join) table engines. Disables persistency for the [Set](../../engines/table-engines/special/set.md#set) and [Join](../../engines/table-engines/special/join.md#join) table engines.
@ -3078,4 +3079,69 @@ SELECT
FROM fuse_tbl FROM fuse_tbl
``` ```
## flatten_nested {#flatten-nested}
Sets the data format of [nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns.
Possible values:
- 1 — Nested column is flattened to separate arrays.
- 0 — Nested column stays a single array of tuples.
Default value: `1`.
**Usage**
If the setting is set to `0`, it is possible to use an arbitrary level of nesting.
**Examples**
Query:
``` sql
SET flatten_nested = 1;
CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();
SHOW CREATE TABLE t_nest;
```
Result:
``` text
┌─statement───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
`n.a` Array(UInt32),
`n.b` Array(UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
Query:
``` sql
SET flatten_nested = 0;
CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();
SHOW CREATE TABLE t_nest;
```
Result:
``` text
┌─statement──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
`n` Nested(a UInt32, b UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide --> [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->


@ -30,14 +30,6 @@ Do not disable overcommit. The value `cat /proc/sys/vm/overcommit_memory` should
$ echo 0 | sudo tee /proc/sys/vm/overcommit_memory $ echo 0 | sudo tee /proc/sys/vm/overcommit_memory
``` ```
## Huge Pages {#huge-pages}
Always disable transparent huge pages. It interferes with memory allocators, which leads to significant performance degradation.
``` bash
$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
```
Use `perf top` to watch the time spent in the kernel for memory management. Use `perf top` to watch the time spent in the kernel for memory management.
Permanent huge pages also do not need to be allocated. Permanent huge pages also do not need to be allocated.
@ -91,6 +83,15 @@ The Linux kernel prior to 3.2 had a multitude of problems with IPv6 implementati
Use at least a 10 GB network, if possible. 1 Gb will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data. Use at least a 10 GB network, if possible. 1 Gb will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data.
## Huge Pages {#huge-pages}
If you are using an old Linux kernel, disable transparent huge pages. They interfere with memory allocators, which leads to significant performance degradation.
On newer Linux kernels, transparent huge pages are fine.
``` bash
$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
```
## Hypervisor configuration ## Hypervisor configuration
If you are using OpenStack, set If you are using OpenStack, set


@ -116,7 +116,7 @@ Type: `UInt8`.
- `.*` — Matches any number of events. You do not need conditional arguments to match this element of the pattern. - `.*` — Matches any number of events. You do not need conditional arguments to match this element of the pattern.
- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` matches events that occur more than 1800 seconds from each other. An arbitrary number of any events can lay between these events. You can use the `>=`, `>`, `<`, `<=` operators. - `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` matches events that occur more than 1800 seconds from each other. An arbitrary number of any events can lay between these events. You can use the `>=`, `>`, `<`, `<=`, `==` operators.
**Examples** **Examples**
@ -509,7 +509,7 @@ Same behavior as [sumMap](../../sql-reference/aggregate-functions/reference/summ
## sequenceNextNode {#sequenceNextNode} ## sequenceNextNode {#sequenceNextNode}
Returns a value of next event that matched an event chain. Returns a value of the next event that matched an event chain.
_Experimental function, `SET allow_experimental_funnel_functions = 1` to enable it._ _Experimental function, `SET allow_experimental_funnel_functions = 1` to enable it._
@ -520,33 +520,36 @@ sequenceNextNode(direction, base)(timestamp, event_column, base_condition, event
``` ```
**Parameters** **Parameters**
- `direction` - Used to navigate to directions.
- forward : Moving forward
- backward: Moving backward
- `base` - Used to set the base point. - `direction` — Used to navigate to directions.
- head : Set the base point to the first event - forward — Moving forward.
- tail : Set the base point to the last event - backward — Moving backward.
- first_match : Set the base point to the first matched event1
- last_match : Set the base point to the last matched event1 - `base` — Used to set the base point.
- head — Set the base point to the first event.
- tail — Set the base point to the last event.
- first_match — Set the base point to the first matched `event1`.
- last_match — Set the base point to the last matched `event1`.
**Arguments** **Arguments**
- `timestamp` — Name of the column containing the timestamp. Data types supported: `Date`, `DateTime` and other unsigned integer types.
- `event_column` — Name of the column containing the value of the next event to be returned. Data types supported: `String` and `Nullable(String)` - `timestamp` — Name of the column containing the timestamp. Data types supported: [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) and other unsigned integer types.
- `event_column` — Name of the column containing the value of the next event to be returned. Data types supported: [String](../../sql-reference/data-types/string.md) and [Nullable(String)](../../sql-reference/data-types/nullable.md).
- `base_condition` — Condition that the base point must fulfill. - `base_condition` — Condition that the base point must fulfill.
- `cond` — Conditions describing the chain of events. `UInt8` - `event1`, `event2`, ... — Conditions describing the chain of events. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value** **Returned values**
- `event_column[next_index]` - if the pattern is matched and next value exists.
- `NULL` - if the pattern isnt matched or next value doesn't exist.
Type: `Nullable(String)`. - `event_column[next_index]` — If the pattern is matched and next value exists.
- `NULL` - If the pattern isnt matched or next value doesn't exist.
Type: [Nullable(String)](../../sql-reference/data-types/nullable.md).
**Example** **Example**
It can be used when events are A->B->C->E->F and you want to know the event following B->C, which is E. It can be used when events are A->B->C->D->E and you want to know the event following B->C, which is D.
The query statement searching the event following A->B : The query statement searching the event following A->B:
``` sql ``` sql
CREATE TABLE test_flow ( CREATE TABLE test_flow (
@ -557,7 +560,7 @@ ENGINE = MergeTree()
PARTITION BY toYYYYMMDD(dt) PARTITION BY toYYYYMMDD(dt)
ORDER BY id; ORDER BY id;
INSERT INTO test_flow VALUES (1, 1, 'A') (2, 1, 'B') (3, 1, 'C') (4, 1, 'E') (5, 1, 'F'); INSERT INTO test_flow VALUES (1, 1, 'A') (2, 1, 'B') (3, 1, 'C') (4, 1, 'D') (5, 1, 'E');
SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'A', page = 'A', page = 'B') as next_flow FROM test_flow GROUP BY id; SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'A', page = 'A', page = 'B') as next_flow FROM test_flow GROUP BY id;
``` ```
@ -572,7 +575,7 @@ Result:
**Behavior for `forward` and `head`** **Behavior for `forward` and `head`**
```SQL ``` sql
ALTER TABLE test_flow DELETE WHERE 1 = 1 settings mutations_sync = 1; ALTER TABLE test_flow DELETE WHERE 1 = 1 settings mutations_sync = 1;
INSERT INTO test_flow VALUES (1, 1, 'Home') (2, 1, 'Gift') (3, 1, 'Exit'); INSERT INTO test_flow VALUES (1, 1, 'Home') (2, 1, 'Gift') (3, 1, 'Exit');
@ -580,7 +583,7 @@ INSERT INTO test_flow VALUES (1, 2, 'Home') (2, 2, 'Home') (3, 2, 'Gift') (4, 2,
INSERT INTO test_flow VALUES (1, 3, 'Gift') (2, 3, 'Home') (3, 3, 'Gift') (4, 3, 'Basket'); INSERT INTO test_flow VALUES (1, 3, 'Gift') (2, 3, 'Home') (3, 3, 'Gift') (4, 3, 'Basket');
``` ```
```SQL ``` sql
SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'Home', page = 'Home', page = 'Gift') FROM test_flow GROUP BY id; SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'Home', page = 'Home', page = 'Gift') FROM test_flow GROUP BY id;
dt id page dt id page
@ -601,7 +604,7 @@ SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'Home', page = '
**Behavior for `backward` and `tail`** **Behavior for `backward` and `tail`**
```SQL ``` sql
SELECT id, sequenceNextNode('backward', 'tail')(dt, page, page = 'Basket', page = 'Basket', page = 'Gift') FROM test_flow GROUP BY id; SELECT id, sequenceNextNode('backward', 'tail')(dt, page, page = 'Basket', page = 'Basket', page = 'Gift') FROM test_flow GROUP BY id;
dt id page dt id page
@ -623,7 +626,7 @@ SELECT id, sequenceNextNode('backward', 'tail')(dt, page, page = 'Basket', page
**Behavior for `forward` and `first_match`** **Behavior for `forward` and `first_match`**
```SQL ``` sql
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', page = 'Gift') FROM test_flow GROUP BY id; SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', page = 'Gift') FROM test_flow GROUP BY id;
dt id page dt id page
@ -637,12 +640,12 @@ SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', p
1970-01-01 09:00:04 2 Basket The result 1970-01-01 09:00:04 2 Basket The result
1970-01-01 09:00:01 3 Gift // Base point 1970-01-01 09:00:01 3 Gift // Base point
1970-01-01 09:00:02 3 Home // Thre result 1970-01-01 09:00:02 3 Home // The result
1970-01-01 09:00:03 3 Gift 1970-01-01 09:00:03 3 Gift
1970-01-01 09:00:04 3 Basket 1970-01-01 09:00:04 3 Basket
``` ```
```SQL ``` sql
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', page = 'Gift', page = 'Home') FROM test_flow GROUP BY id; SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', page = 'Gift', page = 'Home') FROM test_flow GROUP BY id;
dt id page dt id page
@ -664,7 +667,7 @@ SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', p
**Behavior for `backward` and `last_match`** **Behavior for `backward` and `last_match`**
```SQL ``` sql
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', page = 'Gift') FROM test_flow GROUP BY id; SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', page = 'Gift') FROM test_flow GROUP BY id;
dt id page dt id page
@ -683,7 +686,7 @@ SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', p
1970-01-01 09:00:04 3 Basket 1970-01-01 09:00:04 3 Basket
``` ```
```SQL ``` sql
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', page = 'Gift', page = 'Home') FROM test_flow GROUP BY id; SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', page = 'Gift', page = 'Home') FROM test_flow GROUP BY id;
dt id page dt id page
@ -705,7 +708,7 @@ SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', p
**Behavior for `base_condition`** **Behavior for `base_condition`**
```SQL ``` sql
CREATE TABLE test_flow_basecond CREATE TABLE test_flow_basecond
( (
`dt` DateTime, `dt` DateTime,
@ -715,47 +718,47 @@ CREATE TABLE test_flow_basecond
) )
ENGINE = MergeTree ENGINE = MergeTree
PARTITION BY toYYYYMMDD(dt) PARTITION BY toYYYYMMDD(dt)
ORDER BY id ORDER BY id;
INSERT INTO test_flow_basecond VALUES (1, 1, 'A', 'ref4') (2, 1, 'A', 'ref3') (3, 1, 'B', 'ref2') (4, 1, 'B', 'ref1'); INSERT INTO test_flow_basecond VALUES (1, 1, 'A', 'ref4') (2, 1, 'A', 'ref3') (3, 1, 'B', 'ref2') (4, 1, 'B', 'ref1');
``` ```
```SQL ``` sql
SELECT id, sequenceNextNode('forward', 'head')(dt, page, ref = 'ref1', page = 'A') FROM test_flow_basecond GROUP BY id; SELECT id, sequenceNextNode('forward', 'head')(dt, page, ref = 'ref1', page = 'A') FROM test_flow_basecond GROUP BY id;
dt id page ref dt id page ref
1970-01-01 09:00:01 1 A ref4 // The head can't be base point becasue the ref column of the head unmatched with 'ref1'. 1970-01-01 09:00:01 1 A ref4 // The head can not be base point because the ref column of the head unmatched with 'ref1'.
1970-01-01 09:00:02 1 A ref3 1970-01-01 09:00:02 1 A ref3
1970-01-01 09:00:03 1 B ref2 1970-01-01 09:00:03 1 B ref2
1970-01-01 09:00:04 1 B ref1 1970-01-01 09:00:04 1 B ref1
``` ```
```SQL ``` sql
SELECT id, sequenceNextNode('backward', 'tail')(dt, page, ref = 'ref4', page = 'B') FROM test_flow_basecond GROUP BY id; SELECT id, sequenceNextNode('backward', 'tail')(dt, page, ref = 'ref4', page = 'B') FROM test_flow_basecond GROUP BY id;
dt id page ref dt id page ref
1970-01-01 09:00:01 1 A ref4 1970-01-01 09:00:01 1 A ref4
1970-01-01 09:00:02 1 A ref3 1970-01-01 09:00:02 1 A ref3
1970-01-01 09:00:03 1 B ref2 1970-01-01 09:00:03 1 B ref2
1970-01-01 09:00:04 1 B ref1 // The tail can't be base point becasue the ref column of the tail unmatched with 'ref4'. 1970-01-01 09:00:04 1 B ref1 // The tail can not be base point because the ref column of the tail unmatched with 'ref4'.
``` ```
```SQL ``` sql
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, ref = 'ref3', page = 'A') FROM test_flow_basecond GROUP BY id; SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, ref = 'ref3', page = 'A') FROM test_flow_basecond GROUP BY id;
dt id page ref dt id page ref
1970-01-01 09:00:01 1 A ref4 // This row can't be base point becasue the ref column unmatched with 'ref3'. 1970-01-01 09:00:01 1 A ref4 // This row can not be base point because the ref column unmatched with 'ref3'.
1970-01-01 09:00:02 1 A ref3 // Base point 1970-01-01 09:00:02 1 A ref3 // Base point
1970-01-01 09:00:03 1 B ref2 // The result 1970-01-01 09:00:03 1 B ref2 // The result
1970-01-01 09:00:04 1 B ref1 1970-01-01 09:00:04 1 B ref1
``` ```
```SQL ``` sql
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, ref = 'ref2', page = 'B') FROM test_flow_basecond GROUP BY id; SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, ref = 'ref2', page = 'B') FROM test_flow_basecond GROUP BY id;
dt id page ref dt id page ref
1970-01-01 09:00:01 1 A ref4 1970-01-01 09:00:01 1 A ref4
1970-01-01 09:00:02 1 A ref3 // The result 1970-01-01 09:00:02 1 A ref3 // The result
1970-01-01 09:00:03 1 B ref2 // Base point 1970-01-01 09:00:03 1 B ref2 // Base point
1970-01-01 09:00:04 1 B ref1 // This row can't be base point becasue the ref column unmatched with 'ref2'. 1970-01-01 09:00:04 1 B ref1 // This row can not be base point because the ref column unmatched with 'ref2'.
``` ```
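Because the function is experimental, the setting mentioned at the top of this section has to be enabled before any of the queries above will run. A minimal sketch, reusing the `test_flow` table from the examples:

``` sql
-- Enable the experimental funnel functions for the current session.
SET allow_experimental_funnel_functions = 1;

-- Same query as in the `forward`/`head` example above.
SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'Home', page = 'Home', page = 'Gift') AS next_page
FROM test_flow
GROUP BY id;
```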

View File

@ -1,37 +0,0 @@
---
toc_priority: 150
---
## initializeAggregation {#initializeaggregation}
Initializes aggregation for your input rows. It is intended for the functions with the suffix `State`.
Use it for tests or to process columns of types `AggregateFunction` and `AggregatingMergeTree`.
**Syntax**
``` sql
initializeAggregation (aggregate_function, column_1, column_2)
```
**Arguments**
- `aggregate_function` — Name of the aggregation function whose state is being created. [String](../../../sql-reference/data-types/string.md#string).
- `column_n` — The column to pass into the function as its argument. [String](../../../sql-reference/data-types/string.md#string).
**Returned value(s)**
Returns the result of the aggregation for your input rows. The return type will be the same as the return type of the function that `initializeAggregation` takes as its first argument.
For example for functions with the suffix `State` the return type will be `AggregateFunction`.
**Example**
Query:
```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM system.numbers LIMIT 10000);
```
Result:
┌─uniqMerge(state)─┐
│ 3 │
└──────────────────┘

View File

@ -74,4 +74,26 @@ Received exception from server (version 1.1.54388):
Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not.
``` ```
[Original article](https://clickhouse.tech/docs/en/data_types/array/) <!--hide--> ## Array Size {#array-size}
It is possible to find the size of an array by using the `size0` subcolumn without reading the whole column. For multi-dimensional arrays you can use `sizeN-1`, where `N` is the desired dimension.
**Example**
Query:
```sql
CREATE TABLE t_arr (`arr` Array(Array(Array(UInt32)))) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO t_arr VALUES ([[[12, 13, 0, 1],[12]]]);
SELECT arr.size0, arr.size1, arr.size2 FROM t_arr;
```
Result:
``` text
┌─arr.size0─┬─arr.size1─┬─arr.size2─┐
│ 1 │ [2] │ [[4,1]] │
└───────────┴───────────┴───────────┘
```

View File

@ -34,7 +34,7 @@ CREATE TABLE test.visits
This example declares the `Goals` nested data structure, which contains data about conversions (goals reached). Each row in the visits table can correspond to zero or any number of conversions. This example declares the `Goals` nested data structure, which contains data about conversions (goals reached). Each row in the visits table can correspond to zero or any number of conversions.
Only a single nesting level is supported. Columns of nested structures containing arrays are equivalent to multidimensional arrays, so they have limited support (there is no support for storing these columns in tables with the MergeTree engine). When [flatten_nested](../../../operations/settings/settings.md#flatten-nested) is set to `0` (which is not the default), arbitrary levels of nesting are supported.
In most cases, when working with a nested data structure, its columns are specified with column names separated by a dot. These columns make up an array of matching types. All the column arrays of a single nested data structure have the same length. In most cases, when working with a nested data structure, its columns are specified with column names separated by a dot. These columns make up an array of matching types. All the column arrays of a single nested data structure have the same length.
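As a hedged illustration of the `flatten_nested` setting mentioned above (the table and column names below are hypothetical, and the sketch assumes the setting behaves as described):

``` sql
-- With flatten_nested = 0 the nested column is kept as a single
-- Array(Tuple(...)) column, and Nested types may themselves contain Nested columns.
SET flatten_nested = 0;

CREATE TABLE visits_nested
(
    `Goals` Nested(ID UInt32, Events Nested(Time DateTime, Price UInt64))
)
ENGINE = MergeTree
ORDER BY tuple();
```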

View File

@ -20,6 +20,33 @@ To store `Nullable` type values in a table column, ClickHouse uses a separate fi
!!! info "Note" !!! info "Note"
Using `Nullable` almost always negatively affects performance, keep this in mind when designing your databases. Using `Nullable` almost always negatively affects performance, keep this in mind when designing your databases.
## Finding NULL {#finding-null}
It is possible to find `NULL` values in a column by using the `null` subcolumn without reading the whole column. It returns `1` if the corresponding value is `NULL` and `0` otherwise.
**Example**
Query:
``` sql
CREATE TABLE nullable (`n` Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO nullable VALUES (1) (NULL) (2) (NULL);
SELECT n.null FROM nullable;
```
Result:
``` text
┌─n.null─┐
│ 0 │
│ 1 │
│ 0 │
│ 1 │
└────────┘
```
## Usage Example {#usage-example} ## Usage Example {#usage-example}
``` sql ``` sql

View File

@ -47,4 +47,32 @@ SELECT tuple(1, NULL) AS x, toTypeName(x)
└──────────┴─────────────────────────────────┘ └──────────┴─────────────────────────────────┘
``` ```
## Addressing Tuple Elements {#addressing-tuple-elements}
It is possible to read elements of named tuples using indexes and names:
``` sql
CREATE TABLE named_tuples (`a` Tuple(s String, i Int64)) ENGINE = Memory;
INSERT INTO named_tuples VALUES (('y', 10)), (('x',-10));
SELECT a.s FROM named_tuples;
SELECT a.2 FROM named_tuples;
```
Result:
``` text
┌─a.s─┐
│ y │
│ x │
└─────┘
┌─tupleElement(a, 2)─┐
│ 10 │
│ -10 │
└────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/data_types/tuple/) <!--hide--> [Original article](https://clickhouse.tech/docs/en/data_types/tuple/) <!--hide-->

View File

@ -60,7 +60,8 @@ SETTINGS(format_csv_allow_single_quotes = 0)
Types of sources (`source_type`): Types of sources (`source_type`):
- [Local file](#dicts-external_dicts_dict_sources-local_file) - [Local file](#dicts-external_dicts_dict_sources-local_file)
- [Executable file](#dicts-external_dicts_dict_sources-executable) - [Executable File](#dicts-external_dicts_dict_sources-executable)
- [Executable Pool](#dicts-external_dicts_dict_sources-executable_pool)
- [HTTP(s)](#dicts-external_dicts_dict_sources-http) - [HTTP(s)](#dicts-external_dicts_dict_sources-http)
- DBMS - DBMS
- [ODBC](#dicts-external_dicts_dict_sources-odbc) - [ODBC](#dicts-external_dicts_dict_sources-odbc)
@ -94,7 +95,7 @@ SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated'))
Setting fields: Setting fields:
- `path` The absolute path to the file. - `path` The absolute path to the file.
- `format` The file format. All the formats described in [Formats](../../../interfaces/formats.md#formats) are supported. - `format` The file format. All the formats described in [Formats](../../../interfaces/formats.md#formats) are supported.
When dictionary with source `FILE` is created via DDL command (`CREATE DICTIONARY ...`), the source file needs to be located in `user_files` directory, to prevent DB users accessing arbitrary file on ClickHouse node. When dictionary with source `FILE` is created via DDL command (`CREATE DICTIONARY ...`), the source file needs to be located in `user_files` directory, to prevent DB users accessing arbitrary file on ClickHouse node.
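A minimal sketch of such a DDL-created dictionary, assuming the `os.tsv` file from the example above has been placed in the `user_files` directory (the dictionary name and attribute names are hypothetical):

``` sql
CREATE DICTIONARY os_dict
(
    id UInt64,
    os_name String
)
PRIMARY KEY id
SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 360);
```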
@ -113,21 +114,24 @@ Example of settings:
<executable> <executable>
<command>cat /opt/dictionaries/os.tsv</command> <command>cat /opt/dictionaries/os.tsv</command>
<format>TabSeparated</format> <format>TabSeparated</format>
<implicit_key>false</implicit_key>
</executable> </executable>
</source> </source>
``` ```
Setting fields: Setting fields:
- `command` The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). - `command` The absolute path to the executable file, or the file name (if the program directory is written to `PATH`).
- `format` The file format. All the formats described in “[Formats](../../../interfaces/formats.md#formats)” are supported. - `format` — The file format. All the formats described in [Formats](../../../interfaces/formats.md#formats) are supported.
- `implicit_key` - The executable source file can return only values, and the correspondence to the requested keys is determined implicitly - by the order of rows in the result. Default value is false. - `implicit_key` The executable source file can return only values, and the correspondence to the requested keys is determined implicitly by the order of rows in the result. Default value is false.
That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled, otherwise, the DB user would be able to execute arbitrary binary on ClickHouse node. That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled, otherwise, the DB user would be able to execute arbitrary binary on ClickHouse node.
## Executable Pool {#dicts-external_dicts_dict_sources-executable_pool} ## Executable Pool {#dicts-external_dicts_dict_sources-executable_pool}
Executable pool allows loading data from pool of processes. This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary is stored using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, `complex_key_direct` layouts. Executable pool will spawn pool of processes with specified command and keep them running until they exit. The program should read data from STDIN while it is available and output result to STDOUT, and it can wait for next block of data on stdin. ClickHouse will not close STDIN after processing a block of data but will pipe another chunk of data when needed. The executable script should be ready for this way of data processing - it should poll STDIN and flush data to STDOUT early. Executable pool allows loading data from pool of processes. This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary [is stored](external-dicts-dict-layout.md#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, `complex_key_direct` layouts.
Executable pool will spawn pool of processes with specified command and keep them running until they exit. The program should read data from STDIN while it is available and output result to STDOUT, and it can wait for next block of data on STDIN. ClickHouse will not close STDIN after processing a block of data but will pipe another chunk of data when needed. The executable script should be ready for this way of data processing — it should poll STDIN and flush data to STDOUT early.
Example of settings: Example of settings:
@ -145,12 +149,12 @@ Example of settings:
Setting fields: Setting fields:
- `command` The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). - `command` The absolute path to the executable file, or the file name (if the program directory is written to `PATH`).
- `format` The file format. All the formats described in “[Formats](../../../interfaces/formats.md#formats)” are supported. - `format` The file format. All the formats described in “[Formats](../../../interfaces/formats.md#formats)” are supported.
- `pool_size` - Size of pool. If 0 is specified as `pool_size` then there is no pool size restrictions. - `pool_size` Size of pool. If 0 is specified as `pool_size` then there is no pool size restrictions.
- `command_termination_timeout` - Executable pool script, should contain main read-write loop. After dictionary is destroyed, pipe is closed, and executable file will have command_termination_timeout seconds to shutdown, before ClickHouse will send SIGTERM signal to child process. Specified in seconds. Default value is 10. Optional parameter. - `command_termination_timeout` — Executable pool script should contain main read-write loop. After dictionary is destroyed, pipe is closed, and executable file will have `command_termination_timeout` seconds to shutdown, before ClickHouse will send SIGTERM signal to child process. Specified in seconds. Default value is 10. Optional parameter.
- `max_command_execution_time` - Maximum executable script command execution time for processing block of data. Specified in seconds. Default value is 10. Optional parameter. - `max_command_execution_time` Maximum executable script command execution time for processing block of data. Specified in seconds. Default value is 10. Optional parameter.
- `implicit_key` - The executable source file can return only values, and the correspondence to the requested keys is determined implicitly - by the order of rows in the result. Default value is false. Optional parameter. - `implicit_key` The executable source file can return only values, and the correspondence to the requested keys is determined implicitly by the order of rows in the result. Default value is false. Optional parameter.
That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled, otherwise, the DB user would be able to execute arbitrary binary on ClickHouse node. That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled, otherwise, the DB user would be able to execute arbitrary binary on ClickHouse node.
@ -482,6 +486,7 @@ Example of settings:
<table>table_name</table> <table>table_name</table>
<where>id=10</where> <where>id=10</where>
<invalidate_query>SQL_QUERY</invalidate_query> <invalidate_query>SQL_QUERY</invalidate_query>
<fail_on_connection_loss>true</fail_on_connection_loss>
</mysql> </mysql>
</source> </source>
``` ```
@ -499,6 +504,7 @@ SOURCE(MYSQL(
table 'table_name' table 'table_name'
where 'id=10' where 'id=10'
invalidate_query 'SQL_QUERY' invalidate_query 'SQL_QUERY'
fail_on_connection_loss 'true'
)) ))
``` ```
@ -523,6 +529,8 @@ Setting fields:
- `invalidate_query` Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). - `invalidate_query` Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `fail_on_connection_loss` The configuration parameter that controls behavior of the server on connection loss. If `true`, an exception is thrown immediately if the connection between client and server was lost. If `false`, the ClickHouse server retries to execute the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`.
MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`. MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`.
Example of settings: Example of settings:
@ -538,6 +546,7 @@ Example of settings:
<table>table_name</table> <table>table_name</table>
<where>id=10</where> <where>id=10</where>
<invalidate_query>SQL_QUERY</invalidate_query> <invalidate_query>SQL_QUERY</invalidate_query>
<fail_on_connection_loss>true</fail_on_connection_loss>
</mysql> </mysql>
</source> </source>
``` ```
@ -554,6 +563,7 @@ SOURCE(MYSQL(
table 'table_name' table 'table_name'
where 'id=10' where 'id=10'
invalidate_query 'SQL_QUERY' invalidate_query 'SQL_QUERY'
fail_on_connection_loss 'true'
)) ))
``` ```

View File

@ -39,13 +39,44 @@ Accepts zero arguments and returns an empty array of the appropriate type.
Accepts an empty array and returns a one-element array that is equal to the default value. Accepts an empty array and returns a one-element array that is equal to the default value.
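A small hedged illustration of the two helpers described above; the concrete function names belong to headings truncated in this hunk and are assumed to be the `emptyArray*` family and `emptyArrayToSingle`:

``` sql
SELECT
    emptyArrayUInt32() AS empty,                        -- returns []
    emptyArrayToSingle(emptyArrayUInt32()) AS single;   -- returns [0], the default value
```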
## range(end), range(start, end \[, step\]) {#rangeend-rangestart-end-step}
Returns an array of numbers from start to end-1 by step. ## range(end), range(\[start, \] end \[, step\]) {#range}
If the argument `start` is not specified, defaults to 0.
If the argument `step` is not specified, defaults to 1. Returns an array of `UInt` numbers from `start` to `end - 1` by `step`.
It behaves almost like pythonic `range`. But the difference is that all the argument types must be `UInt` numbers. **Syntax**
Just in case, an exception is thrown if arrays with a total length of more than 100,000,000 elements are created in a data block. **Syntax**
``` sql
range([start, ] end [, step])
```
**Arguments**
- `start` — The first element of the array. Optional, required if `step` is used. Default value: 0. [UInt](../data-types/int-uint.md)
- `end` — The number before which the array is constructed. Required. [UInt](../data-types/int-uint.md)
- `step` — Determines the incremental step between each element in the array. Optional. Default value: 1. [UInt](../data-types/int-uint.md)
**Returned value**
- Array of `UInt` numbers from `start` to `end - 1` by `step`.
**Implementation details**
- All arguments must be positive values: `start`, `end`, `step` are `UInt` data types, as well as elements of the returned array.
- An exception is thrown if query results in arrays with a total length of more than 100,000,000 elements.
**Examples**
Query:
``` sql
SELECT range(5), range(1, 5), range(1, 5, 2);
```
Result:
```txt
┌─range(5)────┬─range(1, 5)─┬─range(1, 5, 2)─┐
│ [0,1,2,3,4] │ [1,2,3,4] │ [1,3] │
└─────────────┴─────────────┴────────────────┘
```
## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1} ## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1}

View File

@ -27,17 +27,17 @@ SELECT
Returns the timezone of the server. Returns the timezone of the server.
**Syntax** **Syntax**
``` sql ``` sql
timeZone() timeZone()
``` ```
Alias: `timezone`. Alias: `timezone`.
**Returned value** **Returned value**
- Timezone. - Timezone.
Type: [String](../../sql-reference/data-types/string.md). Type: [String](../../sql-reference/data-types/string.md).
@ -45,7 +45,7 @@ Type: [String](../../sql-reference/data-types/string.md).
Converts time or date and time to the specified time zone. The time zone is an attribute of the `Date` and `DateTime` data types. The internal value (number of seconds) of the table field or of the resultset's column does not change, the column's type changes and its string representation changes accordingly. Converts time or date and time to the specified time zone. The time zone is an attribute of the `Date` and `DateTime` data types. The internal value (number of seconds) of the table field or of the resultset's column does not change, the column's type changes and its string representation changes accordingly.
**Syntax** **Syntax**
``` sql ``` sql
toTimezone(value, timezone) toTimezone(value, timezone)
@ -53,14 +53,14 @@ toTimezone(value, timezone)
Alias: `toTimezone`. Alias: `toTimezone`.
**Arguments** **Arguments**
- `value` — Time or date and time. [DateTime64](../../sql-reference/data-types/datetime64.md). - `value` — Time or date and time. [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — Timezone for the returned value. [String](../../sql-reference/data-types/string.md). - `timezone` — Timezone for the returned value. [String](../../sql-reference/data-types/string.md).
**Returned value** **Returned value**
- Date and time. - Date and time.
Type: [DateTime](../../sql-reference/data-types/datetime.md). Type: [DateTime](../../sql-reference/data-types/datetime.md).
@ -102,21 +102,21 @@ int32samoa: 1546300800
Returns the timezone name of [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md) data types. Returns the timezone name of [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md) data types.
**Syntax** **Syntax**
``` sql ``` sql
timeZoneOf(value) timeZoneOf(value)
``` ```
Alias: `timezoneOf`. Alias: `timezoneOf`.
**Arguments** **Arguments**
- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). - `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
**Returned value** **Returned value**
- Timezone name. - Timezone name.
Type: [String](../../sql-reference/data-types/string.md). Type: [String](../../sql-reference/data-types/string.md).
@ -149,11 +149,11 @@ Alias: `timezoneOffset`.
**Arguments** **Arguments**
- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). - `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
**Returned value** **Returned value**
- Offset from UTC in seconds. - Offset from UTC in seconds.
Type: [Int32](../../sql-reference/data-types/int-uint.md). Type: [Int32](../../sql-reference/data-types/int-uint.md).
@ -599,7 +599,7 @@ Aliases: `dateAdd`, `DATE_ADD`.
- `quarter` - `quarter`
- `year` - `year`
- `value` — Value of interval to add. [Int](../../sql-reference/data-types/int-uint.md). - `value` — Value of interval to add. [Int](../../sql-reference/data-types/int-uint.md).
- `date` — The date or date with time to which `value` is added. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). - `date` — The date or date with time to which `value` is added. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value** **Returned value**
@ -704,7 +704,7 @@ Aliases: `dateSub`, `DATE_SUB`.
- `quarter` - `quarter`
- `year` - `year`
- `value` — Value of interval to subtract. [Int](../../sql-reference/data-types/int-uint.md). - `value` — Value of interval to subtract. [Int](../../sql-reference/data-types/int-uint.md).
- `date` — The date or date with time from which `value` is subtracted. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). - `date` — The date or date with time from which `value` is subtracted. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value** **Returned value**
@ -805,7 +805,7 @@ Aliases: `timeStampSub`, `TIMESTAMP_SUB`.
- `quarter` - `quarter`
- `year` - `year`
- `value` — Value of interval to subtract. [Int](../../sql-reference/data-types/int-uint.md). - `value` — Value of interval to subtract. [Int](../../sql-reference/data-types/int-uint.md).
- `date` — Date or date with time. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). - `date` — Date or date with time. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value** **Returned value**
@ -963,7 +963,7 @@ formatDateTime(Time, Format\[, Timezone\])
**Returned value(s)** **Returned value(s)**
Returnes time and date values according to the determined format. Returns time and date values according to the determined format.
**Replacement fields** **Replacement fields**
Using replacement fields, you can define a pattern for the resulting string. “Example” column shows formatting result for `2018-01-02 22:33:44`. Using replacement fields, you can define a pattern for the resulting string. “Example” column shows formatting result for `2018-01-02 22:33:44`.
@ -1012,6 +1012,45 @@ Result:
└────────────────────────────────────────────┘ └────────────────────────────────────────────┘
``` ```
## dateName {#dataname}
Returns the specified part of a date.
**Syntax**
``` sql
dateName(date_part, date)
```
**Arguments**
- `date_part` — Date part to extract. Possible values include `'year'`, `'month'`, and `'day'` (see the example below).
- `date` — Date or date with time. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
**Returned value**
- The specified part of the date.
Type: [String](../../sql-reference/data-types/string.md#string).
**Example**
Query:
```sql
WITH toDateTime('2021-04-14 11:22:33') AS date_value
SELECT dateName('year', date_value), dateName('month', date_value), dateName('day', date_value);
```
Result:
```text
┌─dateName('year', date_value)─┬─dateName('month', date_value)─┬─dateName('day', date_value)─┐
│ 2021 │ April │ 14 │
└──────────────────────────────┴───────────────────────────────┴─────────────────────────────┘
```
## FROM\_UNIXTIME {#fromunixfime} ## FROM\_UNIXTIME {#fromunixfime}
Function converts Unix timestamp to a calendar date and a time of a day. When there is only a single argument of [Integer](../../sql-reference/data-types/int-uint.md) type, it acts in the same way as [toDateTime](../../sql-reference/functions/type-conversion-functions.md#todatetime) and return [DateTime](../../sql-reference/data-types/datetime.md) type. Function converts Unix timestamp to a calendar date and a time of a day. When there is only a single argument of [Integer](../../sql-reference/data-types/int-uint.md) type, it acts in the same way as [toDateTime](../../sql-reference/functions/type-conversion-functions.md#todatetime) and return [DateTime](../../sql-reference/data-types/datetime.md) type.
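A minimal sketch of the single-argument form described above (the exact value shown depends on the server time zone):

``` sql
SELECT FROM_UNIXTIME(423543535) AS dt, toTypeName(dt) AS type;

-- dt is a calendar date and time such as '1983-06-04 10:58:55' (time zone dependent),
-- and type is 'DateTime'.
```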

View File

@ -6,7 +6,7 @@ toc_title: Encoding
# Encoding Functions {#encoding-functions} # Encoding Functions {#encoding-functions}
## char {#char} ## char {#char}
Returns the string with the length as the number of passed arguments and each byte has the value of corresponding argument. Accepts multiple arguments of numeric types. If the value of argument is out of range of UInt8 data type, it is converted to UInt8 with possible rounding and overflow. Returns the string with the length as the number of passed arguments and each byte has the value of corresponding argument. Accepts multiple arguments of numeric types. If the value of argument is out of range of UInt8 data type, it is converted to UInt8 with possible rounding and overflow.
**Syntax** **Syntax**
@ -156,7 +156,7 @@ Performs the opposite operation of [hex](#hex). It interprets each pair of hexad
If you want to convert the result to a number, you can use the [reverse](../../sql-reference/functions/string-functions.md#reverse) and [reinterpretAs<Type>](../../sql-reference/functions/type-conversion-functions.md#type-conversion-functions) functions. If you want to convert the result to a number, you can use the [reverse](../../sql-reference/functions/string-functions.md#reverse) and [reinterpretAs<Type>](../../sql-reference/functions/type-conversion-functions.md#type-conversion-functions) functions.
!!! note "Note" !!! note "Note"
If `unhex` is invoked from within the `clickhouse-client`, binary strings display using UTF-8. If `unhex` is invoked from within the `clickhouse-client`, binary strings display using UTF-8.
Alias: `UNHEX`. Alias: `UNHEX`.
@ -221,3 +221,51 @@ Accepts an integer. Returns a string containing the list of powers of two that t
## bitmaskToArray(num) {#bitmasktoarraynum} ## bitmaskToArray(num) {#bitmasktoarraynum}
Accepts an integer. Returns an array of UInt64 numbers containing the list of powers of two that total the source number when summed. Numbers in the array are in ascending order. Accepts an integer. Returns an array of UInt64 numbers containing the list of powers of two that total the source number when summed. Numbers in the array are in ascending order.
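A hedged sketch covering both functions described above; 50 decomposes into 2 + 16 + 32:

``` sql
SELECT bitmaskToList(50) AS list, bitmaskToArray(50) AS arr;

-- Expected: list is the string '2,16,32' and arr is the array [2,16,32].
```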
## bitPositionsToArray(num) {#bitpositionstoarraynum}
Accepts an integer; the argument is converted to an unsigned integer type. Returns an array of UInt64 numbers containing the list of positions of bits that equal 1. Numbers in the array are in ascending order.
**Syntax**
```sql
bitPositionsToArray(arg)
```
**Arguments**
- `arg` — Integer value. Types: [Int/UInt](../../sql-reference/data-types/int-uint.md).
**Returned value**
An array of UInt64 numbers containing the list of positions of bits that equal 1. Numbers in the array are in ascending order.
**Example**
Query:
``` sql
SELECT bitPositionsToArray(toInt8(1)) AS bit_positions;
```
Result:
``` text
┌─bit_positions─┐
│ [0] │
└───────────────┘
```
Query:
``` sql
SELECT bitPositionsToArray(toInt8(-1)) AS bit_positions;
```
Result:
``` text
┌─bit_positions─────┐
│ [0,1,2,3,4,5,6,7] │
└───────────────────┘
```

View File

@ -831,7 +831,7 @@ Returns 0 for the first row and the difference from the previous row for each su
!!! warning "Warning" !!! warning "Warning"
It can reach the previous row only inside the currently processed data block. It can reach the previous row only inside the currently processed data block.
The result of the function depends on the affected data blocks and the order of data in the block. The result of the function depends on the affected data blocks and the order of data in the block.
The rows order used during the calculation of `runningDifference` can differ from the order of rows returned to the user. The rows order used during the calculation of `runningDifference` can differ from the order of rows returned to the user.
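A minimal sketch of the behaviour described in this warning; within a single block the function returns 0 for the first row and the delta from the previous row afterwards:

``` sql
SELECT number, runningDifference(number * number) AS diff
FROM numbers(5);

-- Expected diffs for the squares 0, 1, 4, 9, 16 are 0, 1, 3, 5, 7.
```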
@ -908,7 +908,7 @@ Same as for [runningDifference](./other-functions.md#other_functions-runningdiff
## runningConcurrency {#runningconcurrency} ## runningConcurrency {#runningconcurrency}
Calculates the number of concurrent events. Calculates the number of concurrent events.
Each event has a start time and an end time. The start time is included in the event, while the end time is excluded. Columns with a start time and an end time must be of the same data type. Each event has a start time and an end time. The start time is included in the event, while the end time is excluded. Columns with a start time and an end time must be of the same data type.
The function calculates the total number of active (concurrent) events for each event start time. The function calculates the total number of active (concurrent) events for each event start time.
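A hedged sketch with a hypothetical table; events are inserted ordered by their start time, as the function expects:

``` sql
CREATE TABLE concurrency_demo (`begin` Date, `end` Date) ENGINE = Memory;

INSERT INTO concurrency_demo VALUES
    ('2021-03-03', '2021-03-11'),
    ('2021-03-06', '2021-03-12'),
    ('2021-03-07', '2021-03-08');

SELECT `begin`, runningConcurrency(`begin`, `end`) AS concurrent FROM concurrency_demo;

-- Expected: 1, 2 and 3 concurrent events for the three start dates respectively.
```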
@ -1424,11 +1424,83 @@ Result:
└───────────┴────────┘ └───────────┴────────┘
``` ```
## initializeAggregation {#initializeaggregation}
Calculates the result of an aggregate function based on a single value. This function is intended for initializing aggregate functions with the [-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) combinator. You can create states of aggregate functions and insert them into columns of type [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction) or use initialized aggregates as default values.
**Syntax**
``` sql
initializeAggregation (aggregate_function, arg1, arg2, ..., argN)
```
**Arguments**
- `aggregate_function` — Name of the aggregation function to initialize. [String](../../sql-reference/data-types/string.md).
- `arg` — Arguments of aggregate function.
**Returned value(s)**
- Result of aggregation for every row passed to the function.
The return type is the same as the return type of the function that `initializeAggregation` takes as its first argument.
**Example**
Query:
```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM numbers(10000));
```
Result:
```text
┌─uniqMerge(state)─┐
│ 3 │
└──────────────────┘
```
Query:
```sql
SELECT finalizeAggregation(state), toTypeName(state) FROM (SELECT initializeAggregation('sumState', number % 3) AS state FROM numbers(5));
```
Result:
```text
┌─finalizeAggregation(state)─┬─toTypeName(state)─────────────┐
│ 0 │ AggregateFunction(sum, UInt8) │
│ 1 │ AggregateFunction(sum, UInt8) │
│ 2 │ AggregateFunction(sum, UInt8) │
│ 0 │ AggregateFunction(sum, UInt8) │
│ 1 │ AggregateFunction(sum, UInt8) │
└────────────────────────────┴───────────────────────────────┘
```
Example with `AggregatingMergeTree` table engine and `AggregateFunction` column:
```sql
CREATE TABLE metrics
(
key UInt64,
value AggregateFunction(sum, UInt64) DEFAULT initializeAggregation('sumState', toUInt64(0))
)
ENGINE = AggregatingMergeTree
ORDER BY key
```
```sql
INSERT INTO metrics VALUES (0, initializeAggregation('sumState', toUInt64(42)))
```
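A hedged follow-up sketch showing how such states are usually read back, using the matching `-Merge` combinator:

``` sql
-- For the INSERT above this should return 42 for key 0.
SELECT key, sumMerge(value) AS total
FROM metrics
GROUP BY key;
```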
**See Also**
- [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce)
## finalizeAggregation {#function-finalizeaggregation} ## finalizeAggregation {#function-finalizeaggregation}
Takes state of aggregate function. Returns result of aggregation (or finalized state when using[-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) combinator). Takes state of aggregate function. Returns result of aggregation (or finalized state when using[-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) combinator).
**Syntax** **Syntax**
``` sql ``` sql
finalizeAggregation(state) finalizeAggregation(state)
@ -1442,7 +1514,7 @@ finalizeAggregation(state)
- Value/values that was aggregated. - Value/values that was aggregated.
Type: Value of any types that was aggregated. Type: Value of any types that was aggregated.
**Examples** **Examples**
@ -1474,7 +1546,7 @@ Result:
└──────────────────────────────────┘ └──────────────────────────────────┘
``` ```
Note that `NULL` values are ignored. Note that `NULL` values are ignored.
Query: Query:
@ -1520,10 +1592,9 @@ Result:
└────────┴─────────────┴────────────────┘ └────────┴─────────────┴────────────────┘
``` ```
**See Also** **See Also**
- [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce) - [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce)
- [initializeAggregation](../../sql-reference/aggregate-functions/reference/initializeAggregation.md) - [initializeAggregation](#initializeaggregation)
## runningAccumulate {#runningaccumulate} ## runningAccumulate {#runningaccumulate}

View File

@ -188,6 +188,24 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL '4' day + INTERV
└─────────────────────┴────────────────────────────────────────────────────────────┘ └─────────────────────┴────────────────────────────────────────────────────────────┘
``` ```
You can work with dates without using `INTERVAL`, just by adding or subtracting seconds, minutes, and hours. For example, an interval of one day can be set by adding `60*60*24`.
!!! note "Note"
The `INTERVAL` syntax or the `addDays` function is always preferred. Simple addition or subtraction (syntax like `now() + ...`) does not take time settings, such as daylight saving time, into account.
Examples:
``` sql
SELECT toDateTime('2014-10-26 00:00:00', 'Europe/Moscow') AS time, time + 60 * 60 * 24 AS time_plus_24_hours, time + toIntervalDay(1) AS time_plus_1_day;
```
``` text
┌────────────────time─┬──time_plus_24_hours─┬─────time_plus_1_day─┐
│ 2014-10-26 00:00:00 │ 2014-10-26 23:00:00 │ 2014-10-27 00:00:00 │
└─────────────────────┴─────────────────────┴─────────────────────┘
```
**See Also** **See Also**
- [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type - [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type
@ -295,4 +313,3 @@ SELECT * FROM t_null WHERE y IS NOT NULL
│ 2 │ 3 │ │ 2 │ 3 │
└───┴───┘ └───┴───┘
``` ```

View File

@ -56,4 +56,4 @@ Result:
ATTACH TABLE name UUID '<uuid>' (col1 Type1, ...) ATTACH TABLE name UUID '<uuid>' (col1 Type1, ...)
``` ```
It creates new table with provided structure and attaches data from table with the specified UUID. It creates a new table with the provided structure and attaches data from the table with the specified UUID.
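A hedged sketch of such a statement; the table name, columns and UUID below are placeholders, and in practice the UUID must refer to data that already exists on disk:

``` sql
ATTACH TABLE attached_demo UUID '123e4567-e89b-12d3-a456-426614174000' (n UInt64, s String)
ENGINE = MergeTree
ORDER BY n;
```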

View File

@ -366,9 +366,9 @@ Returns a list of clusters. All available clusters are listed in the [system.clu
``` sql ``` sql
SHOW CLUSTER '<name>' SHOW CLUSTER '<name>'
SWOW CLUSTERS [LIKE|NOT LIKE '<pattern>'] [LIMIT <N>] SHOW CLUSTERS [LIKE|NOT LIKE '<pattern>'] [LIMIT <N>]
``` ```
### Examples ### Examples {#show-cluster-examples}
Query: Query:

View File

@ -38,6 +38,7 @@ The list of available `SYSTEM` statements:
- [START REPLICATION QUEUES](#query_language-system-start-replication-queues) - [START REPLICATION QUEUES](#query_language-system-start-replication-queues)
- [SYNC REPLICA](#query_language-system-sync-replica) - [SYNC REPLICA](#query_language-system-sync-replica)
- [RESTART REPLICA](#query_language-system-restart-replica) - [RESTART REPLICA](#query_language-system-restart-replica)
- [RESTORE REPLICA](#query_language-system-restore-replica)
- [RESTART REPLICAS](#query_language-system-restart-replicas) - [RESTART REPLICAS](#query_language-system-restart-replicas)
## RELOAD EMBEDDED DICTIONARIES {#query_language-system-reload-emdedded-dictionaries} ## RELOAD EMBEDDED DICTIONARIES {#query_language-system-reload-emdedded-dictionaries}
@ -290,13 +291,60 @@ After running this statement the `[db.]replicated_merge_tree_family_table_name`
### RESTART REPLICA {#query_language-system-restart-replica} ### RESTART REPLICA {#query_language-system-restart-replica}
Provides possibility to reinitialize Zookeeper sessions state for `ReplicatedMergeTree` table, will compare current state with Zookeeper as source of true and add tasks to Zookeeper queue if needed Provides the ability to reinitialize the ZooKeeper session state for a `ReplicatedMergeTree` table; it compares the current state with ZooKeeper as the source of truth and adds tasks to the ZooKeeper queue if needed.
Initialization replication quene based on ZooKeeper date happens in the same way as `ATTACH TABLE` statement. For a short time the table will be unavailable for any operations. Initialization of the replication queue based on ZooKeeper data happens in the same way as for the `ATTACH TABLE` statement. For a short time the table will be unavailable for any operations.
``` sql ``` sql
SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
``` ```
### RESTORE REPLICA {#query_language-system-restore-replica}
Restores a replica if its data is (possibly) present but the ZooKeeper metadata is lost.
Works only on readonly `ReplicatedMergeTree` tables.
One may execute the query after:
- ZooKeeper root `/` loss.
- Replicas path `/replicas` loss.
- Individual replica path `/replicas/replica_name/` loss.
The replica attaches locally found parts and sends information about them to ZooKeeper.
Parts present on the replica before the metadata loss are not re-fetched from other replicas unless they are outdated
(so replica restoration does not mean re-downloading all data over the network).
Caveat: parts in all states are moved to the `detached/` folder. Parts that were active before the data loss (Committed) are attached.
#### Syntax
```sql
SYSTEM RESTORE REPLICA [db.]replicated_merge_tree_family_table_name [ON CLUSTER cluster_name]
```
Alternative syntax:
```sql
SYSTEM RESTORE REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_table_name
```
#### Example
```sql
-- Creating table on multiple servers
CREATE TABLE test(n UInt32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/', '{replica}')
ORDER BY n PARTITION BY n % 10;
INSERT INTO test SELECT * FROM numbers(1000);
-- zookeeper_delete_path("/clickhouse/tables/test", recursive=True) <- root loss.
SYSTEM RESTART REPLICA test; -- Table will attach as readonly as metadata is missing.
SYSTEM RESTORE REPLICA test; -- Need to execute on every replica, another way: RESTORE REPLICA test ON CLUSTER cluster
```
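A hedged sketch of how the result can be verified afterwards, using the `system.replicas` table; after a successful restore the replica should no longer be read-only:

``` sql
SELECT database, table, is_readonly
FROM system.replicas
WHERE table = 'test';
```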
### RESTART REPLICAS {#query_language-system-restart-replicas} ### RESTART REPLICAS {#query_language-system-restart-replicas}
Provides possibility to reinitialize Zookeeper sessions state for all `ReplicatedMergeTree` tables, will compare current state with Zookeeper as source of true and add tasks to Zookeeper queue if needed Provides possibility to reinitialize Zookeeper sessions state for all `ReplicatedMergeTree` tables, will compare current state with Zookeeper as source of true and add tasks to Zookeeper queue if needed

View File

@ -113,7 +113,7 @@ sequenceMatch(pattern)(timestamp, cond1, cond2, ...)
- `.*` — Matches any number of events. You don't need conditional arguments to match this element of the pattern. - `.*` — Matches any number of events. You don't need conditional arguments to match this element of the pattern.
- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` matches events that occur more than 1800 seconds apart from each other. Any number of events can lie between them. You can use the `>=`, `>`, `<`, `<=` operators. - `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` matches events that occur more than 1800 seconds apart from each other. Any number of events can lie between them. You can use the `>=`, `>`, `<`, `<=`, `==` operators.
**Example** **Example**

View File

@ -685,7 +685,9 @@ PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy = 'moving_from_ssd_to_hdd' SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
``` ```
By default, the `default` storage policy is used; it contains one volume and one disk specified in `<path>`. Currently, the storage policy cannot be changed after the table has been created. By default, the `default` storage policy is used; it contains one volume and one disk specified in `<path>`.
The storage policy can be changed after the table has been created with the [ALTER TABLE ... MODIFY SETTING] query. Note that the new policy must contain all volumes and disks of the previous policy with the same names.
The number of threads for background moves of data parts between disks can be changed with the [background_move_pool_size](../../../operations/settings/settings.md#background_move_pool_size) setting The number of threads for background moves of data parts between disks can be changed with the [background_move_pool_size](../../../operations/settings/settings.md#background_move_pool_size) setting

View File

@ -1168,12 +1168,15 @@ SELECT * FROM topic1_stream;
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `STRING` | | `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `STRING` |
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `STRING` | | — | [FixedString](../sql-reference/data-types/fixedstring.md) | `STRING` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` | | `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
ClickHouse supports configurable precision for the `Decimal` format. When processing an `INSERT` query, ClickHouse treats the Parquet `DECIMAL` data type as `Decimal128`. Arrays can be nested and can have a value of the `Nullable` type as an argument.
Unsupported Parquet data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. ClickHouse supports configurable precision for the `Decimal` format. When executing an `INSERT` query, ClickHouse treats the Parquet `DECIMAL` data type as `Decimal128`.
Column data types in ClickHouse can differ from the data types of the corresponding fields of a Parquet file. When inserting data, ClickHouse interprets the data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) the data to the type set for the table column. Unsupported Parquet data types: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
Column data types in ClickHouse can differ from the data types of the corresponding fields of a Parquet file. When inserting data, ClickHouse interprets the data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) the data to the type set for the table column.
### Inserting and Selecting Data {#vstavka-i-vyborka-dannykh} ### Inserting and Selecting Data {#vstavka-i-vyborka-dannykh}
@ -1197,6 +1200,53 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_
`Arrow` is the Apache Arrow "file mode" format. It is designed for in-memory random access. `Arrow` is the Apache Arrow "file mode" format. It is designed for in-memory random access.
### Data Types Matching {#data_types-matching-arrow}
The table below shows supported data types and how they match ClickHouse [data types](../sql-reference/data-types/index.md) in `INSERT` and `SELECT` queries.
| Arrow data type (`INSERT`) | ClickHouse data type | Arrow data type (`SELECT`) |
|-----------------------------|-----------------------------------------------------|-----------------------------|
| `UINT8`, `BOOL` | [UInt8](../sql-reference/data-types/int-uint.md) | `UINT8` |
| `INT8` | [Int8](../sql-reference/data-types/int-uint.md) | `INT8` |
| `UINT16` | [UInt16](../sql-reference/data-types/int-uint.md) | `UINT16` |
| `INT16` | [Int16](../sql-reference/data-types/int-uint.md) | `INT16` |
| `UINT32` | [UInt32](../sql-reference/data-types/int-uint.md) | `UINT32` |
| `INT32` | [Int32](../sql-reference/data-types/int-uint.md) | `INT32` |
| `UINT64` | [UInt64](../sql-reference/data-types/int-uint.md) | `UINT64` |
| `INT64` | [Int64](../sql-reference/data-types/int-uint.md) | `INT64` |
| `FLOAT`, `HALF_FLOAT` | [Float32](../sql-reference/data-types/float.md) | `FLOAT32` |
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `FLOAT64` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `UTF8` |
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `UTF8` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
Arrays can be nested and can have a value of the `Nullable` type as an argument.
ClickHouse supports configurable precision for the `Decimal` format. When executing an `INSERT` query, ClickHouse treats the Arrow `DECIMAL` data type as `Decimal128`.
Unsupported Arrow data types: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
Column data types in ClickHouse can differ from the data types of the corresponding fields of an Arrow file. When inserting data, ClickHouse interprets the data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) the data to the type set for the table column.
### Inserting Data {#inserting-data-arrow}
To insert data from an Arrow file into ClickHouse, use a command of the following form:
``` bash
$ cat filename.arrow | clickhouse-client --query="INSERT INTO some_table FORMAT Arrow"
```
### Selecting Data {#selecting-data-arrow}
To retrieve data from a ClickHouse table and save it to an Arrow file, use a command of the following form:
``` bash
$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filename.arrow}
```
## ArrowStream {#data-format-arrow-stream} ## ArrowStream {#data-format-arrow-stream}
`ArrowStream` is the Apache Arrow "stream mode" format. It is designed for in-memory stream processing. `ArrowStream` is the Apache Arrow "stream mode" format. It is designed for in-memory stream processing.
@ -1225,9 +1275,11 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `TIMESTAMP` | | `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `TIMESTAMP` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` | | `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` | | `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `-` | [Array](../sql-reference/data-types/array.md) | `LIST` | | `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
ClickHouse supports configurable precision for the `Decimal` format. When processing an `INSERT` query, ClickHouse treats the ORC `DECIMAL` data type as `Decimal128`. Arrays can be nested and can have a value of the `Nullable` type as an argument.
ClickHouse supports configurable precision for the `Decimal` format. When executing an `INSERT` query, ClickHouse treats the ORC `DECIMAL` data type as `Decimal128`.
Unsupported ORC data types: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. Unsupported ORC data types: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
@ -1397,4 +1449,3 @@ $ clickhouse-client --query "SELECT * FROM {some_table} FORMAT RawBLOB" | md5sum
``` text ``` text
f9725a22f9191e064120d718e26862a9 - f9725a22f9191e064120d718e26862a9 -
``` ```

View File

@ -38,6 +38,8 @@ toc_title: "Клиентские библиотеки от сторонних р
- Ruby - Ruby
- [ClickHouse (Ruby)](https://github.com/shlima/click_house) - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
- [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
- Rust
- [Klickhouse](https://github.com/Protryon/klickhouse)
- R - R
- [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r) - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
- [RClickhouse](https://github.com/IMSMWU/RClickhouse) - [RClickhouse](https://github.com/IMSMWU/RClickhouse)

View File

@ -1,6 +1,6 @@
# MergeTree Table Settings {#merge-tree-settings} # MergeTree Table Settings {#merge-tree-settings}
The setting values for all MergeTree tables can be viewed in the `system.merge_tree_settings` table; they can be overridden in the `merge_tree` section of `config.xml` or set in the `SETTINGS` section of each table. The settings of all MergeTree tables are collected in the `system.merge_tree_settings` table. They can be overridden in the `merge_tree` section of the `config.xml` file or set in the `SETTINGS` section of each table.
Example of overriding in `config.xml`: Example of overriding in `config.xml`:
@ -10,7 +10,7 @@
</merge_tree>
```
Пример установки `SETTINGS` для конкретной таблицы:
``` sql
CREATE TABLE foo
@ -22,7 +22,7 @@ ORDER BY tuple()
SETTINGS max_suspicious_broken_parts = 500;
```
Пример изменения настроек для конкретной таблицы при помощи команды `ALTER TABLE ... MODIFY SETTING`:
``` sql
ALTER TABLE foo
@ -31,7 +31,7 @@ ALTER TABLE foo
## parts_to_throw_insert {#parts-to-throw-insert}
Если число активных кусков в партиции больше значения `parts_to_throw_insert`, то INSERT прерывается с исключением: `Too many parts (N). Merges are processing significantly slower than inserts`.
Возможные значения:
@ -39,13 +39,13 @@ Eсли число кусков в партиции превышает знач
Значение по умолчанию: 300.
Чтобы производительность запросов `SELECT` стала максимальной, необходимо минимизировать количество обрабатываемых кусков, см. [Дизайн MergeTree](../../development/architecture.md#merge-tree).
Можно установить значение больше — 600 (1200) кусков. Тогда ошибка `Too many parts` будет появляться реже, но при этом могут возникнуть проблемы с фоновыми слияниями и производительностью `SELECT`-запросов.
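Например, поднять этот порог для конкретной таблицы можно командой `ALTER TABLE ... MODIFY SETTING` (имя таблицы в примере условное):

``` sql
ALTER TABLE my_table MODIFY SETTING parts_to_throw_insert = 600;
```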
## parts_to_delay_insert {#parts-to-delay-insert}
Если число кусков в партиции больше значения `parts_to_delay_insert`, то `INSERT` искусственно замедляется.
Возможные значения:
@ -53,31 +53,31 @@ Eсли число кусков в партиции превышает знач
Значение по умолчанию: 150.
ClickHouse искусственно выполняет `INSERT` дольше (добавляет sleep) так, чтобы куски сливались в фоновом процессе быстрее, чем добавляются.
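Количество замедленных и отклоненных вставок можно примерно оценить по счетчикам `DelayedInserts` и `RejectedInserts` в таблице `system.events` (набросок запроса):

``` sql
SELECT event, value, description
FROM system.events
WHERE event IN ('DelayedInserts', 'RejectedInserts');
```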
## inactive_parts_to_throw_insert {#inactive-parts-to-throw-insert}
Если число неактивных кусков в партиции больше значения `inactive_parts_to_throw_insert`, то `INSERT` прерывается с исключением `Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts`.
Возможные значения:
- Положительное целое число.
Значение по умолчанию: 0 (без ограничений).
## inactive_parts_to_delay_insert {#inactive-parts-to-delay-insert}
Если число неактивных кусков в партиции больше или равно значению `inactive_parts_to_delay_insert`, то `INSERT` искусственно замедляется. Это помогает, когда сервер не может быстро очистить неактивные куски.
Возможные значения:
- Положительное целое число.
Значение по умолчанию: 0 (без ограничений).
## max_delay_to_insert {#max-delay-to-insert}
Величина в секундах, которая используется для расчета задержки `INSERT` в случаях, когда число кусков в партиции больше значения [parts_to_delay_insert](#parts-to-delay-insert).
Возможные значения:
@ -87,17 +87,17 @@ ClickHouse искусственно выполняет `INSERT` дольше (д
Величина задержки (в миллисекундах) для `INSERT` вычисляется по формуле:
``` code
max_k = parts_to_throw_insert - parts_to_delay_insert
k = 1 + parts_count_in_partition - parts_to_delay_insert
delay_milliseconds = pow(max_delay_to_insert * 1000, k / max_k)
```
Т.е. если в партиции уже 299 кусков и parts_to_throw_insert = 300, parts_to_delay_insert = 150, а max_delay_to_insert = 1, то `INSERT` замедлится на `pow( 1 * 1000, (1 + 299 - 150) / (300 - 150) ) = 1000` миллисекунд.
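Для наглядности ту же формулу можно вычислить обычным запросом с теми же значениями настроек, что и в примере выше:

``` sql
SELECT pow(1 * 1000, (1 + 299 - 150) / (300 - 150)) AS delay_milliseconds; -- вернет 1000
```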
## max_parts_in_total {#max-parts-in-total}
Если суммарное число активных кусков во всех партициях таблицы больше значения `max_parts_in_total`, то INSERT прерывается с исключением `Too many parts (N)`.
Возможные значения:
@ -105,20 +105,22 @@ Eсли суммарное число активных кусков во все
Значение по умолчанию: 100000.
С большим числом кусков в таблице производительность запросов ClickHouse снижается, а время старта ClickHouse — увеличивается. Чаще всего это следствие неправильного дизайна (ошибки выбора стратегии партиционирования, например, слишком мелкие партиции).
## replicated_deduplication_window {#replicated-deduplication-window}
Количество хеш-сумм последних вставленных блоков, которые хранятся в Zookeeper.
Возможные значения:
- Положительное целое число.
- 0 (без ограничений).
Значение по умолчанию: 100.
Команда `Insert` создает один или несколько блоков (кусков). При вставке в Replicated таблицы ClickHouse для [дедупликации вставок](../../engines/table-engines/mergetree-family/replication.md) записывает в Zookeeper хеш-суммы созданных кусков. Но хранятся только последние `replicated_deduplication_window` хеш-сумм. Самые старые хеш-суммы удаляются из Zookeeper.
Большое значение `replicated_deduplication_window` замедляет `Insert`, так как приходится сравнивать большее количество хеш-сумм.
Хеш-сумма рассчитывается по названиям и типам полей, а также по данным вставленного куска (потока байт).
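Посмотреть текущие значения настроек дедупликации можно, например, запросом к `system.merge_tree_settings`:

``` sql
SELECT name, value
FROM system.merge_tree_settings
WHERE name LIKE 'replicated_deduplication_window%';
```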
## non_replicated_deduplication_window {#non-replicated-deduplication-window}
@ -135,7 +137,7 @@ Eсли суммарное число активных кусков во все
## replicated_deduplication_window_seconds {#replicated-deduplication-window-seconds}
Время хранения (в секундах) хеш-сумм вставленных блоков в Zookeeper. По истечении этого времени хеш-суммы удаляются.
Возможные значения:
@ -143,11 +145,11 @@ Eсли суммарное число активных кусков во все
Значение по умолчанию: 604800 (1 неделя).
Аналогично [replicated_deduplication_window](#replicated-deduplication-window), настройка `replicated_deduplication_window_seconds` задает время хранения хеш-сумм блоков для дедупликации `Insert`. Хеш-суммы старше значения `replicated_deduplication_window_seconds` удаляются из Zookeeper, даже если количество оставшихся хеш-сумм станет меньше чем `replicated_deduplication_window`.
## old_parts_lifetime {#old-parts-lifetime}
Время (в секундах) хранения неактивных кусков для защиты от потери данных при спонтанной перезагрузке сервера.
Возможные значения:
@ -155,12 +157,16 @@ Eсли суммарное число активных кусков во все
Значение по умолчанию: 480.
После объединения нескольких кусков в один новый ClickHouse помечает исходные куски как неактивные и удаляет их по прошествии `old_parts_lifetime` секунд.
Неактивные куски удаляются, если они не нужны для текущих запросов, т.е. если счетчик ссылок куска `refcount` имеет нулевое значение.
Неактивные куски удаляются не сразу, потому что при записи нового куска не вызывается `fsync`, т.е. некоторое время новый кусок находится только в оперативной памяти сервера (кеше О.С.). Т.о. при спонтанной перезагрузке сервера новый (смерженный) кусок может быть потерян или испорчен. В этом случае ClickHouse в процессе старта при проверке целостности кусков обнаружит проблему, вернет неактивные куски в список активных и позже заново их смержит. Сломанный кусок в этом случае переименовывается (добавляется префикс broken_) и перемещается в папку detached. Если проверка целостности не обнаруживает проблем в смерженном куске, то исходные неактивные куски переименовываются (добавляется префикс ignored_) и перемещаются в папку detached. При записи нового куска `fsync` не вызывается, поэтому неактивные куски удаляются позже. Это значит, что некоторое время новый кусок находится только в оперативной памяти сервера (кеш ОС). Если сервер перезагрузится спонтанно, новый слитый кусок может испортиться или потеряться.
Стандартное значение Linux dirty_expire_centisecs - 30 секунд (максимальное время, которое записанные данные хранятся только в оперативной памяти), но при больших нагрузках на дисковую систему, данные могут быть записаны намного позже. Экспериментально было найдено время - 480 секунд, за которое гарантированно новый кусок будет записан на диск. Во время запуска сервер ClickHouse проверяет целостность кусков.
Если новый (слитый) кусок поврежден, ClickHouse возвращает неактивные куски в список активных и позже снова выполняет слияние. В этом случае испорченный кусок получает новое имя (добавляется префикс `broken_`) и попадает в каталог `detached`.
Если проверка целостности не выявляет проблем в слитом куске, то исходные неактивные куски переименовываются (добавляется префикс `ignored_`) и перемещаются в каталог `detached`.
Стандартное для Linux значение `dirty_expire_centisecs` — 30 секунд. Это максимальное время, в течение которого записанные данные хранятся только в оперативной памяти. Если нагрузка на дисковую систему большая, то данные записываются намного позже. Значение 480 секунд подобрали экспериментальным путем — это время, за которое новый кусок гарантированно запишется на диск.
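Неактивные куски и их счетчики ссылок видны в системной таблице `system.parts`, например:

``` sql
SELECT database, table, name, active, refcount
FROM system.parts
WHERE active = 0
LIMIT 10;
```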
## replicated_fetches_http_connection_timeout {#replicated_fetches_http_connection_timeout}
@ -197,8 +203,8 @@ Eсли суммарное число активных кусков во все
## max_bytes_to_merge_at_max_space_in_pool {#max-bytes-to-merge-at-max-space-in-pool}
Максимальный суммарный размер кусков (в байтах) в одном слиянии, если есть свободные ресурсы в фоновом пуле.
`max_bytes_to_merge_at_max_space_in_pool` примерно соответствует максимально возможному размеру куска, созданного автоматическим фоновым слиянием.
Возможные значения:
@ -206,26 +212,27 @@ Eсли суммарное число активных кусков во все
Значение по умолчанию: 161061273600 (150ГБ).
Планировщик слияний периодически анализирует размер и количество кусков в партициях, и если в пуле хватает ресурсов, то начинает фоновое слияние. Слияния выполняются до тех пор, пока суммарный размер входных кусков не достигнет `max_bytes_to_merge_at_max_space_in_pool`.
Слияния, начатые по [OPTIMIZE FINAL](../../sql-reference/statements/optimize.md), не учитывают `max_bytes_to_merge_at_max_space_in_pool` и объединяют куски до тех пор, пока в партиции не останется один кусок, при условии что есть доступные ресурсы (свободное дисковое пространство).
## max_bytes_to_merge_at_min_space_in_pool {#max-bytes-to-merge-at-min-space-in-pool}
Максимальный суммарный размер кусков (в байтах) в одном слиянии при минимуме свободных ресурсов в фоновом пуле.
Возможные значения:
- Положительное целое число.
Значение по умолчанию: 1048576 (1 МБ).
`max_bytes_to_merge_at_min_space_in_pool` задает максимальный суммарный размер кусков, которые можно объединить несмотря на нехватку свободных ресурсов (дискового пространства) в фоновом пуле. Это нужно, чтобы уменьшить количество маленьких кусков и снизить вероятность ошибки `Too many parts`.
Слияния резервируют дисковое пространство, удваивая суммарный размер кусков в слиянии. Поэтому при малом объеме свободного места на диске может сложиться ситуация, когда свободное место есть, но оно уже зарезервировано текущими слияниями. Из-за этого другие слияния не начинаются, и количество маленьких кусков в партиции растет с каждым запросом `INSERT`.
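Оценить размер идущих слияний и занятые ими ресурсы можно по таблице `system.merges` (набросок запроса):

``` sql
SELECT
    database,
    table,
    num_parts,
    formatReadableSize(total_size_bytes_compressed) AS parts_size,
    formatReadableSize(memory_usage) AS memory
FROM system.merges;
```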
## merge_max_block_size {#merge-max-block-size}
Количество строк в блоках, которые читаются из объединяемых кусков.
Возможные значения:
@ -233,7 +240,7 @@ Eсли суммарное число активных кусков во все
Значение по умолчанию: 8192
Слияние читает строки из кусков блоками по `merge_max_block_size` строк, производит слияние и записывает результат в новый кусок. Читаемый блок помещается в оперативную память, т.е. `merge_max_block_size` влияет на размер оперативной памяти, необходимой для слияния. Таким образом, слияния могут потреблять большое количество оперативной памяти для таблиц, хранящих очень большие строки (если средний размер строки 100кб, то при слиянии 10 кусков будет использовано (100кб * 10 * 8192) =~ 8ГБ оперативной памяти). Уменьшив `merge_max_block_size`, можно сократить размер оперативной памяти, необходимой для слияния, но при этом процесс слияния замедлится.
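Оценку из примера выше можно проверить простым расчетом (условные значения: строки по 100 КБ, 10 кусков, блоки по 8192 строки):

``` sql
SELECT formatReadableSize(100 * 1024 * 10 * 8192) AS estimated_merge_memory; -- ≈ 7.81 GiB
```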
## max_part_loading_threads {#max-part-loading-threads}
@ -243,9 +250,9 @@ Eсли суммарное число активных кусков во все
- Положительное целое число.
Значение по умолчанию: определяется автоматически (по количеству ядер процессора).
На старте ClickHouse читает все куски из всех таблиц (читает файлы с метаданными кусков), чтобы построить в оперативной памяти список всех кусков. В некоторых системах с большим количеством кусков этот процесс может занимать длительное время. Это время можно сократить, увеличив `max_part_loading_threads` (если при этом хватает ресурсов процессора и диска).
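Найти таблицы с наибольшим количеством активных кусков (именно они сильнее всего замедляют старт) можно, например, так:

``` sql
SELECT database, table, count() AS active_parts
FROM system.parts
WHERE active
GROUP BY database, table
ORDER BY active_parts DESC
LIMIT 10;
```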
## max_partitions_to_read {#max-partitions-to-read}


@ -2078,7 +2078,7 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1;
- Положительное целое число.
Значение по умолчанию: 128.
## background_fetches_pool_size {#background_fetches_pool_size}
@ -2376,18 +2376,6 @@ SELECT * FROM system.events WHERE event='QueryMemoryLimitExceeded';
└──────────────────────────┴───────┴───────────────────────────────────────────────────────┘
```
## allow_experimental_bigint_types {#allow_experimental_bigint_types}
Включает или отключает поддержку целочисленных значений, превышающих максимальное значение, допустимое для типа `int`.
Возможные значения:
- 1 — большие целочисленные значения поддерживаются.
- 0 — большие целочисленные значения не поддерживаются.
Значение по умолчанию: `0`.
## lock_acquire_timeout {#lock_acquire_timeout}
Устанавливает, сколько секунд сервер ожидает возможности выполнить блокировку таблицы.
@ -2969,4 +2957,70 @@ SELECT
FROM fuse_tbl
```
## flatten_nested {#flatten-nested}
Устанавливает формат данных у [вложенных](../../sql-reference/data-types/nested-data-structures/nested.md) столбцов.
Возможные значения:
- 1 — вложенный столбец преобразуется к отдельным массивам.
- 0 — вложенный столбец преобразуется к массиву кортежей.
Значение по умолчанию: `1`.
**Использование**
Если установлено значение `0`, можно использовать любой уровень вложенности.
**Примеры**
Запрос:
``` sql
SET flatten_nested = 1;
CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();
SHOW CREATE TABLE t_nest;
```
Результат:
``` text
┌─statement───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
`n.a` Array(UInt32),
`n.b` Array(UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
Запрос:
``` sql
SET flatten_nested = 0;
CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();
SHOW CREATE TABLE t_nest;
```
Результат:
``` text
┌─statement──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
`n` Nested(a UInt32, b UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/) <!--hide-->


@ -2,7 +2,7 @@
Содержит экземпляры трассировки стека адресов вызова, собранные с помощью семплирующего профайлера запросов.
ClickHouse создает эту таблицу, когда установлена настройка [trace_log](../server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) в конфигурационном файле сервера, а также настройки [query_profiler_real_time_period_ns](../settings/settings.md#query_profiler_real_time_period_ns) и [query_profiler_cpu_time_period_ns](../settings/settings.md#query_profiler_cpu_time_period_ns).
Для анализа stack traces используйте функции интроспекции `addressToLine`, `addressToSymbol` и `demangle`.


@ -116,7 +116,7 @@ sequenceMatch(pattern)(timestamp, cond1, cond2, ...)
- `.*` — соответствует любому количеству событий. Для этого элемента шаблона не надо задавать условия.
- `(?t operator value)` — устанавливает время в секундах, которое должно разделять два события. Например, шаблон `(?1)(?t>1800)(?2)` соответствует событиям, которые произошли более чем через 1800 секунд друг от друга. Между этими событиями может находиться произвольное количество любых событий. Операторы могут быть `>=`, `>`, `<`, `<=`, `==`.
**Примеры**
@ -496,3 +496,258 @@ FROM
Решение: пишем в запросе GROUP BY SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5
```
## sequenceNextNode {#sequenceNextNode}
Возвращает значение следующего события, соответствующего цепочке событий.
_Экспериментальная функция, чтобы включить ее, выполните: `SET allow_experimental_funnel_functions = 1`._
**Синтаксис**
``` sql
sequenceNextNode(direction, base)(timestamp, event_column, base_condition, event1, event2, event3, ...)
```
**Параметры**
- `direction` — используется для навигации по направлениям.
- forward — двигаться вперед.
- backward — двигаться назад.
- `base` — используется для задания начальной точки.
- head — установить начальную точку на первое событие цепочки.
- tail — установить начальную точку на последнее событие цепочки.
- first_match — установить начальную точку на первое соответствующее событие `event1`.
- last_match — установить начальную точку на последнее соответствующее событие `event1`.
**Аргументы**
- `timestamp` — название столбца, содержащего `timestamp`. Поддерживаемые типы данных: [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) и другие беззнаковые целые типы.
- `event_column` — название столбца, содержащего значение следующего возвращаемого события. Поддерживаемые типы данных: [String](../../sql-reference/data-types/string.md) и [Nullable(String)](../../sql-reference/data-types/nullable.md).
- `base_condition` — условие, которому должна соответствовать исходная точка.
- `event1`, `event2`, ... — условия, описывающие цепочку событий. [UInt8](../../sql-reference/data-types/int-uint.md).
**Возвращаемые значения**
- `event_column[next_index]` — если есть совпадение с шаблоном и существует следующее значение.
- `NULL` — если нет совпадений с шаблоном или следующего значения не существует.
Тип: [Nullable(String)](../../sql-reference/data-types/nullable.md).
**Пример**
Функцию можно использовать, если есть цепочка событий A->B->C->D->E, и вы хотите определить событие, следующее за B->C, то есть D.
Запрос ищет событие после A->B:
``` sql
CREATE TABLE test_flow (
dt DateTime,
id int,
page String)
ENGINE = MergeTree()
PARTITION BY toYYYYMMDD(dt)
ORDER BY id;
INSERT INTO test_flow VALUES (1, 1, 'A') (2, 1, 'B') (3, 1, 'C') (4, 1, 'D') (5, 1, 'E');
SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'A', page = 'A', page = 'B') as next_flow FROM test_flow GROUP BY id;
```
Результат:
``` text
┌─id─┬─next_flow─┐
│ 1 │ C │
└────┴───────────┘
```
**Поведение для `forward` и `head`**
``` sql
ALTER TABLE test_flow DELETE WHERE 1 = 1 settings mutations_sync = 1;
INSERT INTO test_flow VALUES (1, 1, 'Home') (2, 1, 'Gift') (3, 1, 'Exit');
INSERT INTO test_flow VALUES (1, 2, 'Home') (2, 2, 'Home') (3, 2, 'Gift') (4, 2, 'Basket');
INSERT INTO test_flow VALUES (1, 3, 'Gift') (2, 3, 'Home') (3, 3, 'Gift') (4, 3, 'Basket');
```
``` sql
SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'Home', page = 'Home', page = 'Gift') FROM test_flow GROUP BY id;
dt id page
1970-01-01 09:00:01 1 Home // Исходная точка, совпадение с Home
1970-01-01 09:00:02 1 Gift // Совпадение с Gift
1970-01-01 09:00:03 1 Exit // Результат
1970-01-01 09:00:01 2 Home // Исходная точка, совпадение с Home
1970-01-01 09:00:02 2 Home // Несовпадение с Gift
1970-01-01 09:00:03 2 Gift
1970-01-01 09:00:04 2 Basket
1970-01-01 09:00:01 3 Gift // Исходная точка, несовпадение с Home
1970-01-01 09:00:02 3 Home
1970-01-01 09:00:03 3 Gift
1970-01-01 09:00:04 3 Basket
```
**Поведение для `backward` и `tail`**
``` sql
SELECT id, sequenceNextNode('backward', 'tail')(dt, page, page = 'Basket', page = 'Basket', page = 'Gift') FROM test_flow GROUP BY id;
dt id page
1970-01-01 09:00:01 1 Home
1970-01-01 09:00:02 1 Gift
1970-01-01 09:00:03 1 Exit // Исходная точка, несовпадение с Basket
1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home // Результат
1970-01-01 09:00:03 2 Gift // Совпадение с Gift
1970-01-01 09:00:04 2 Basket // Исходная точка, совпадение с Basket
1970-01-01 09:00:01 3 Gift
1970-01-01 09:00:02 3 Home // Результат
1970-01-01 09:00:03 3 Gift // Исходная точка, совпадение с Gift
1970-01-01 09:00:04 3 Basket // Исходная точка, совпадение с Basket
```
**Поведение для `forward` и `first_match`**
``` sql
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', page = 'Gift') FROM test_flow GROUP BY id;
dt id page
1970-01-01 09:00:01 1 Home
1970-01-01 09:00:02 1 Gift // Исходная точка
1970-01-01 09:00:03 1 Exit // Результат
1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home
1970-01-01 09:00:03 2 Gift // Исходная точка
1970-01-01 09:00:04 2 Basket // Результат
1970-01-01 09:00:01 3 Gift // Исходная точка
1970-01-01 09:00:02 3 Home // Результат
1970-01-01 09:00:03 3 Gift
1970-01-01 09:00:04 3 Basket
```
``` sql
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', page = 'Gift', page = 'Home') FROM test_flow GROUP BY id;
dt id page
1970-01-01 09:00:01 1 Home
1970-01-01 09:00:02 1 Gift // Исходная точка
1970-01-01 09:00:03 1 Exit // Несовпадение с Home
1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home
1970-01-01 09:00:03 2 Gift // Исходная точка
1970-01-01 09:00:04 2 Basket // Несовпадение с Home
1970-01-01 09:00:01 3 Gift // Исходная точка
1970-01-01 09:00:02 3 Home // Совпадение с Home
1970-01-01 09:00:03 3 Gift // Результат
1970-01-01 09:00:04 3 Basket
```
**Поведение для `backward` и `last_match`**
``` sql
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', page = 'Gift') FROM test_flow GROUP BY id;
dt id page
1970-01-01 09:00:01 1 Home // Результат
1970-01-01 09:00:02 1 Gift // Исходная точка
1970-01-01 09:00:03 1 Exit
1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home // Результат
1970-01-01 09:00:03 2 Gift // Исходная точка
1970-01-01 09:00:04 2 Basket
1970-01-01 09:00:01 3 Gift
1970-01-01 09:00:02 3 Home // Результат
1970-01-01 09:00:03 3 Gift // Исходная точка
1970-01-01 09:00:04 3 Basket
```
``` sql
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', page = 'Gift', page = 'Home') FROM test_flow GROUP BY id;
dt id page
1970-01-01 09:00:01 1 Home // Совпадение с Home, результат `Null`
1970-01-01 09:00:02 1 Gift // Исходная точка
1970-01-01 09:00:03 1 Exit
1970-01-01 09:00:01 2 Home // Результат
1970-01-01 09:00:02 2 Home // Совпадение с Home
1970-01-01 09:00:03 2 Gift // Исходная точка
1970-01-01 09:00:04 2 Basket
1970-01-01 09:00:01 3 Gift // Результат
1970-01-01 09:00:02 3 Home // Совпадение с Home
1970-01-01 09:00:03 3 Gift // Исходная точка
1970-01-01 09:00:04 3 Basket
```
**Поведение для `base_condition`**
``` sql
CREATE TABLE test_flow_basecond
(
`dt` DateTime,
`id` int,
`page` String,
`ref` String
)
ENGINE = MergeTree
PARTITION BY toYYYYMMDD(dt)
ORDER BY id;
INSERT INTO test_flow_basecond VALUES (1, 1, 'A', 'ref4') (2, 1, 'A', 'ref3') (3, 1, 'B', 'ref2') (4, 1, 'B', 'ref1');
```
``` sql
SELECT id, sequenceNextNode('forward', 'head')(dt, page, ref = 'ref1', page = 'A') FROM test_flow_basecond GROUP BY id;
dt id page ref
1970-01-01 09:00:01 1 A ref4 // Начало не может быть исходной точкой, поскольку столбец ref не соответствует 'ref1'.
1970-01-01 09:00:02 1 A ref3
1970-01-01 09:00:03 1 B ref2
1970-01-01 09:00:04 1 B ref1
```
``` sql
SELECT id, sequenceNextNode('backward', 'tail')(dt, page, ref = 'ref4', page = 'B') FROM test_flow_basecond GROUP BY id;
dt id page ref
1970-01-01 09:00:01 1 A ref4
1970-01-01 09:00:02 1 A ref3
1970-01-01 09:00:03 1 B ref2
1970-01-01 09:00:04 1 B ref1 // Конец не может быть исходной точкой, поскольку столбец ref не соответствует 'ref4'.
```
``` sql
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, ref = 'ref3', page = 'A') FROM test_flow_basecond GROUP BY id;
dt id page ref
1970-01-01 09:00:01 1 A ref4 // Эта строка не может быть исходной точкой, поскольку столбец ref не соответствует 'ref3'.
1970-01-01 09:00:02 1 A ref3 // Исходная точка
1970-01-01 09:00:03 1 B ref2 // Результат
1970-01-01 09:00:04 1 B ref1
```
``` sql
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, ref = 'ref2', page = 'B') FROM test_flow_basecond GROUP BY id;
dt id page ref
1970-01-01 09:00:01 1 A ref4
1970-01-01 09:00:02 1 A ref3 // Результат
1970-01-01 09:00:03 1 B ref2 // Исходная точка
1970-01-01 09:00:04 1 B ref1 // Эта строка не может быть исходной точкой, поскольку столбец ref не соответствует 'ref2'.
```


@ -1,40 +0,0 @@
---
toc_priority: 150
---
## initializeAggregation {#initializeaggregation}
Инициализирует агрегацию для введенных строк. Предназначена для функций с суффиксом `State`.
Поможет вам проводить тесты или работать со столбцами типов `AggregateFunction` и `AggregatingMergeTree`.
**Синтаксис**
``` sql
initializeAggregation (aggregate_function, column_1, column_2)
```
**Аргументы**
- `aggregate_function` — название функции агрегации, состояние которой нужно создать. [String](../../../sql-reference/data-types/string.md#string).
- `column_n` — столбец, который передается в функцию агрегации как аргумент. [String](../../../sql-reference/data-types/string.md#string).
**Возвращаемое значение**
Возвращает результат агрегации введенной информации. Тип возвращаемого значения такой же, как и для функции, которая становится первым аргументом для `initializeAggregation`.
Например, возвращаемый тип функций с суффиксом `State` — `AggregateFunction`.
**Пример**
Запрос:
```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM system.numbers LIMIT 10000);
```
Результат:
┌─uniqMerge(state)─┐
│ 3 │
└──────────────────┘


@ -21,7 +21,7 @@ toc_priority: 208
quantileTDigestWeighted(level)(expr, weight)
```
Синоним: `medianTDigestWeighted`.
**Аргументы**


@ -5,11 +5,9 @@ toc_title: Array(T)
# Array(T) {#data-type-array}
Массив из элементов типа `T`. `T` может быть любым типом, в том числе массивом. Таким образом, поддерживаются многомерные массивы.
## Создание массива {#creating-an-array}
Массив можно создать с помощью функции:
@ -45,7 +43,7 @@ SELECT [1, 2] AS x, toTypeName(x)
└───────┴────────────────────┘
```
## Особенности работы с типами данных {#working-with-data-types}
Максимальный размер массива ограничен одним миллионом элементов.
@ -76,3 +74,26 @@ Received exception from server (version 1.1.54388):
Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not.
```
## Размер массива {#array-size}
Узнать размер массива можно с помощью подстолбца `size0` без чтения всего столбца. Для многомерных массивов можно использовать подстолбец `sizeN-1`, где `N` — требуемое измерение.
**Пример**
Запрос:
```sql
CREATE TABLE t_arr (`arr` Array(Array(Array(UInt32)))) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO t_arr VALUES ([[[12, 13, 0, 1],[12]]]);
SELECT arr.size0, arr.size1, arr.size2 FROM t_arr;
```
Результат:
``` text
┌─arr.size0─┬─arr.size1─┬─arr.size2─┐
│ 1 │ [2] │ [[4,1]] │
└───────────┴───────────┴───────────┘
```


@ -29,7 +29,7 @@ CREATE TABLE test.visits
В этом примере объявлена вложенная структура данных `Goals`, содержащая данные о достижении целей. Каждой строке таблицы visits может соответствовать от нуля до произвольного количества достижений целей.
Поддерживается только один уровень вложенности. Столбцы вложенных структур, содержащие массивы, эквивалентны многомерным массивам, поэтому их поддержка ограничена (не поддерживается хранение таких столбцов в таблицах с движком семейства MergeTree). Если настройка [flatten_nested](../../../operations/settings/settings.md#flatten-nested) установлена в значение `0` (что не является значением по умолчанию), поддерживаются любые уровни вложенности.
В большинстве случаев, при работе с вложенной структурой данных, указываются отдельные её столбцы. Для этого, имена столбцов указываются через точку. Эти столбцы представляют собой массивы соответствующих типов. Все столбцы-массивы одной вложенной структуры данных имеют одинаковые длины.


@ -13,7 +13,7 @@ toc_title: Nullable
`NULL` — значение по умолчанию для типа `Nullable`, если в конфигурации сервера ClickHouse не указано иное.
## Особенности хранения {#storage-features}
Для хранения значения типа `Nullable` ClickHouse использует:
@ -27,7 +27,34 @@ toc_title: Nullable
!!! info "Info"
    Почти всегда использование `Nullable` снижает производительность, учитывайте это при проектировании своих баз.
## Поиск NULL {#finding-null}
Найти в столбце значения `NULL` можно с помощью подстолбца `null`, при этом весь столбец считывать не требуется. Подстолбец содержит `1`, если соответствующее значение равно `NULL`, и `0` если не равно.
**Пример**
Запрос:
``` sql
CREATE TABLE nullable (`n` Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO nullable VALUES (1) (NULL) (2) (NULL);
SELECT n.null FROM nullable;
```
Результат:
``` text
┌─n.null─┐
│ 0 │
│ 1 │
│ 0 │
│ 1 │
└────────┘
```
## Пример использования {#usage-example}
``` sql
CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog
@ -47,4 +74,3 @@ SELECT x + y from t_null
│ 5 │
└────────────┘
```


@ -47,3 +47,30 @@ SELECT tuple(1,NULL) AS x, toTypeName(x)
└──────────┴─────────────────────────────────┘
```
## Адресация элементов кортежа {#addressing-tuple-elements}
К элементам кортежа можно обращаться по индексу и по имени:
``` sql
CREATE TABLE named_tuples (`a` Tuple(s String, i Int64)) ENGINE = Memory;
INSERT INTO named_tuples VALUES (('y', 10)), (('x',-10));
SELECT a.s FROM named_tuples;
SELECT a.2 FROM named_tuples;
```
Результат:
``` text
┌─a.s─┐
│ y │
│ x │
└─────┘
┌─tupleElement(a, 2)─┐
│ 10 │
│ -10 │
└────────────────────┘
```


@ -61,6 +61,7 @@ SETTINGS(format_csv_allow_single_quotes = 0)
- [Локальный файл](#dicts-external_dicts_dict_sources-local_file)
- [Исполняемый файл](#dicts-external_dicts_dict_sources-executable)
- [Исполняемый пул](#dicts-external_dicts_dict_sources-executable_pool)
- [HTTP(s)](#dicts-external_dicts_dict_sources-http)
- СУБД:
- [ODBC](#dicts-external_dicts_dict_sources-odbc)
@ -69,6 +70,7 @@ SETTINGS(format_csv_allow_single_quotes = 0)
- [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse)
- [MongoDB](#dicts-external_dicts_dict_sources-mongodb)
- [Redis](#dicts-external_dicts_dict_sources-redis)
- [Cassandra](#dicts-external_dicts_dict_sources-cassandra)
- [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql)
## Локальный файл {#dicts-external_dicts_dict_sources-local_file}
@ -93,7 +95,7 @@ SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated'))
Поля настройки:
- `path` — абсолютный путь к файлу.
- `format` — формат файла. Поддерживаются все форматы, описанные в разделе [Форматы](../../../interfaces/formats.md#formats).
Если словарь с источником `FILE` создается с помощью DDL-команды (`CREATE DICTIONARY ...`), источник словаря должен быть расположен в каталоге `user_files`. Иначе пользователи базы данных будут иметь доступ к произвольному файлу на узле ClickHouse.
@ -112,6 +114,7 @@ SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated'))
<executable>
<command>cat /opt/dictionaries/os.tsv</command>
<format>TabSeparated</format>
<implicit_key>false</implicit_key>
</executable>
</source>
```
@ -119,9 +122,41 @@ SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated'))
Поля настройки:
- `command` — абсолютный путь к исполняемому файлу или имя файла (если каталог программы прописан в `PATH`).
- `format` — формат файла. Поддерживаются все форматы, описанные в разделе [Форматы](../../../interfaces/formats.md#formats).
- `implicit_key` — исходный исполняемый файл может возвращать только значения, а соответствие запрошенным ключам определено неявно — порядком строк в результате. Значение по умолчанию: false. Необязательный параметр.
Этот источник словаря может быть настроен только с помощью XML-конфигурации. Создание словарей с исполняемым источником с помощью DDL запрещено. Иначе пользователь сможет выполнить произвольный бинарный файл на сервере ClickHouse.
## Исполняемый пул {#dicts-external_dicts_dict_sources-executable_pool}
Исполняемый пул позволяет загружать данные из пула процессов. Этот источник не работает со словарями, которые требуют загрузки всех данных из источника. Исполняемый пул работает со словарями, которые размещаются [следующими способами](external-dicts-dict-layout.md#ways-to-store-dictionaries-in-memory): `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, `complex_key_direct`.
Исполняемый пул генерирует пул процессов с помощью указанной команды и оставляет их активными, пока они не завершатся. Программа считывает данные из потока STDIN пока он доступен и выводит результат в поток STDOUT, а затем ожидает следующего блока данных из STDIN. ClickHouse не закрывает поток STDIN после обработки блока данных и отправляет в него следующую порцию данных, когда это требуется. Исполняемый скрипт должен быть готов к такому способу обработки данных — он должен заранее опрашивать STDIN и отправлять данные в STDOUT.
Пример настройки:
``` xml
<source>
<executable_pool>
<command>while read key; do printf "$key\tData for key $key\n"; done</command>
<format>TabSeparated</format>
<pool_size>10</pool_size>
<max_command_execution_time>10</max_command_execution_time>
<implicit_key>false</implicit_key>
</executable_pool>
</source>
```
Поля настройки:
- `command` — абсолютный путь к файлу или имя файла (если каталог программы записан в `PATH`).
- `format` — формат файла. Поддерживаются все форматы, описанные в разделе [Форматы](../../../interfaces/formats.md#formats).
- `pool_size` — размер пула. Если в поле `pool_size` указан 0, то размер пула не ограничен.
- `command_termination_timeout` — скрипт исполняемого пула должен включать основной цикл чтения-записи. После уничтожения словаря канал закрывается. При этом исполняемый файл имеет `command_termination_timeout` секунд для завершения работы, прежде чем ClickHouse пошлет сигнал SIGTERM дочернему процессу. Указывается в секундах. Значение по умолчанию: 10. Необязательный параметр.
- `max_command_execution_time` — максимальное количество времени для исполняемого скрипта на обработку блока данных. Указывается в секундах. Значение по умолчанию: 10. Необязательный параметр.
- `implicit_key` — исходный исполняемый файл может возвращать только значения, а соответствие запрошенным ключам определено неявно — порядком строк в результате. Значение по умолчанию: false. Необязательный параметр.
Этот источник словаря может быть настроен только с помощью XML-конфигурации. Создание словарей с исполняемым источником с помощью DDL запрещено. Иначе пользователь сможет выполнить произвольный бинарный файл на сервере ClickHouse.
## HTTP(s) {#dicts-external_dicts_dict_sources-http}
@ -451,6 +486,7 @@ LIFETIME(MIN 300 MAX 360)
<table>table_name</table>
<where>id=10</where>
<invalidate_query>SQL_QUERY</invalidate_query>
<fail_on_connection_loss>true</fail_on_connection_loss>
</mysql>
</source>
```
@ -468,6 +504,7 @@ SOURCE(MYSQL(
table 'table_name'
where 'id=10'
invalidate_query 'SQL_QUERY'
fail_on_connection_loss 'true'
))
```
@ -492,6 +529,8 @@ SOURCE(MYSQL(
- `invalidate_query` — запрос для проверки статуса словаря. Необязательный параметр. Читайте подробнее в разделе [Обновление словарей](external-dicts-dict-lifetime.md).
- `fail_on_connection_loss` параметр конфигурации, контролирующий поведение сервера при потере соединения. Если значение `true`, то исключение генерируется сразу же, если соединение между клиентом и сервером было потеряно. Если значение `false`, то сервер повторно попытается выполнить запрос три раза прежде чем сгенерировать исключение. Имейте в виду, что повторные попытки могут увеличить время выполнения запроса. Значение по умолчанию: `false`.
MySQL можно подключить на локальном хосте через сокеты, для этого необходимо задать `host` и `socket`.
Пример настройки:
@ -507,6 +546,7 @@ MySQL можно подключить на локальном хосте чер
<table>table_name</table>
<where>id=10</where>
<invalidate_query>SQL_QUERY</invalidate_query>
<fail_on_connection_loss>true</fail_on_connection_loss>
</mysql>
</source>
```
@ -523,6 +563,7 @@ SOURCE(MYSQL(
table 'table_name'
where 'id=10'
invalidate_query 'SQL_QUERY'
fail_on_connection_loss 'true'
))
```
@ -729,4 +770,3 @@ Setting fields:
- `where` — условие выборки. Синтаксис для условий такой же, как для выражения `WHERE` в PostgreSQL, например, `id > 10 AND id < 20`. Необязательный параметр.
- `invalidate_query` — запрос для проверки условия загрузки словаря. Необязательный параметр. Читайте больше в разделе [Обновление словарей](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).


@ -39,10 +39,49 @@ toc_title: "Массивы"
Принимает пустой массив и возвращает массив из одного элемента, равного значению по умолчанию.
## range(end), range(\[start, \] end \[, step\]) {#range}
Возвращает массив чисел от `start` до `end - 1` с шагом `step`.
**Синтаксис**
``` sql
range([start, ] end [, step])
```
**Аргументы**
- `start` — начало диапазона. Обязательно, когда указан `step`. По умолчанию равно `0`. Тип: [UInt](../data-types/int-uint.md)
- `end` — конец диапазона. Обязательный аргумент. Должен быть больше, чем `start`. Тип: [UInt](../data-types/int-uint.md)
- `step` — шаг обхода. Необязательный аргумент. По умолчанию равен `1`. Тип: [UInt](../data-types/int-uint.md)
**Возвращаемые значения**
- массив `UInt` чисел от `start` до `end - 1` с шагом `step`
**Особенности реализации**
- Не поддерживаются отрицательные значения аргументов: `start`, `end`, `step` имеют тип `UInt`.
- Если в результате запроса создаются массивы суммарной длиной больше 100 000 000 элементов, то генерируется исключение.
**Примеры**
Запрос:
``` sql
SELECT range(5), range(1, 5), range(1, 5, 2);
```
Ответ:
```txt
┌─range(5)────┬─range(1, 5)─┬─range(1, 5, 2)─┐
│ [0,1,2,3,4] │ [1,2,3,4] │ [1,3] │
└─────────────┴─────────────┴────────────────┘
```
## array(x1, …), оператор \[x1, …\] {#arrayx1-operator-x1}
@ -1576,4 +1615,4 @@ SELECT arrayProduct([toDecimal64(1,8), toDecimal64(2,8), toDecimal64(3,8)]) as r
┌─res─┬─toTypeName(arrayProduct(array(toDecimal64(1, 8), toDecimal64(2, 8), toDecimal64(3, 8))))─┐
│ 6 │ Float64 │
└─────┴──────────────────────────────────────────────────────────────────────────────────────────┘
```


@ -13,7 +13,7 @@ toc_title: "Прочие функции"
Возвращает именованное значение из секции [macros](../../operations/server-configuration-parameters/settings.md#macros) конфигурации сервера.
**Синтаксис**
```sql
getMacro(name)
@ -854,8 +854,8 @@ WHERE diff != 1
## runningConcurrency {#runningconcurrency}
Подсчитывает количество одновременно идущих событий.
У каждого события есть время начала и время окончания. Считается, что время начала включено в событие, а время окончания исключено из него. Столбцы со временем начала и окончания событий должны иметь одинаковый тип данных.
Функция подсчитывает количество событий, происходящих одновременно на момент начала каждого из событий в выборке.
!!! warning "Предупреждение"
    События должны быть отсортированы по возрастанию времени начала. Если это требование нарушено, то функция вызывает исключение.
@ -1371,11 +1371,84 @@ SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesy
└───────────┴────────┘
```
## initializeAggregation {#initializeaggregation}
Вычисляет результат агрегатной функции для каждой строки. Предназначена для инициализации агрегатных функций с комбинатором [-State](../../sql-reference/aggregate-functions/combinators.md#state). Может быть полезна для создания состояний агрегатных функций для последующей их вставки в столбцы типа [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction) или использования в качестве значений по умолчанию.
**Синтаксис**
``` sql
initializeAggregation (aggregate_function, arg1, arg2, ..., argN)
```
**Аргументы**
- `aggregate_function` — название агрегатной функции, состояние которой нужно создать. [String](../../sql-reference/data-types/string.md#string).
- `arg` — аргументы, которые передаются в агрегатную функцию.
**Возвращаемое значение**
- В каждой строке результат агрегатной функции, примененной к аргументам из этой строки.
Тип возвращаемого значения такой же, как и у функции, переданной первым аргументом.
**Пример**
Запрос:
```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM numbers(10000));
```
Результат:
```text
┌─uniqMerge(state)─┐
│ 3 │
└──────────────────┘
```
Запрос:
```sql
SELECT finalizeAggregation(state), toTypeName(state) FROM (SELECT initializeAggregation('sumState', number % 3) AS state FROM numbers(5));
```
Результат:
```text
┌─finalizeAggregation(state)─┬─toTypeName(state)─────────────┐
│ 0 │ AggregateFunction(sum, UInt8) │
│ 1 │ AggregateFunction(sum, UInt8) │
│ 2 │ AggregateFunction(sum, UInt8) │
│ 0 │ AggregateFunction(sum, UInt8) │
│ 1 │ AggregateFunction(sum, UInt8) │
└────────────────────────────┴───────────────────────────────┘
```
Пример с движком таблиц `AggregatingMergeTree` и столбцом типа `AggregateFunction`:
```sql
CREATE TABLE metrics
(
key UInt64,
value AggregateFunction(sum, UInt64) DEFAULT initializeAggregation('sumState', toUInt64(0))
)
ENGINE = AggregatingMergeTree
ORDER BY key
```
```sql
INSERT INTO metrics VALUES (0, initializeAggregation('sumState', toUInt64(42)))
```
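As a small read-back sketch (not part of the original example), the partial states stored in the `value` column can be merged at query time with the corresponding `-Merge` combinator:
```sql
-- Merge the stored sum states back into plain totals when reading.
SELECT key, sumMerge(value) AS total
FROM metrics
GROUP BY key;
```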
**See also**
- [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce)
## finalizeAggregation {#function-finalizeaggregation}
Takes a state of an aggregate function and returns the result of aggregation (or the finalized state when using the [-State](../../sql-reference/aggregate-functions/combinators.md#state) combinator).
**Syntax**
``` sql
finalizeAggregation(state)
@ -1421,7 +1494,7 @@ SELECT finalizeAggregation(( SELECT sumState(number) FROM numbers(10)));
└──────────────────────────────────┘
```
Note that `NULL` values are ignored.
Query:
@ -1470,7 +1543,7 @@ FROM numbers(10);
**See also**
- [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce)
- [initializeAggregation](#initializeaggregation)
## runningAccumulate {#runningaccumulate}
@ -1537,13 +1610,13 @@ SELECT k, runningAccumulate(sum_k) AS res FROM (SELECT number as k, sumState(k)
Query:
```sql
SELECT
    grouping,
    item,
    runningAccumulate(state, grouping) AS res
FROM
(
    SELECT
        toInt8(number / 4) AS grouping,
        number AS item,
        sumState(number) AS state
@ -1732,7 +1805,7 @@ SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers
randomString(length)
```
**Arguments**
- `length` — the length of the string. A positive integer.
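A minimal illustrative query (a sketch; the generated bytes are random, so the output differs between runs):
```sql
-- The generated string contains arbitrary bytes; length(str) is measured in bytes.
SELECT randomString(30) AS str, length(str) AS len FROM numbers(2);
```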
@ -1831,13 +1904,13 @@ randomStringUTF8(length)
Query:
```sql
SELECT randomStringUTF8(13)
```
Result:
```text
┌─randomStringUTF8(13)─┐
│ 𘤗𙉝д兠庇󡅴󱱎󦐪􂕌𔊹𓰛 │
└──────────────────────┘
@ -1848,13 +1921,13 @@ SELECT randomStringUTF8(13)
Returns the current value of a [custom setting](../../operations/settings/index.md#custom_settings).
**Syntax**
```sql
getSetting('custom_setting')
```
**Parameter**
- `custom_setting` — the name of the setting. [String](../../sql-reference/data-types/string.md).
@ -1866,7 +1939,7 @@ getSetting('custom_setting')
```sql
SET custom_a = 123;
SELECT getSetting('custom_a');
```
**Result**
@ -1875,7 +1948,7 @@ SELECT getSetting('custom_a');
123
```
**See also**
- [Custom settings](../../operations/settings/index.md#custom_settings)
@ -1889,10 +1962,10 @@ SELECT getSetting('custom_a');
isDecimalOverflow(d, [p])
```
**Arguments**
- `d` — a number. [Decimal](../../sql-reference/data-types/decimal.md).
- `p` — precision. Optional. If omitted, the initial precision of the first argument is used. This parameter can be helpful when migrating data to another DBMS or file. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges).
**Returned value**
@ -1926,7 +1999,7 @@ SELECT isDecimalOverflow(toDecimal32(1000000000, 0), 9),
countDigits(x)
```
**Arguments**
- `x` — an [integer](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64) or [decimal](../../sql-reference/data-types/decimal.md) number.
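An illustrative query sketch; for `Decimal` arguments the result also reflects the scale, because the digits of the underlying representation are counted:
```sql
-- Count the decimal digits needed to represent each value.
SELECT countDigits(toDecimal32(1, 9)), countDigits(toDecimal64(1, 18)), countDigits(toDecimal128(1, 38));
```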


@ -189,6 +189,23 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL '4' day + INTERV
└─────────────────────┴────────────────────────────────────────────────────────────┘
```
You can change a date without using the `INTERVAL` syntax by simply adding or subtracting seconds, minutes, and hours. For example, to move a date one day forward you can add `60*60*24` to it.
!!! note "Note"
    The `INTERVAL` syntax or the `addDays` function is preferable for working with dates. Adding a plain number (for example, the `now() + ...` syntax) does not take regional time settings, such as daylight saving time, into account.
Example:
``` sql
SELECT toDateTime('2014-10-26 00:00:00', 'Europe/Moscow') AS time, time + 60 * 60 * 24 AS time_plus_24_hours, time + toIntervalDay(1) AS time_plus_1_day;
```
``` text
┌────────────────time─┬──time_plus_24_hours─┬─────time_plus_1_day─┐
│ 2014-10-26 00:00:00 │ 2014-10-26 23:00:00 │ 2014-10-27 00:00:00 │
└─────────────────────┴─────────────────────┴─────────────────────┘
```
**See also**
- The [Interval](../../sql-reference/operators/index.md) data type
@ -296,4 +313,3 @@ SELECT * FROM t_null WHERE y IS NOT NULL
│ 2 │ 3 │
└───┴───┘
```


@ -362,6 +362,79 @@ SHOW [CURRENT] QUOTA
SHOW ACCESS
```
## SHOW CLUSTER(s) {#show-cluster-statement}
Returns a list of clusters. All available clusters are listed in the [system.clusters](../../operations/system-tables/clusters.md) table.
!!! info "Note"
    The `SHOW CLUSTER name` query displays the contents of the system.clusters table for the specified cluster.
### Syntax {#show-cluster-syntax}
``` sql
SHOW CLUSTER '<name>'
SHOW CLUSTERS [LIKE|NOT LIKE '<pattern>'] [LIMIT <N>]
```
### Examples {#show-cluster-examples}
Query:
``` sql
SHOW CLUSTERS;
```
Result:
```text
┌─cluster──────────────────────────────────────┐
│ test_cluster_two_shards │
│ test_cluster_two_shards_internal_replication │
│ test_cluster_two_shards_localhost │
│ test_shard_localhost │
│ test_shard_localhost_secure │
│ test_unavailable_shard │
└──────────────────────────────────────────────┘
```
Query:
``` sql
SHOW CLUSTERS LIKE 'test%' LIMIT 1;
```
Result:
```text
┌─cluster─────────────────┐
│ test_cluster_two_shards │
└─────────────────────────┘
```
Query:
``` sql
SHOW CLUSTER 'test_shard_localhost' FORMAT Vertical;
```
Result:
```text
Row 1:
──────
cluster: test_shard_localhost
shard_num: 1
shard_weight: 1
replica_num: 1
host_name: localhost
host_address: 127.0.0.1
port: 9000
is_local: 1
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
```
## SHOW SETTINGS {#show-settings}
Returns a list of system settings and their values. Uses data from the [system.settings](../../operations/system-tables/settings.md) table.
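For illustration, a minimal query sketch that filters settings by name:
``` sql
SHOW SETTINGS LIKE 'send_timeout';
```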
@ -426,4 +499,3 @@ SHOW CHANGED SETTINGS ILIKE '%MEMORY%'
**See also**
- The [system.settings](../../operations/system-tables/settings.md) table


@ -47,6 +47,13 @@ When all prerequisites are installed, running `build.py` without args (there are
The easiest way to see the result is to use the `--livereload=8888` argument of build.py. Alternatively, you can manually launch an HTTP server to serve the docs, for example by running `cd ClickHouse/docs/build && python3 -m http.server 8888`. Then go to http://localhost:8888 in a browser. Feel free to use any other port instead of 8888.
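A typical invocation might look like the sketch below (the `docs/tools` location of `build.py` inside the checkout is an assumption here):
```bash
# Build the docs and serve them with live reload on port 8888 (path assumed).
cd ClickHouse/docs/tools
./build.py --livereload=8888
```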
## How to change code highlighting? {#how-to-change-code-hl}
ClickHouse does not use the mkdocs `highlightjs` feature; it uses modified Pygments styles instead.
If you want to change the code highlighting, edit the `website/css/highlight.css` file.
Currently, the [eighties](https://github.com/idleberg/base16-pygments/blob/master/css/base16-eighties.dark.css) theme is used.
## How to subscribe to documentation changes? {#how-to-subscribe-on-documentation-changes}
At the moment there's no easy way to do just that, but you can consider:
