Commit 8921041706: Merge branch 'master' into merging_external_source_cassandra
@@ -15,15 +15,27 @@ using Poco::Message;
 using DB::LogsLevel;
 using DB::CurrentThread;
 
-/// Logs a message to a specified logger with that level.
-
-#define LOG_IMPL(logger, priority, PRIORITY, ...) do \
+namespace
+{
+    template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
+    template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
+}
+
+/// Logs a message to a specified logger with that level.
+/// If more than one argument is provided,
+/// the first argument is interpreted as template with {}-substitutions
+/// and the latter arguments treat as values to substitute.
+/// If only one argument is provided, it is threat as message without substitutions.
+
+#define LOG_IMPL(logger, priority, PRIORITY, ...) do \
 { \
     const bool is_clients_log = (CurrentThread::getGroup() != nullptr) && \
         (CurrentThread::getGroup()->client_logs_level >= (priority)); \
     if ((logger)->is((PRIORITY)) || is_clients_log) \
     { \
-        std::string formatted_message = fmt::format(__VA_ARGS__); \
+        std::string formatted_message = numArgs(__VA_ARGS__) > 1 ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__); \
         if (auto channel = (logger)->getChannel()) \
        { \
             std::string file_function; \
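To make the dispatch in the hunk above concrete, here is a minimal, self-contained sketch of the same trick. `SKETCH_LOG` is a hypothetical stand-in for `LOG_IMPL` (it prints to stdout instead of a Poco channel), and it assumes an fmt version whose plain-message branch can carry arbitrary text; with current fmt releases a single-argument message containing stray braces would additionally need `fmt::runtime`.

#include <cstddef>
#include <fmt/format.h>
#include <iostream>
#include <string>
#include <utility>

namespace
{
    /// Counts the macro's arguments at compile time.
    template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
    /// Returns the first argument unchanged, ignoring the rest.
    template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
}

#define SKETCH_LOG(...) do \
{ \
    /* More than one argument: the first is an fmt template. Exactly one: pass it through untouched. */ \
    std::string formatted_message = numArgs(__VA_ARGS__) > 1 ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__); \
    std::cout << formatted_message << '\n'; \
} while (false)

int main()
{
    SKETCH_LOG("a plain message, no substitutions performed");
    SKETCH_LOG("rows read: {}, bytes read: {}", 120, 4096);
}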
cmake/autogenerated_versions.txt (new file, 9 lines)
@@ -0,0 +1,9 @@
+# This strings autochanged from release_lib.sh:
+SET(VERSION_REVISION 54435)
+SET(VERSION_MAJOR 20)
+SET(VERSION_MINOR 5)
+SET(VERSION_PATCH 1)
+SET(VERSION_GITHASH 91df18a906dcffdbee6816e5389df6c65f86e35f)
+SET(VERSION_DESCRIBE v20.5.1.1-prestable)
+SET(VERSION_STRING 20.5.1.1)
+# end of autochange
@@ -112,16 +112,21 @@ if (PROTOBUF_GENERATE_CPP_SCRIPT_MODE)
   set (intermediate_dir ${DIR}/intermediate)
   set (intermediate_output "${intermediate_dir}/${FILENAME}")
 
-  if (COMPILER_ID STREQUAL "GNU")
+  if (COMPILER_ID STREQUAL "Clang")
+    set (pragma_push "#pragma clang diagnostic push\n")
+    set (pragma_pop "#pragma clang diagnostic pop\n")
+    set (pragma_disable_warnings "#pragma clang diagnostic ignored \"-Weverything\"\n")
+  elseif (COMPILER_ID MATCHES "GNU")
     set (pragma_push "#pragma GCC diagnostic push\n")
     set (pragma_pop "#pragma GCC diagnostic pop\n")
     set (pragma_disable_warnings "#pragma GCC diagnostic ignored \"-Wall\"\n"
       "#pragma GCC diagnostic ignored \"-Wextra\"\n"
-      "#pragma GCC diagnostic ignored \"-Warray-bounds\"\n")
-  elseif (COMPILER_ID MATCHES "Clang")
-    set (pragma_push "#pragma clang diagnostic push\n")
-    set (pragma_pop "#pragma clang diagnostic pop\n")
-    set (pragma_disable_warnings "#pragma clang diagnostic ignored \"-Weverything\"\n")
+      "#pragma GCC diagnostic ignored \"-Warray-bounds\"\n"
+      "#pragma GCC diagnostic ignored \"-Wold-style-cast\"\n"
+      "#pragma GCC diagnostic ignored \"-Wshadow\"\n"
+      "#pragma GCC diagnostic ignored \"-Wsuggest-override\"\n"
+      "#pragma GCC diagnostic ignored \"-Wcast-qual\"\n"
+      "#pragma GCC diagnostic ignored \"-Wunused-parameter\"\n")
   endif()
 
   if (${FILENAME} MATCHES ".*\\.h")
@@ -1,12 +1,4 @@
-# This strings autochanged from release_lib.sh:
-set(VERSION_REVISION 54435)
-set(VERSION_MAJOR 20)
-set(VERSION_MINOR 5)
-set(VERSION_PATCH 1)
-set(VERSION_GITHASH 91df18a906dcffdbee6816e5389df6c65f86e35f)
-set(VERSION_DESCRIBE v20.5.1.1-prestable)
-set(VERSION_STRING 20.5.1.1)
-# end of autochange
+include(${CMAKE_SOURCE_DIR}/cmake/autogenerated_versions.txt)
 
 set(VERSION_EXTRA "" CACHE STRING "")
 set(VERSION_TWEAK "" CACHE STRING "")
cmake/yandex/ya.make.versions.inc (new file, 25 lines)
@@ -0,0 +1,25 @@
+INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/autogenerated_versions.txt)
+
+# TODO: not sure if this is customizable per-binary
+SET(VERSION_NAME "ClickHouse")
+
+# TODO: not quite sure how to replace dash with space in ya.make
+SET(VERSION_FULL "${VERSION_NAME}-${VERSION_STRING}")
+
+CFLAGS (GLOBAL -DDBMS_NAME=\"ClickHouse\")
+CFLAGS (GLOBAL -DDBMS_VERSION_MAJOR=${VERSION_MAJOR})
+CFLAGS (GLOBAL -DDBMS_VERSION_MINOR=${VERSION_MINOR})
+CFLAGS (GLOBAL -DDBMS_VERSION_PATCH=${VERSION_PATCH})
+CFLAGS (GLOBAL -DVERSION_FULL=\"\\\"${VERSION_FULL}\\\"\")
+CFLAGS (GLOBAL -DVERSION_MAJOR=${VERSION_MAJOR})
+CFLAGS (GLOBAL -DVERSION_MINOR=${VERSION_MINOR})
+CFLAGS (GLOBAL -DVERSION_PATCH=${VERSION_PATCH})
+
+# TODO: not supported yet, not sure if ya.make supports arithmetics.
+CFLAGS (GLOBAL -DVERSION_INTEGER=0)
+
+CFLAGS (GLOBAL -DVERSION_NAME=\"\\\"${VERSION_NAME}\\\"\")
+CFLAGS (GLOBAL -DVERSION_OFFICIAL=\"-arcadia\")
+CFLAGS (GLOBAL -DVERSION_REVISION=${VERSION_REVISION})
+CFLAGS (GLOBAL -DVERSION_STRING=\"\\\"${VERSION_STRING}\\\"\")
@@ -618,7 +618,12 @@ if (USE_INTERNAL_CCTZ)
 
     add_library(tzdata STATIC ${TZ_OBJS})
     set_target_properties(tzdata PROPERTIES LINKER_LANGUAGE C)
-    target_link_libraries(cctz -Wl,--whole-archive tzdata -Wl,--no-whole-archive) # whole-archive prevents symbols from being discarded
+    # whole-archive prevents symbols from being discarded for unknown reason
+    # CMake can shuffle each of target_link_libraries arguments with other
+    # libraries in linker command. To avoid this we hardcode whole-archive
+    # library into single string.
+    add_dependencies(cctz tzdata)
+    target_link_libraries(cctz INTERFACE "-Wl,--whole-archive $<TARGET_FILE:tzdata> -Wl,--no-whole-archive")
 endif ()
 
 else ()
contrib/jemalloc (vendored submodule)
@@ -1 +1 @@
-Subproject commit cd2931ad9bbd78208565716ab102e86d858c2fff
+Subproject commit ea6b3e973b477b8061e0076bb257dbd7f3faa756
@@ -17,7 +17,13 @@ if (ENABLE_JEMALLOC)
     #
     # By enabling percpu_arena number of arenas limited to number of CPUs and hence
     # this problem should go away.
-    set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu" CACHE STRING "Change default configuration string of JEMalloc" )
+    set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0")
+    # CACHE variable is empty, to allow changing defaults without necessity
+    # to purge cache
+    set (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE "" CACHE STRING "Change default configuration string of JEMalloc" )
+    if (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE)
+        set (JEMALLOC_CONFIG_MALLOC_CONF "${JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE}")
+    endif()
+    message (STATUS "jemalloc malloc_conf: ${JEMALLOC_CONFIG_MALLOC_CONF}")
 
     set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/jemalloc")
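For orientation, the configuration string baked in above is the same knob jemalloc exposes to any client application at run time. The sketch below is not ClickHouse code; it assumes a program linked against jemalloc and uses the documented `malloc_conf` global (the option names are jemalloc's, not ClickHouse's).

#include <cstdio>
#include <cstdlib>

// jemalloc reads this global during its initialization, before main() runs;
// it has the same effect as the CMake-configured default string above.
extern "C" {
const char * malloc_conf = "percpu_arena:percpu,oversize_threshold:0";
}

int main()
{
    void * p = std::malloc(64); // served under the per-CPU arena configuration
    std::free(p);
    std::puts("jemalloc configured via malloc_conf");
}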
@@ -55,6 +61,7 @@ if (ENABLE_JEMALLOC)
     ${LIBRARY_DIR}/src/ticker.c
     ${LIBRARY_DIR}/src/tsd.c
     ${LIBRARY_DIR}/src/witness.c
+    ${LIBRARY_DIR}/src/safety_check.c
 )
 if (OS_DARWIN)
     list(APPEND SRCS ${LIBRARY_DIR}/src/zone.c)
@@ -89,6 +96,8 @@ if (ENABLE_JEMALLOC)
 endif ()
 
 target_compile_options(jemalloc PRIVATE -Wno-redundant-decls)
+# for RTLD_NEXT
+target_compile_options(jemalloc PRIVATE -D_GNU_SOURCE)
 else ()
     find_library(LIBRARY_JEMALLOC jemalloc)
     find_path(INCLUDE_JEMALLOC jemalloc/jemalloc.h)
@@ -5,6 +5,12 @@
 /* Defined if alloc_size attribute is supported. */
 #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
 
+/* Defined if format_arg(...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_ARG
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+
 /* Defined if format(printf, ...) attribute is supported. */
 #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
 
@@ -4,12 +4,13 @@
 #include <limits.h>
 #include <strings.h>
 
-#define JEMALLOC_VERSION "5.1.0-56-g41b7372eadee941b9164751b8d4963f915d3ceae"
+#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756"
 #define JEMALLOC_VERSION_MAJOR 5
-#define JEMALLOC_VERSION_MINOR 1
-#define JEMALLOC_VERSION_BUGFIX 0
-#define JEMALLOC_VERSION_NREV 56
-#define JEMALLOC_VERSION_GID "41b7372eadee941b9164751b8d4963f915d3ceae"
+#define JEMALLOC_VERSION_MINOR 2
+#define JEMALLOC_VERSION_BUGFIX 1
+#define JEMALLOC_VERSION_NREV 0
+#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756"
+#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756
 
 #define MALLOCX_LG_ALIGN(la) ((int)(la))
 #if LG_SIZEOF_PTR == 2
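As a quick sanity check for downstream code, the bumped version macros above can be asserted at compile time. This is a hypothetical consumer snippet, not part of the commit; it assumes the jemalloc headers are on the include path.

#include <jemalloc/jemalloc.h>

// Fails the build if the old 5.1 headers are still being picked up.
static_assert(JEMALLOC_VERSION_MAJOR == 5 && JEMALLOC_VERSION_MINOR >= 2,
              "expected jemalloc 5.2.x headers");

int main()
{
    return 0;
}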
@@ -68,6 +69,7 @@
 #  define JEMALLOC_EXPORT __declspec(dllimport)
 #  endif
 # endif
+# define JEMALLOC_FORMAT_ARG(i)
 # define JEMALLOC_FORMAT_PRINTF(s, i)
 # define JEMALLOC_NOINLINE __declspec(noinline)
 # ifdef __cplusplus
@@ -95,6 +97,11 @@
 # ifndef JEMALLOC_EXPORT
 #  define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
 # endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
+#  define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
+# else
+#  define JEMALLOC_FORMAT_ARG(i)
+# endif
 # ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
 #  define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
 # elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
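The `JEMALLOC_FORMAT_ARG` mapping above wires jemalloc's macro to the compiler's `format_arg` attribute. Below is a small sketch of what that attribute buys, using the raw GCC/Clang spelling; `translate()` is a hypothetical helper, not a jemalloc function.

#include <cstdio>

// format_arg(1) tells the compiler the return value is a format string derived
// from argument 1, so -Wformat keeps checking printf-style call sites.
__attribute__((__format_arg__(1)))
static const char * translate(const char * fmt)
{
    return fmt; // a real implementation might return a localized variant
}

int main()
{
    int rows = 42;
    std::printf(translate("processed %d rows\n"), rows);
}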
@@ -17,6 +17,7 @@
 #  define je_malloc_stats_print malloc_stats_print
 #  define je_malloc_usable_size malloc_usable_size
 #  define je_mallocx mallocx
+#  define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
 #  define je_nallocx nallocx
 #  define je_posix_memalign posix_memalign
 #  define je_rallocx rallocx
@@ -65,13 +65,13 @@ typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
     bool, unsigned);
 
 struct extent_hooks_s {
-	extent_alloc_t *alloc;
-	extent_dalloc_t *dalloc;
-	extent_destroy_t *destroy;
-	extent_commit_t *commit;
-	extent_decommit_t *decommit;
-	extent_purge_t *purge_lazy;
-	extent_purge_t *purge_forced;
-	extent_split_t *split;
-	extent_merge_t *merge;
+    extent_alloc_t *alloc;
+    extent_dalloc_t *dalloc;
+    extent_destroy_t *destroy;
+    extent_commit_t *commit;
+    extent_decommit_t *decommit;
+    extent_purge_t *purge_lazy;
+    extent_purge_t *purge_forced;
+    extent_split_t *split;
+    extent_merge_t *merge;
 };
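For context on how the hook table above is consumed: jemalloc lets an application inspect or replace an arena's extent hooks through `mallctl`. A hedged, read-only sketch, assuming an unprefixed jemalloc build is linked in:

#include <jemalloc/jemalloc.h>
#include <cstdio>

int main()
{
    extent_hooks_t * hooks = nullptr;
    size_t size = sizeof(hooks);
    // "arena.<i>.extent_hooks" is the documented mallctl name; arena 0 here.
    if (mallctl("arena.0.extent_hooks", &hooks, &size, nullptr, 0) == 0 && hooks != nullptr)
        std::printf("default extent hooks table at %p\n", static_cast<void *>(hooks));
    return 0;
}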
@@ -1,12 +1,6 @@
-/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
 #ifndef JEMALLOC_INTERNAL_DEFS_H_
 #define JEMALLOC_INTERNAL_DEFS_H_
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
 
 /*
  * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
  * public APIs to be prefixed. This makes it possible, with some care, to use
@@ -25,7 +19,7 @@
 #define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
 #define JEMALLOC_OVERRIDE___LIBC_REALLOC
 #define JEMALLOC_OVERRIDE___LIBC_VALLOC
-#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN
+/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
 
 /*
  * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
@@ -41,7 +35,7 @@
  */
 #define CPU_SPINWAIT
 /* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
-#define HAVE_CPU_SPINWAIT 0
+#define HAVE_CPU_SPINWAIT 9
 
 /*
  * Number of significant bits in virtual addresses. This may be less than the
@@ -55,25 +49,13 @@
 
 /* Defined if GCC __atomic atomics are available. */
 #define JEMALLOC_GCC_ATOMIC_ATOMICS 1
+/* and the 8-bit variant support. */
+#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
 
 /* Defined if GCC __sync atomics are available. */
 #define JEMALLOC_GCC_SYNC_ATOMICS 1
-
-/*
- * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
- * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
- * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
- * functions are defined in libgcc instead of being inlines).
- */
-/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
-
-/*
- * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
- * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
- * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
- * functions are defined in libgcc instead of being inlines).
- */
-/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
+/* and the 8-bit variant support. */
+#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
 
 /*
  * Defined if __builtin_clz() and __builtin_clzl() are available.
@@ -85,19 +67,13 @@
  */
 /* #undef JEMALLOC_OS_UNFAIR_LOCK */
 
-/*
- * Defined if OSSpin*() functions are available, as provided by Darwin, and
- * documented in the spinlock(3) manual page.
- */
-/* #undef JEMALLOC_OSSPIN */
-
 /* Defined if syscall(2) is usable. */
 #define JEMALLOC_USE_SYSCALL
 
 /*
  * Defined if secure_getenv(3) is available.
  */
-#define JEMALLOC_HAVE_SECURE_GETENV
+// #define JEMALLOC_HAVE_SECURE_GETENV
 
 /*
  * Defined if issetugid(2) is available.
@@ -243,6 +219,12 @@
 #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
 #define JEMALLOC_INTERNAL_FFS __builtin_ffs
 
+/*
+ * popcount*() functions to use for bitmapping.
+ */
+#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
+#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
+
 /*
  * If defined, explicitly attempt to more uniformly distribute large allocation
  * pointer alignments across all cache indices.
@@ -297,7 +279,7 @@
  *                                 MADV_FREE, though typically with higher
  *                                 system overhead.
  */
-// #define JEMALLOC_PURGE_MADVISE_FREE
+#define JEMALLOC_PURGE_MADVISE_FREE
 #define JEMALLOC_PURGE_MADVISE_DONTNEED
 #define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
 
@@ -379,4 +361,7 @@
  */
 #define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
 
+/* Performs additional safety checks when defined. */
+/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
+
 #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@@ -21,7 +21,7 @@
 #  include "jemalloc/jemalloc.h"
 #endif
 
-#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
+#if defined(JEMALLOC_OSATOMIC)
 #include <libkern/OSAtomic.h>
 #endif
 
@@ -161,7 +161,26 @@ static const bool config_log =
     false
 #endif
     ;
-#ifdef JEMALLOC_HAVE_SCHED_GETCPU
+
+/*
+ * Are extra safety checks enabled; things like checking the size of sized
+ * deallocations, double-frees, etc.
+ */
+static const bool config_opt_safety_checks =
+#ifdef JEMALLOC_OPT_SAFETY_CHECKS
+    true
+#elif defined(JEMALLOC_DEBUG)
+    /*
+     * This lets us only guard safety checks by one flag instead of two; fast
+     * checks can guard solely by config_opt_safety_checks and run in debug mode
+     * too.
+     */
+    true
+#else
+    false
+#endif
+    ;
+
+#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
 /* Currently percpu_arena depends on sched_getcpu. */
 #define JEMALLOC_PERCPU_ARENA
 #endif
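The `config_opt_safety_checks` constant added above follows a common jemalloc idiom: collapse several compile-time switches into one `static const bool` so runtime code only ever tests a single flag. A distilled sketch with placeholder macro names (not jemalloc's):

#include <cstdio>

// Uncomment either switch to see the flag flip:
// #define OPT_SAFETY_CHECKS
// #define BUILD_DEBUG

static const bool config_opt_safety_checks =
#ifdef OPT_SAFETY_CHECKS
    true
#elif defined(BUILD_DEBUG)
    true   // debug builds get the checks for free, guarded by the same flag
#else
    false
#endif
    ;

int main()
{
    if (config_opt_safety_checks)
        std::puts("size/double-free checks enabled");
    else
        std::puts("safety checks compiled out");
}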
@@ -1,123 +0,0 @@
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <limits.h>
-#include <strings.h>
-
-#define JEMALLOC_VERSION "5.1.0-97-gcd2931ad9bbd78208565716ab102e86d858c2fff"
-#define JEMALLOC_VERSION_MAJOR 5
-#define JEMALLOC_VERSION_MINOR 1
-#define JEMALLOC_VERSION_BUGFIX 0
-#define JEMALLOC_VERSION_NREV 97
-#define JEMALLOC_VERSION_GID "cd2931ad9bbd78208565716ab102e86d858c2fff"
-#define JEMALLOC_VERSION_GID_IDENT cd2931ad9bbd78208565716ab102e86d858c2fff
-
-#define MALLOCX_LG_ALIGN(la) ((int)(la))
-#if LG_SIZEOF_PTR == 2
-#  define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
-#else
-#  define MALLOCX_ALIGN(a) \
-      ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
-      ffs((int)(((size_t)(a))>>32))+31))
-#endif
-#define MALLOCX_ZERO ((int)0x40)
-/*
- * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
- * encodes MALLOCX_TCACHE_NONE.
- */
-#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
-#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
-/*
- * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
- */
-#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
-
-/*
- * Use as arena index in "arena.<i>.{purge,decay,dss}" and
- * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
- * definition is intentionally specified in raw decimal format to support
- * cpp-based string concatenation, e.g.
- *
- *   #define STRINGIFY_HELPER(x) #x
- *   #define STRINGIFY(x) STRINGIFY_HELPER(x)
- *
- *   mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
- *       0);
- */
-#define MALLCTL_ARENAS_ALL 4096
-/*
- * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
- * destroyed arenas.
- */
-#define MALLCTL_ARENAS_DESTROYED 4097
-
-#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
-#  define JEMALLOC_CXX_THROW throw()
-#else
-#  define JEMALLOC_CXX_THROW
-#endif
-
-#if defined(_MSC_VER)
-#  define JEMALLOC_ATTR(s)
-#  define JEMALLOC_ALIGNED(s) __declspec(align(s))
-#  define JEMALLOC_ALLOC_SIZE(s)
-#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  ifndef JEMALLOC_EXPORT
-#    ifdef DLLEXPORT
-#      define JEMALLOC_EXPORT __declspec(dllexport)
-#    else
-#      define JEMALLOC_EXPORT __declspec(dllimport)
-#    endif
-#  endif
-#  define JEMALLOC_FORMAT_PRINTF(s, i)
-#  define JEMALLOC_NOINLINE __declspec(noinline)
-#  ifdef __cplusplus
-#    define JEMALLOC_NOTHROW __declspec(nothrow)
-#  else
-#    define JEMALLOC_NOTHROW
-#  endif
-#  define JEMALLOC_SECTION(s) __declspec(allocate(s))
-#  define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
-#  if _MSC_VER >= 1900 && !defined(__EDG__)
-#    define JEMALLOC_ALLOCATOR __declspec(allocator)
-#  else
-#    define JEMALLOC_ALLOCATOR
-#  endif
-#elif defined(JEMALLOC_HAVE_ATTR)
-#  define JEMALLOC_ATTR(s) __attribute__((s))
-#  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-#  ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-#    define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
-#    define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
-#  else
-#    define JEMALLOC_ALLOC_SIZE(s)
-#    define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  endif
-#  ifndef JEMALLOC_EXPORT
-#    define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-#  endif
-#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
-#  elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
-#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
-#  else
-#    define JEMALLOC_FORMAT_PRINTF(s, i)
-#  endif
-#  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-#  define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
-#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-#  define JEMALLOC_RESTRICT_RETURN
-#  define JEMALLOC_ALLOCATOR
-#else
-#  define JEMALLOC_ATTR(s)
-#  define JEMALLOC_ALIGNED(s)
-#  define JEMALLOC_ALLOC_SIZE(s)
-#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  define JEMALLOC_EXPORT
-#  define JEMALLOC_FORMAT_PRINTF(s, i)
-#  define JEMALLOC_NOINLINE
-#  define JEMALLOC_NOTHROW
-#  define JEMALLOC_SECTION(s)
-#  define JEMALLOC_RESTRICT_RETURN
-#  define JEMALLOC_ALLOCATOR
-#endif
@@ -1,77 +0,0 @@
-typedef struct extent_hooks_s extent_hooks_t;
-
-/*
- * void *
- * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- *     size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
- */
-typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
-    bool *, unsigned);
-
-/*
- * bool
- * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     bool committed, unsigned arena_ind);
- */
-typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
-    unsigned);
-
-/*
- * void
- * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     bool committed, unsigned arena_ind);
- */
-typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
-    unsigned);
-
-/*
- * bool
- * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    unsigned);
-
-/*
- * bool
- * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
-    size_t, unsigned);
-
-/*
- * bool
- * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    unsigned);
-
-/*
- * bool
- * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    bool, unsigned);
-
-/*
- * bool
- * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- *     void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
-    bool, unsigned);
-
-struct extent_hooks_s {
-    extent_alloc_t *alloc;
-    extent_dalloc_t *dalloc;
-    extent_destroy_t *destroy;
-    extent_commit_t *commit;
-    extent_decommit_t *decommit;
-    extent_purge_t *purge_lazy;
-    extent_purge_t *purge_forced;
-    extent_split_t *split;
-    extent_merge_t *merge;
-};
@@ -1,11 +1,6 @@
-/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
 #ifndef JEMALLOC_INTERNAL_DEFS_H_
 #define JEMALLOC_INTERNAL_DEFS_H_
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
 
 /*
  * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
  * public APIs to be prefixed. This makes it possible, with some care, to use
|
||||
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
|
||||
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
|
||||
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
|
||||
#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN
|
||||
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
|
||||
|
||||
/*
|
||||
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
|
||||
@@ -54,25 +49,13 @@
 
 /* Defined if GCC __atomic atomics are available. */
 #define JEMALLOC_GCC_ATOMIC_ATOMICS 1
+/* and the 8-bit variant support. */
+#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
 
 /* Defined if GCC __sync atomics are available. */
 #define JEMALLOC_GCC_SYNC_ATOMICS 1
-
-/*
- * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
- * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
- * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
- * functions are defined in libgcc instead of being inlines).
- */
-/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
-
-/*
- * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
- * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
- * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
- * functions are defined in libgcc instead of being inlines).
- */
-/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
+/* and the 8-bit variant support. */
+#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
 
 /*
  * Defined if __builtin_clz() and __builtin_clzl() are available.
@@ -84,20 +67,13 @@
  */
 /* #undef JEMALLOC_OS_UNFAIR_LOCK */
 
-/*
- * Defined if OSSpin*() functions are available, as provided by Darwin, and
- * documented in the spinlock(3) manual page.
- */
-/* #undef JEMALLOC_OSSPIN */
-
 /* Defined if syscall(2) is usable. */
 #define JEMALLOC_USE_SYSCALL
 
 /*
  * Defined if secure_getenv(3) is available.
  */
-// Don't want dependency on newer GLIBC
-//#define JEMALLOC_HAVE_SECURE_GETENV
+// #define JEMALLOC_HAVE_SECURE_GETENV
 
 /*
  * Defined if issetugid(2) is available.
@@ -160,6 +136,9 @@
 /* JEMALLOC_STATS enables statistics calculation. */
 #define JEMALLOC_STATS
 
+/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
+/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
+
 /* JEMALLOC_PROF enables allocation profiling. */
 /* #undef JEMALLOC_PROF */
 
@@ -240,6 +219,12 @@
 #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
 #define JEMALLOC_INTERNAL_FFS __builtin_ffs
 
+/*
+ * popcount*() functions to use for bitmapping.
+ */
+#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
+#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
+
 /*
  * If defined, explicitly attempt to more uniformly distribute large allocation
  * pointer alignments across all cache indices.
@@ -252,6 +237,12 @@
  */
 /* #undef JEMALLOC_LOG */
 
+/*
+ * If defined, use readlinkat() (instead of readlink()) to follow
+ * /etc/malloc_conf.
+ */
+/* #undef JEMALLOC_READLINKAT */
+
 /*
  * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
  */
@@ -288,7 +279,7 @@
  *                                 MADV_FREE, though typically with higher
  *                                 system overhead.
  */
-//#define JEMALLOC_PURGE_MADVISE_FREE
+#define JEMALLOC_PURGE_MADVISE_FREE
 #define JEMALLOC_PURGE_MADVISE_DONTNEED
 #define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
 
@@ -370,4 +361,7 @@
  */
 #define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
 
+/* Performs additional safety checks when defined. */
+/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
+
 #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@@ -21,7 +21,7 @@
 #  include "jemalloc/jemalloc.h"
 #endif
 
-#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
+#if defined(JEMALLOC_OSATOMIC)
 #include <libkern/OSAtomic.h>
 #endif
 
@@ -161,7 +161,26 @@ static const bool config_log =
     false
 #endif
     ;
-#ifdef JEMALLOC_HAVE_SCHED_GETCPU
+
+/*
+ * Are extra safety checks enabled; things like checking the size of sized
+ * deallocations, double-frees, etc.
+ */
+static const bool config_opt_safety_checks =
+#ifdef JEMALLOC_OPT_SAFETY_CHECKS
+    true
+#elif defined(JEMALLOC_DEBUG)
+    /*
+     * This lets us only guard safety checks by one flag instead of two; fast
+     * checks can guard solely by config_opt_safety_checks and run in debug mode
+     * too.
+     */
+    true
+#else
+    false
+#endif
+    ;
+
+#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
 /* Currently percpu_arena depends on sched_getcpu. */
 #define JEMALLOC_PERCPU_ARENA
 #endif
@@ -1,43 +0,0 @@
-/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */
-/* Defined if __attribute__((...)) syntax is supported. */
-#define JEMALLOC_HAVE_ATTR
-
-/* Defined if alloc_size attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-
-/* Defined if format(printf, ...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#define JEMALLOC_OVERRIDE_MEMALIGN
-#define JEMALLOC_OVERRIDE_VALLOC
-
-/*
- * At least Linux omits the "const" in:
- *
- *   size_t malloc_usable_size(const void *ptr);
- *
- * Match the operating system's prototype.
- */
-#define JEMALLOC_USABLE_SIZE_CONST
-
-/*
- * If defined, specify throw() for the public function prototypes when compiling
- * with C++. The only justification for this is to match the prototypes that
- * glibc defines.
- */
-#define JEMALLOC_USE_CXX_THROW
-
-#ifdef _MSC_VER
-#  ifdef _WIN64
-#    define LG_SIZEOF_PTR_WIN 3
-#  else
-#    define LG_SIZEOF_PTR_WIN 2
-#  endif
-#endif
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#define LG_SIZEOF_PTR 3
@@ -1,66 +0,0 @@
-/*
- * The je_ prefix on the following public symbol declarations is an artifact
- * of namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
- */
-extern JEMALLOC_EXPORT const char *je_malloc_conf;
-extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
-    const char *s);
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *je_malloc(size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
-    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
-    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
-    JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
-    JEMALLOC_CXX_THROW;
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
-    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
-    int flags) JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
-    size_t extra, int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
-    int flags) JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
-    int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
-    JEMALLOC_ATTR(pure);
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
-    size_t *mibp, size_t *miblenp);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
-    void (*write_cb)(void *, const char *), void *je_cbopaque,
-    const char *opts);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
-    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
-    JEMALLOC_ATTR(malloc);
-#endif
@@ -21,7 +21,7 @@ RUN apt-get update \
             locales \
             ca-certificates \
             wget \
-            tzata \
+            tzdata \
     && rm -rf \
             /var/lib/apt/lists/* \
             /var/cache/debconf \
@@ -317,7 +317,7 @@ function report
 rm -r report ||:
 mkdir report report/tmp ||:
 
-rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv ||:
+rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv run-errors.tsv ||:
 
 build_log_column_definitions
 
@@ -434,7 +434,7 @@ create table wall_clock engine Memory as select *
     from file('wall-clock-times.tsv', TSV, 'test text, real float, user float, system float');
 
 create table slow_on_client_tsv engine File(TSV, 'report/slow-on-client.tsv') as
-    select client, server, floor(client/server, 3) p, query_display_name
+    select client, server, floor(client/server, 3) p, test, query_display_name
     from query_time left join query_display_names using (test, query_index)
    where p > 1.02 order by p desc;
 
@@ -189,7 +189,7 @@ if args.report == 'main':
     slow_on_client_rows = tsvRows('report/slow-on-client.tsv')
     error_tests += len(slow_on_client_rows)
     printSimpleTable('Slow on client',
-        ['Client time, s', 'Server time, s', 'Ratio', 'Query'],
+        ['Client time, s', 'Server time, s', 'Ratio', 'Test', 'Query'],
         slow_on_client_rows)
 
 def print_changes():
@@ -12,7 +12,7 @@ readonly CLICKHOUSE_PACKAGES_ARG="${2}"
 CLICKHOUSE_SERVER_IMAGE="${3}"
 
 if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
-  readonly CLICKHOUSE_PACKAGES_DIR="$(realpath ${2})" # or --no-rebuild
+    readonly CLICKHOUSE_PACKAGES_DIR="$(realpath ${2})" # or --no-rebuild
 fi
 
@@ -26,19 +26,19 @@ fi
 # TODO: optionally mount most recent clickhouse-test and queries directory from local machine
 
 if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
-  docker build \
-    -f "${CLICKHOUSE_DOCKER_DIR}/test/stateless/clickhouse-statelest-test-runner.Dockerfile" \
-    --target clickhouse-test-runner-base \
-    -t clickhouse-test-runner-base:preinstall \
-    "${CLICKHOUSE_DOCKER_DIR}/test/stateless"
+    docker build \
+        -f "${CLICKHOUSE_DOCKER_DIR}/test/stateless/clickhouse-statelest-test-runner.Dockerfile" \
+        --target clickhouse-test-runner-base \
+        -t clickhouse-test-runner-base:preinstall \
+        "${CLICKHOUSE_DOCKER_DIR}/test/stateless"
 
-  docker rm -f clickhouse-test-runner-installing-packages || true
-  docker run \
-    -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
-    --name clickhouse-test-runner-installing-packages \
-    clickhouse-test-runner-base:preinstall
-  docker commit clickhouse-test-runner-installing-packages clickhouse-statelest-test-runner:local
-  docker rm -f clickhouse-test-runner-installing-packages || true
+    docker rm -f clickhouse-test-runner-installing-packages || true
+    docker run \
+        -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
+        --name clickhouse-test-runner-installing-packages \
+        clickhouse-test-runner-base:preinstall
+    docker commit clickhouse-test-runner-installing-packages clickhouse-statelest-test-runner:local
+    docker rm -f clickhouse-test-runner-installing-packages || true
 fi
 
 # # Create a bind-volume to the clickhouse-test script file
@@ -47,38 +47,38 @@ fi
 
 # Build server image (optional) from local packages
 if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then
-  CLICKHOUSE_SERVER_IMAGE="yandex/clickhouse-server:local"
+    CLICKHOUSE_SERVER_IMAGE="yandex/clickhouse-server:local"
 
-  if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
-    docker build \
-      -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
-      --target clickhouse-server-base \
-      -t clickhouse-server-base:preinstall \
-      "${CLICKHOUSE_DOCKER_DIR}/server"
+    if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then
+        docker build \
+            -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
+            --target clickhouse-server-base \
+            -t clickhouse-server-base:preinstall \
+            "${CLICKHOUSE_DOCKER_DIR}/server"
 
-    docker rm -f clickhouse_server_base_installing_server || true
-    docker run -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
-      --name clickhouse_server_base_installing_server \
-      clickhouse-server-base:preinstall
-    docker commit clickhouse_server_base_installing_server clickhouse-server-base:postinstall
+        docker rm -f clickhouse_server_base_installing_server || true
+        docker run -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
+            --name clickhouse_server_base_installing_server \
+            clickhouse-server-base:preinstall
+        docker commit clickhouse_server_base_installing_server clickhouse-server-base:postinstall
 
-    docker build \
-      -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
-      --target clickhouse-server \
-      -t "${CLICKHOUSE_SERVER_IMAGE}" \
-      "${CLICKHOUSE_DOCKER_DIR}/server"
-  fi
+        docker build \
+            -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
+            --target clickhouse-server \
+            -t "${CLICKHOUSE_SERVER_IMAGE}" \
+            "${CLICKHOUSE_DOCKER_DIR}/server"
+    fi
 fi
 
-docker rm -f test-runner || true
+docker-compose down
 CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \
-  docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
-    create \
-    --build --force-recreate
+    docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
+        create \
+        --build --force-recreate
 
 CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \
-  docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
-    run \
-    --name test-runner \
-    test-runner
+    docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
+        run \
+        --name test-runner \
+        test-runner
@@ -37,6 +37,8 @@ The supported formats are:
 | [Avro](#data-format-avro) | ✔ | ✔ |
 | [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
 | [Parquet](#data-format-parquet) | ✔ | ✔ |
+| [Arrow](#data-format-arrow) | ✔ | ✔ |
+| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
 | [ORC](#data-format-orc) | ✔ | ✗ |
 | [RowBinary](#rowbinary) | ✔ | ✔ |
 | [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
@@ -985,9 +987,9 @@ See also [how to read/write length-delimited protobuf messages in popular langua
 
 ## Avro {#data-format-avro}
 
-[Apache Avro](http://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project.
+[Apache Avro](https://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project.
 
-ClickHouse Avro format supports reading and writing [Avro data files](http://avro.apache.org/docs/current/spec.html#Object+Container+Files).
+ClickHouse Avro format supports reading and writing [Avro data files](https://avro.apache.org/docs/current/spec.html#Object+Container+Files).
 
 ### Data Types Matching {#data_types-matching}
 
@@ -1009,7 +1011,7 @@ The table below shows supported data types and how they match ClickHouse [data t
 | `long (timestamp-millis)` \* | [DateTime64(3)](../sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \* |
 | `long (timestamp-micros)` \* | [DateTime64(6)](../sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \* |
 
-\* [Avro logical types](http://avro.apache.org/docs/current/spec.html#Logical+Types)
+\* [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)
 
 Unsupported Avro data types: `record` (non-root), `map`
 
@@ -1095,7 +1097,7 @@ SELECT * FROM topic1_stream;
 
 ## Parquet {#data-format-parquet}
 
-[Apache Parquet](http://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. ClickHouse supports read and write operations for this format.
+[Apache Parquet](https://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. ClickHouse supports read and write operations for this format.
 
 ### Data Types Matching {#data_types-matching-2}
 
@@ -1141,6 +1143,16 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_
 
 To exchange data with Hadoop, you can use [HDFS table engine](../engines/table-engines/integrations/hdfs.md).
 
+## Arrow {#data-format-arrow}
+
+[Apache Arrow](https://arrow.apache.org/) comes with two built-in columnar storage formats. ClickHouse supports read and write operations for these formats.
+
+`Arrow` is Apache Arrow's "file mode" format. It is designed for in-memory random access.
+
+## ArrowStream {#data-format-arrow-stream}
+
+`ArrowStream` is Apache Arrow's "stream mode" format. It is designed for in-memory stream processing.
+
 ## ORC {#data-format-orc}
 
 [Apache ORC](https://orc.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. You can only insert data in this format to ClickHouse.
@@ -25,6 +25,7 @@ toc_title: Integrations
 -   Message queues
     -   [Kafka](https://kafka.apache.org)
         -   [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (uses [Go client](https://github.com/ClickHouse/clickhouse-go/))
+        -   [stream-loader-clickhouse](https://github.com/adform/stream-loader)
 -   Stream processing
     -   [Flink](https://flink.apache.org)
         -   [flink-clickhouse-sink](https://github.com/ivi-ru/flink-clickhouse-sink)
@@ -41,6 +41,7 @@ toc_title: Adopters
 | [Integros](https://integros.com){.favicon} | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
 | [Kodiak Data](https://www.kodiakdata.com/){.favicon} | Clouds | Main product | — | — | [Slides in Engish, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
 | [Kontur](https://kontur.ru){.favicon} | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
+| [Lawrence Berkeley National Laboratory](https://www.lbl.gov){.favicon} | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) |
 | [LifeStreet](https://lifestreet.com/){.favicon} | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) |
 | [Mail.ru Cloud Solutions](https://mcs.mail.ru/){.favicon} | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) |
 | [MessageBird](https://www.messagebird.com){.favicon} | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
@@ -586,11 +586,11 @@ If the table doesn’t exist, ClickHouse will create it. If the structure of the
 </query_log>
 ```
 
-## query\_thread\_log {#server_configuration_parameters-query-thread-log}
+## query\_thread\_log {#server_configuration_parameters-query_thread_log}
 
 Setting for logging threads of queries received with the [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) setting.
 
-Queries are logged in the [system.query\_thread\_log](../../operations/system-tables.md#system_tables-query-thread-log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).
+Queries are logged in the [system.query\_thread\_log](../../operations/system-tables.md#system_tables-query_thread_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).
 
 Use the following parameters to configure logging:
@@ -404,6 +404,35 @@ Possible values:
 
 Default value: 0.
 
+## partial_merge_join_optimizations {#partial_merge_join_optimizations}
+
+Disables optimizations in partial merge join algorithm for [JOIN](../../sql-reference/statements/select/join.md) queries.
+
+By default, this setting enables improvements that could lead to wrong results. If you see suspicious results in your queries, disable optimizations by this setting. Optimizations can be different in different versions of the ClickHouse server.
+
+Possible values:
+
+-   0 — Optimizations disabled.
+-   1 — Optimizations enabled.
+
+Default value: 1.
+
+## partial_merge_join_rows_in_right_blocks {#partial_merge_join_rows_in_right_blocks}
+
+Limits sizes of right-hand join data blocks in partial merge join algorithm for [JOIN](../../sql-reference/statements/select/join.md) queries.
+
+ClickHouse server:
+
+1.  Splits right-hand join data into blocks with up to the specified number of rows.
+2.  Indexes each block with their minimum and maximum values.
+3.  Unloads prepared blocks to disk if possible.
+
+Possible values:
+
+-   Any positive integer. Recommended range of values: [1000, 100000].
+
+Default value: 65536.
+
 ## any_join_distinct_right_table_keys {#any_join_distinct_right_table_keys}
 
 Enables legacy ClickHouse server behavior in `ANY INNER|LEFT JOIN` operations.
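To make the three steps of `partial_merge_join_rows_in_right_blocks` above concrete, here is an illustrative sketch, not ClickHouse's actual implementation, of how right-hand blocks can be cut and min/max-indexed; the type and function names are invented for the example.

#include <algorithm>
#include <cstdint>
#include <vector>

struct BlockIndex
{
    std::size_t begin = 0;   // offset of the block's first row
    std::size_t end = 0;     // one past the last row
    int64_t min_key = 0;     // smallest join key in the block
    int64_t max_key = 0;     // largest join key in the block
};

// Split sorted join keys into blocks of up to max_rows (the setting defaults
// to 65536) and record each block's min/max, mirroring steps 1 and 2; blocks
// whose [min, max] range cannot overlap the probe keys can then be skipped or
// left on disk (step 3).
std::vector<BlockIndex> indexRightBlocks(const std::vector<int64_t> & sorted_keys, std::size_t max_rows)
{
    std::vector<BlockIndex> index;
    for (std::size_t begin = 0; begin < sorted_keys.size(); begin += max_rows)
    {
        std::size_t end = std::min(begin + max_rows, sorted_keys.size());
        index.push_back({begin, end, sorted_keys[begin], sorted_keys[end - 1]});
    }
    return index;
}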
@@ -569,7 +598,7 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING'
 
 Setting up query threads logging.
 
-Queries’ threads runned by ClickHouse with this setup are logged according to the rules in the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server configuration parameter.
+Queries’ threads runned by ClickHouse with this setup are logged according to the rules in the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter.
 
 Example:
@@ -5,7 +5,7 @@ toc_title: System Tables
 
 # System Tables {#system-tables}
 
-## Introduction
+## Introduction {#system-tables-introduction}
 
 System tables provide information about:
 
@@ -18,9 +18,12 @@ System tables:
 -   Available only for reading data.
 -   Can't be dropped or altered, but can be detached.
 
-The `metric_log`, `query_log`, `query_thread_log`, `trace_log` system tables store data in a storage filesystem. Other system tables store their data in RAM. ClickHouse server creates such system tables at the start.
+Most of system tables store their data in RAM. ClickHouse server creates such system tables at the start.
+
+The [metric_log](#system_tables-metric_log), [query_log](#system_tables-query_log), [query_thread_log](#system_tables-query_thread_log), [trace_log](#system_tables-trace_log) system tables store data in a storage filesystem. You can alter them or remove from a disk manually. If you remove one of that tables from a disk, the ClickHouse server creates the table again at the time of the next recording. A storage period for these tables is not limited, and ClickHouse server doesn't delete their data automatically. You need to organize removing of outdated logs by yourself. For example, you can use [TTL](../sql-reference/statements/alter.md#manipulations-with-table-ttl) settings for removing outdated log records.
 
-### Sources of System Metrics
+### Sources of System Metrics {#system-tables-sources-of-system-metrics}
 
 For collecting system metrics ClickHouse server uses:
@ -587,97 +590,150 @@ Columns:
|
||||
- `source_file` (LowCardinality(String)) — Source file from which the logging was done.
|
||||
- `source_line` (UInt64) — Source line from which the logging was done.
|
||||
|
||||
## system.query\_log {#system_tables-query_log}
|
||||
## system.query_log {#system_tables-query_log}
|
||||
|
||||
Contains information about execution of queries. For each query, you can see processing start time, duration of processing, error messages and other information.
|
||||
Contains information about executed queries, for example, start time, duration of processing, error messages.
|
||||
|
||||
!!! note "Note"
|
||||
The table doesn’t contain input data for `INSERT` queries.
|
||||
|
||||
ClickHouse creates this table only if the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
|
||||
You can change settings of queries logging in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration.
|
||||
|
||||
To enable query logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md) section.
|
||||
You can disable queries logging by setting [log_queries = 0](settings/settings.md#settings-log-queries). We don't recommend to turn off logging because information in this table is important for solving issues.
|
||||
|
||||
The flushing period of logs is set in `flush_interval_milliseconds` parameter of the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server settings section. To force flushing logs, use the [SYSTEM FLUSH LOGS](../sql-reference/statements/system.md#query_language-system-flush_logs) query.
|
||||
|
||||
ClickHouse doesn't delete logs from the table automatically. See [Introduction](#system-tables-introduction) for more details.
|
||||
|
||||
The `system.query_log` table registers two kinds of queries:
|
||||
|
||||
1. Initial queries that were run directly by the client.
|
||||
2. Child queries that were initiated by other queries (for distributed query execution). For these types of queries, information about the parent queries is shown in the `initial_*` columns.
|
||||
|
||||
Each query creates one or two rows in the `query_log` table, depending on the status (see the `type` column) of the query:
|
||||
|
||||
1. If the query execution was successful, two rows with the `QueryStart` and `QueryFinish` types are created .
|
||||
2. If an error occurred during query processing, two events with the `QueryStart` and `ExceptionWhileProcessing` types are created .
|
||||
3. If an error occurred before launching the query, a single event with the `ExceptionBeforeStart` type is created.
|
||||
|
||||
Columns:
|
||||
|
||||
- `type` ([Enum8](../sql-reference/data-types/enum.md)) — Type of an event that occurred when executing the query. Values:
    - `'QueryStart' = 1` — Successful start of query execution.
    - `'QueryFinish' = 2` — Successful end of query execution.
    - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
    - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
- `event_date` ([Date](../sql-reference/data-types/date.md)) — Query starting date.
- `event_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Query starting time.
- `query_start_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Start time of query execution.
- `query_duration_ms` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.
- `read_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions that participated in the query. It includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries, `read_rows` includes the total number of rows read at all replicas. Each replica sends its `read_rows` value, and the server-initiator of the query sums all received and local values. Cache volumes don't affect this value.
- `read_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions that participated in the query. It includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries, `read_bytes` includes the total number of bytes read at all replicas. Each replica sends its `read_bytes` value, and the server-initiator of the query sums all received and local values. Cache volumes don't affect this value.
- `written_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
- `written_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
- `result_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in the result of a `SELECT` query, or the number of rows in an `INSERT` query.
- `result_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — RAM volume in bytes used to store the query result.
- `memory_usage` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Memory consumption by the query.
- `query` ([String](../sql-reference/data-types/string.md)) — Query string.
- `exception` ([String](../sql-reference/data-types/string.md)) — Exception message.
- `exception_code` ([Int32](../sql-reference/data-types/int-uint.md)) — Code of an exception.
- `stack_trace` ([String](../sql-reference/data-types/string.md)) — [Stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string, if the query was completed successfully.
- `is_initial_query` ([UInt8](../sql-reference/data-types/int-uint.md)) — Query type. Possible values:
    - 1 — Query was initiated by the client.
    - 0 — Query was initiated by another query as part of distributed query execution.
- `user` ([String](../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
- `query_id` ([String](../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query.
- `port` ([UInt16](../sql-reference/data-types/int-uint.md)) — The client port that was used to make the query.
- `initial_user` ([String](../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` ([String](../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
- `initial_address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
- `initial_port` ([UInt16](../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query.
- `interface` ([UInt8](../sql-reference/data-types/int-uint.md)) — Interface that the query was initiated from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
- `os_user` ([String](../sql-reference/data-types/string.md)) — Operating system username who runs [clickhouse-client](../interfaces/cli.md).
- `client_hostname` ([String](../sql-reference/data-types/string.md)) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
- `client_name` ([String](../sql-reference/data-types/string.md)) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
- `client_revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_major` ([UInt32](../sql-reference/data-types/int-uint.md)) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_minor` ([UInt32](../sql-reference/data-types/int-uint.md)) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_patch` ([UInt32](../sql-reference/data-types/int-uint.md)) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
- `http_method` ([UInt8](../sql-reference/data-types/int-uint.md)) — HTTP method that initiated the query. Possible values:
    - 0 — The query was launched from the TCP interface.
    - 1 — `GET` method was used.
    - 2 — `POST` method was used.
- `http_user_agent` ([String](../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `thread_numbers` ([Array(UInt32)](../sql-reference/data-types/array.md)) — Numbers of threads that participated in query execution.
- `ProfileEvents.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Counters that measure different metrics. Their descriptions can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` ([Array(UInt64)](../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` column.
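
Since `ProfileEvents.Names` and `ProfileEvents.Values` are parallel arrays, they can be unfolded pairwise with `ARRAY JOIN`; a sketch (the `query_id` value is just a placeholder):

``` sql
SELECT name, value
FROM system.query_log
ARRAY JOIN ProfileEvents.Names AS name, ProfileEvents.Values AS value
WHERE query_id = '5e834082-6f6d-4e34-b47b-cd1934f4002a'
```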

By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
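
For example, to make sure the latest queries are already in the table before inspecting it:

``` sql
SYSTEM FLUSH LOGS
```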

**Example**

``` sql
SELECT * FROM system.query_log LIMIT 1 FORMAT Vertical;
```

``` text
Row 1:
──────
type:                 QueryStart
event_date:           2020-05-13
event_time:           2020-05-13 14:02:28
query_start_time:     2020-05-13 14:02:28
query_duration_ms:    0
read_rows:            0
read_bytes:           0
written_rows:         0
written_bytes:        0
result_rows:          0
result_bytes:         0
memory_usage:         0
query:                SELECT 1
exception_code:       0
exception:
stack_trace:
is_initial_query:     1
user:                 default
query_id:             5e834082-6f6d-4e34-b47b-cd1934f4002a
address:              ::ffff:127.0.0.1
port:                 57720
initial_user:         default
initial_query_id:     5e834082-6f6d-4e34-b47b-cd1934f4002a
initial_address:      ::ffff:127.0.0.1
initial_port:         57720
interface:            1
os_user:              bayonet
client_hostname:      clickhouse.ru-central1.internal
client_name:          ClickHouse client
client_revision:      54434
client_version_major: 20
client_version_minor: 4
client_version_patch: 1
http_method:          0
http_user_agent:
quota_key:
revision:             54434
thread_ids:           []
ProfileEvents.Names:  []
ProfileEvents.Values: []
Settings.Names:       ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
Settings.Values:      ['0','random','1','10000000000']
```

When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.

**See Also**

- [system.query_thread_log](#system_tables-query_thread_log) — This table contains information about each query execution thread.

You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `partition_by` parameter).

## system.query_thread_log {#system_tables-query_thread_log}

The table contains information about each query execution thread.

ClickHouse creates this table only if the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.

To enable query logging, set the [log\_query\_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md) section.
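
For example, thread logging can be enabled for the current session (a sketch; the setting can also be set in a settings profile):

``` sql
SET log_query_threads = 1
```
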
@ -729,14 +785,14 @@ Columns:

- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. Their descriptions can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.

By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.

When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.

!!! note "Note"
    The storage period for logs is unlimited. Logs aren't automatically deleted from the table. You need to organize the removal of outdated logs yourself.

You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server setting (see the `partition_by` parameter).

## system.trace\_log {#system_tables-trace_log}

@ -117,6 +117,10 @@ Returns the part of the domain that includes top-level subdomains up to the “f

For example, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`.

### port(URL[, default_port = 0]) {#port}

Returns the port, or `default_port` if there is no port in the URL (or in case of a validation error).
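
A quick illustration of both forms, assuming the behavior described above:

``` sql
SELECT port('http://example.com:8080/') AS explicit, port('http://example.com/', 80) AS fallback
```

Here `explicit` returns `8080`, while `fallback` returns the `default_port` value `80`, because the second URL has no explicit port.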

### path {#path}

Returns the path. Example: `/top/news.html`. The path does not include the query string.
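
For instance, a short sketch of how the path and the query string are separated:

``` sql
SELECT path('https://clickhouse.tech/docs/en/?query=value')
```

This returns `/docs/en/`; the `?query=value` part is not included.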
@ -574,11 +574,11 @@ ClickHouse will check the conditions `min_part_size` and `min_part_size_rat

</query_log>
```

## query\_thread\_log {#server_configuration_parameters-query_thread_log}

Logging settings for threads of queries received with the [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) setting.

Queries are logged not in a separate file, but in the [system.query\_thread\_log](../../operations/server-configuration-parameters/settings.md#system_tables-query_thread_log) system table. You can change the name of this table in the `table` parameter (see below).

The following parameters are used to configure logging:

@ -536,7 +536,7 @@ log_queries=1

Enables logging of information about query execution threads.

Information about execution threads of queries passed to ClickHouse with this setting enabled is logged according to the rules of the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter.

Example:

@ -1,4 +1,7 @@

# System tables {#system-tables}

## Introduction {#system-tables-introduction}

System tables are used to implement part of the system's functionality and to provide access to information about how the system works.
You cannot delete a system table (though you can perform DETACH).

@ -544,182 +547,156 @@ CurrentMetric_ReplicatedChecks: 0

- `source_file` (LowCardinality(String)) — Source file from which the log record was made.
- `source_line` (UInt64) — Source line from which the log record was made.

## system.query_log {#system_tables-query_log}

Contains information about queries being executed, for example, start time, duration of processing, error messages.

!!! note "Attention"
    The table does not contain input data for `INSERT` queries.

Logging settings can be changed in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration.

Logging can be disabled with the [log_queries = 0](settings/settings.md#settings-log-queries) setting. If possible, do not turn off logging, because the information in this table is important for troubleshooting.

The period of flushing logs to the table is set by the `flush_interval_milliseconds` parameter in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) configuration section. To force flushing logs from the memory buffer into the table, use the [SYSTEM FLUSH LOGS](../sql-reference/statements/system.md#query_language-system-flush_logs) query.

ClickHouse does not delete logs from the table automatically. See [Introduction](#system-tables-introduction).

You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) configuration (the `partition_by` parameter).

The `system.query_log` table contains information about two kinds of queries:

1. Initial queries that were run directly by the client.
2. Child queries that were initiated by other queries (for distributed query execution). For child queries, information about the initial query is contained in the `initial_*` columns.

Depending on the status (the `type` column), each query creates one or two rows in the `query_log` table:

1. If the query completed successfully, two events with the `QueryStart` and `QueryFinish` types are created.
2. If an error occurred during query processing, two events with the `QueryStart` and `ExceptionWhileProcessing` types are created.
3. If an error occurred before the query was launched, a single event with the `ExceptionBeforeStart` type is created.

Columns:

- `type` ([Enum8](../sql-reference/data-types/enum.md)) — Type of an event that occurred when executing the query. Values:
    - `'QueryStart' = 1` — Successful start of query execution.
    - `'QueryFinish' = 2` — Successful end of query execution.
    - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
    - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
- `event_date` ([Date](../sql-reference/data-types/date.md)) — Query starting date.
- `event_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Query starting time.
- `query_start_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Start time of query execution.
- `query_duration_ms` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.
- `read_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions that participated in the query. It includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries, `read_rows` includes the total number of rows read at all replicas. Each replica sends its own `read_rows` value, and the server-initiator of the query sums all received and local values. Cache volumes are not taken into account.
- `read_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions that participated in the query. It includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries, `read_bytes` includes the total number of bytes read at all replicas. Each replica sends its own `read_bytes` value, and the server-initiator of the query sums all received and local values. Cache volumes are not taken into account.
- `written_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
- `written_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the amount of written data in bytes. For other queries, the column value is 0.
- `result_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in the result of a `SELECT` query, or the number of rows in an `INSERT` query.
- `result_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — RAM volume in bytes used to store the query result.
- `memory_usage` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Memory consumption by the query.
- `query` ([String](../sql-reference/data-types/string.md)) — Query string.
- `exception` ([String](../sql-reference/data-types/string.md)) — Exception message, if the query ended with an exception.
- `exception_code` ([Int32](../sql-reference/data-types/int-uint.md)) — Code of an exception.
- `stack_trace` ([String](../sql-reference/data-types/string.md)) — [Stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string, if the query was completed successfully.
- `is_initial_query` ([UInt8](../sql-reference/data-types/int-uint.md)) — Query type. Possible values:
    - 1 — Query was initiated by the client.
    - 0 — Query was initiated by another query during distributed query execution.
- `user` ([String](../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
- `query_id` ([String](../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address that the query came from.
- `port` ([UInt16](../sql-reference/data-types/int-uint.md)) — The client port that was used to make the query.
- `initial_user` ([String](../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` ([String](../sql-reference/data-types/string.md)) — ID of the parent query.
- `initial_address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query came from.
- `initial_port` ([UInt16](../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query.
- `interface` ([UInt8](../sql-reference/data-types/int-uint.md)) — Interface that the query was initiated from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
- `os_user` ([String](../sql-reference/data-types/string.md)) — Operating system username who runs [clickhouse-client](../interfaces/cli.md).
- `client_hostname` ([String](../sql-reference/data-types/string.md)) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
- `client_name` ([String](../sql-reference/data-types/string.md)) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
- `client_revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_major` ([UInt32](../sql-reference/data-types/int-uint.md)) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_minor` ([UInt32](../sql-reference/data-types/int-uint.md)) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
- `client_version_patch` ([UInt32](../sql-reference/data-types/int-uint.md)) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
- `http_method` ([UInt8](../sql-reference/data-types/int-uint.md)) — HTTP method that initiated the query. Possible values:
    - 0 — The query was launched from the TCP interface.
    - 1 — `GET` method was used.
    - 2 — `POST` method was used.
- `http_user_agent` ([String](../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `thread_numbers` ([Array(UInt32)](../sql-reference/data-types/array.md)) — Numbers of threads that participated in query processing.
- `ProfileEvents.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Counters that measure different metrics. Their descriptions can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` ([Array(UInt64)](../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` column.
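
The `Settings.Names` and `Settings.Values` arrays are likewise parallel, so they can be unfolded pairwise with `ARRAY JOIN`; a sketch:

``` sql
SELECT setting_name, setting_value
FROM system.query_log
ARRAY JOIN Settings.Names AS setting_name, Settings.Values AS setting_value
LIMIT 10
```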

**Example**

``` sql
SELECT * FROM system.query_log LIMIT 1 FORMAT Vertical;
```

``` text
Row 1:
──────
type:                 QueryStart
event_date:           2020-05-13
event_time:           2020-05-13 14:02:28
query_start_time:     2020-05-13 14:02:28
query_duration_ms:    0
read_rows:            0
read_bytes:           0
written_rows:         0
written_bytes:        0
result_rows:          0
result_bytes:         0
memory_usage:         0
query:                SELECT 1
exception_code:       0
exception:
stack_trace:
is_initial_query:     1
user:                 default
query_id:             5e834082-6f6d-4e34-b47b-cd1934f4002a
address:              ::ffff:127.0.0.1
port:                 57720
initial_user:         default
initial_query_id:     5e834082-6f6d-4e34-b47b-cd1934f4002a
initial_address:      ::ffff:127.0.0.1
initial_port:         57720
interface:            1
os_user:              bayonet
client_hostname:      clickhouse.ru-central1.internal
client_name:          ClickHouse client
client_revision:      54434
client_version_major: 20
client_version_minor: 4
client_version_patch: 1
http_method:          0
http_user_agent:
quota_key:
revision:             54434
thread_ids:           []
ProfileEvents.Names:  []
ProfileEvents.Values: []
Settings.Names:       ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
Settings.Values:      ['0','random','1','10000000000']
```

If the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.

**See Also**

- [system.query_thread_log](#system_tables-query_thread_log) — This table contains information about each thread of query execution.

## system.query_thread_log {#system_tables-query_thread_log}


Contains information about each thread of query execution.

ClickHouse creates this table only if the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.

To enable logging, set the [log\_query\_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md#settings) section.

@ -770,16 +747,16 @@ ClickHouse creates the table only if the

- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. Their descriptions can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` (Array(UInt64)) — Metrics for this thread that are listed in the `ProfileEvents.Names` column.

By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.

If the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.

!!! note "Note"
    The storage period for logs is unlimited. Logs are not deleted from the table automatically. You need to organize the removal of outdated logs yourself.

You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) configuration (the `partition_by` parameter).

## system.query_thread_log {#system_tables-query_thread_log}

Contains information about each query execution thread.

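A quick way to inspect a record, in the same spirit as the `system.query_log` example above:

``` sql
SELECT * FROM system.query_thread_log LIMIT 1 FORMAT Vertical;
```
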
@ -73,10 +73,10 @@ Upd. Enabled for system tables.

Q1. Committed, but there is technical debt that is being fixed now.
Done. No, not done - there is still technical debt.

### 1.9. + Using TTL for data downsampling {#ispolzovanie-ttl-dlia-prorezhivaniia-dannykh}

Will be done by Nikolai Sorokin, HSE and Yandex.
Upd. There is a pull request. Upd. Done.

Currently the user can define an expression in a table that determines how long the data is stored. Usually this expression is defined relative to the value of a date column - for example: delete the data after three months. https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/\#table_engine-mergetree-ttl

@ -124,7 +124,7 @@ Q2.

Upd. Oleg will do only the HDFS part.
Upd. The implementation on top of S3 is working at the PoC level.

### 1.13. + Speeding up queries with FINAL {#uskorenie-zaprosov-s-final}

Requires 2.1. Done by [Nikolai Kochetov](https://github.com/KochetovNicolai). Needed for Yandex.Metrica. Q2.
Upd: PR [#10463](https://github.com/ClickHouse/ClickHouse/pull/10463)

@ -203,10 +203,11 @@ Upd. SharedContext has been moved out of Context.

Upd. In the queue. Ivan Lezhankin.

### 2.9. + Logging in format style {#loggirovnie-v-format-stile}

Done by [Ivan Lezhankin](https://github.com/abyss7). Low priority.
[#6049](https://github.com/ClickHouse/ClickHouse/issues/6049#issuecomment-570836998)

Done.

### 2.10. Requesting slices, not columns, from tables {#zaprashivat-u-tablits-ne-stolbtsy-a-srezy}

@ -282,24 +283,20 @@ Upd. It is now being discussed how to make another

### 4.3. Limiting the number of simultaneous downloads from replicas {#ogranichenie-chisla-odnovremennykh-skachivanii-s-replik}

Dmitry Grigoriev, HSE.
Initially Oleg Alekseenkov worked on it, but the solution is not ready yet, although not that much is left to finish.

### 4.4. Limiting network bandwidth during replication {#ogranichenie-setevoi-polosy-pri-replikatsii}

Needed for Metrica.

### 4.5. Ability to resume the transfer of a data part during replication after a failure {#vozmozhnost-prodolzhit-peredachu-kuska-dannykh-pri-replikatsii-posle-sboia}

Dmitry Grigoriev, HSE.

### 4.6. p2p transfer for GLOBAL subqueries {#p2p-peredacha-dlia-global-podzaprosov}

### 4.7. Lazy loading of sets for IN and JOIN via k/v requests {#lenivaia-zagruzka-mnozhestv-dlia-in-i-join-s-pomoshchiu-kv-zaprosov}

### 4.8. Splitting the background pool for fetch and merge {#razdelit-background-pool-dlia-fetch-i-merge}

Dmitry Grigoriev, HSE.
In the queue. Fix the problem that a recovering replica stops merging. Partially compensated by 4.3.


@ -329,6 +326,7 @@ Upd. Done. Work efficiency is in question.

Metrica, BK, Market and Altinity already use versions newer than the LTS one.
Upd. A second LTS version has appeared - 20.3.


## 6. Instrumentation {#instrumentirovanie}

### 6.1. + Fixes for the sampling query profiler {#ispravleniia-sempliruiushchego-profailera-zaprosov}

@ -425,11 +423,11 @@ Upd. Reviewed all the checks one by one.

UBSan is enabled in functional tests, but not in integration tests. Requires 7.7.

### 7.11. + Enabling \*San in unit tests {#vkliuchenie-san-v-unit-testakh}

We have few unit tests compared to functional tests, and their use is not mandatory. But they are still important, and there is no reason not to run them under all kinds of sanitizers.

Ilya Yatsishin. Done.

### 7.12. Showing test coverage of new code in PRs {#pokazyvat-testovoe-pokrytie-novogo-koda-v-pr}

@ -528,6 +526,8 @@ Upd. There are builds, [example](https://clickhouse-builds.s3.yandex.n

Daria Petrova, UrFU.

Working prototype: https://pulls-dashboard-demo.herokuapp.com/dashboard/ClickHouse/ClickHouse

A large number of developers work on ClickHouse at the same time, submitting their changes as pull requests. When there are many unmerged pull requests, it becomes hard to organize the work - it is unclear which pull request to look at first.

It is proposed to implement a simple single-page web application that displays a list of pull requests with the following information:

@ -627,6 +627,7 @@ Upd. Done (all directories except contrib).

### 7.32. Obfuscation of production queries {#obfuskatsiia-prodakshen-zaprosov}

Roman Ilgovsky. Needed for Yandex.Metrica.
There is a pull request, almost done: https://github.com/ClickHouse/ClickHouse/pull/10973

Given an SQL query, it is required to derive the structure of the tables on which this query will be executed and to fill these tables with random data in such a way that the result of the query depends on the choice of the data subset.

@ -1397,11 +1398,11 @@ Constraints allow specifying an expression whose truth

Vasily Morozov, Arslan Gumerov, Albert Kidrachev, HSE.
Last year another person started this task but did not make enough progress.

+ 1. Top sort optimization.

ClickHouse uses a suboptimal variant of top sort. Its essence is that the top N records are taken from each block, and then all the blocks are merged. But taking the top N records from each subsequent block is pointless if we already know that fewer of them will make it into the global top N. Of course, a variation on a priority queue (heap) should be implemented, with fast skipping of entire blocks when not a single row would make it into the accumulated top.

+ 2. Recursive variant of sorting by tuples.

For sorting by tuples, ordinary sorting with a comparator is used, which in a loop over the tuple elements makes virtual calls to `IColumn::compareAt`. This is suboptimal - both because of the short loop over a number of elements unknown at compile time, and because of the virtual calls. To do without virtual calls, there is the `IColumn::getPermutation` method. It is used when sorting by a single column. There is an idea that in the case of sorting by a tuple something similar can also be applied... for example, an `updatePermutation` method taking offset and limit arguments that further rearranges the permutation within the ranges of values where the previous column had equal values.

@ -21,7 +21,7 @@ mkdocs-htmlproofer-plugin==0.0.3

mkdocs-macros-plugin==0.4.9
nltk==3.5
nose==1.3.7
protobuf==3.12.2
numpy==1.18.4
Pygments==2.5.2
pymdown-extensions==7.1

@ -6,14 +6,9 @@ set(CLICKHOUSE_CLIENT_SOURCES

set(CLICKHOUSE_CLIENT_LINK PRIVATE clickhouse_common_config clickhouse_functions clickhouse_aggregate_functions clickhouse_common_io clickhouse_parsers string_utils ${Boost_PROGRAM_OPTIONS_LIBRARY})

# Always use internal readpassphrase
add_subdirectory(readpassphrase)
list(APPEND CLICKHOUSE_CLIENT_LINK PRIVATE readpassphrase)

clickhouse_program_add(client)

@ -39,7 +39,6 @@

#include <Common/StringUtils/StringUtils.h>
#include <Common/typeid_cast.h>
#include <Common/Config/ConfigProcessor.h>
#include <Core/Types.h>
#include <Core/QueryProcessingStage.h>
#include <Core/ExternalTable.h>
@ -77,6 +76,10 @@
#include <common/argsToConfig.h>
#include <Common/TerminalSize.h>

#if !defined(ARCADIA_BUILD)
# include <Common/config_version.h>
#endif

#ifndef __clang__
#pragma GCC optimize("-fno-var-tracking-assignments")
#endif

@ -9,7 +9,7 @@

#include <Common/Exception.h>
#include <common/setTerminalEcho.h>
#include <ext/scope_guard.h>
#include "readpassphrase/readpassphrase.h"

namespace DB
{

@ -1,3 +0,0 @@

#pragma once

#cmakedefine HAVE_READPASSPHRASE
@ -1,13 +1,7 @@

# wget https://raw.githubusercontent.com/openssh/openssh-portable/master/openbsd-compat/readpassphrase.c
# wget https://raw.githubusercontent.com/openssh/openssh-portable/master/openbsd-compat/readpassphrase.h

add_library(readpassphrase readpassphrase.c)
set_target_properties(readpassphrase PROPERTIES LINKER_LANGUAGE C)
target_compile_options(readpassphrase PRIVATE -Wno-unused-result -Wno-reserved-id-macro)

@ -1,6 +1,6 @@

#pragma once

/* #undef HAVE_READPASSPHRASE */

#if !defined(HAVE_READPASSPHRASE)
# ifndef _PATH_TTY
@ -25,13 +25,11 @@

#include "includes.h"

#ifndef HAVE_READPASSPHRASE

#include <termios.h>
#include <signal.h>
#include <ctype.h>
#include <fcntl.h>
#include "readpassphrase.h"
#include <errno.h>
#include <string.h>
#include <unistd.h>
@ -193,19 +191,7 @@ restart:
|
||||
}
|
||||
//DEF_WEAK(readpassphrase);
|
||||
|
||||
#if 0
|
||||
char *
|
||||
getpass(const char *prompt)
|
||||
{
|
||||
static char buf[_PASSWORD_LEN + 1];
|
||||
|
||||
return(readpassphrase(prompt, buf, sizeof(buf), RPP_ECHO_OFF));
|
||||
}
|
||||
#endif
|
||||
|
||||
static void handler(int s)
|
||||
{
|
||||
|
||||
signo[s] = 1;
|
||||
}
|
||||
#endif /* HAVE_READPASSPHRASE */
|
||||
@ -23,39 +23,22 @@
/* OPENBSD ORIGINAL: include/readpassphrase.h */

#pragma once
// #ifndef _READPASSPHRASE_H_
// #define _READPASSPHRASE_H_

//#include "includes.h"
#include "config_client.h"

// Should not be included on BSD systems, but if it happens...
#ifdef HAVE_READPASSPHRASE
# include_next <readpassphrase.h>
#if defined(__cplusplus)
extern "C" {
#endif

#ifndef HAVE_READPASSPHRASE

# ifdef __cplusplus
extern "C" {
# endif

# define RPP_ECHO_OFF 0x00 /* Turn off echo (default). */
# define RPP_ECHO_ON 0x01 /* Leave echo on. */
# define RPP_REQUIRE_TTY 0x02 /* Fail if there is no tty. */
# define RPP_FORCELOWER 0x04 /* Force input to lower case. */
# define RPP_FORCEUPPER 0x08 /* Force input to upper case. */
# define RPP_SEVENBIT 0x10 /* Strip the high bit from input. */
# define RPP_STDIN 0x20 /* Read from stdin, not /dev/tty */
#define RPP_ECHO_OFF 0x00 /* Turn off echo (default). */
#define RPP_ECHO_ON 0x01 /* Leave echo on. */
#define RPP_REQUIRE_TTY 0x02 /* Fail if there is no tty. */
#define RPP_FORCELOWER 0x04 /* Force input to lower case. */
#define RPP_FORCEUPPER 0x08 /* Force input to upper case. */
#define RPP_SEVENBIT 0x10 /* Strip the high bit from input. */
#define RPP_STDIN 0x20 /* Read from stdin, not /dev/tty */

char * readpassphrase(const char *, char *, size_t, int);

# ifdef __cplusplus
#if defined(__cplusplus)
}
# endif

#endif /* HAVE_READPASSPHRASE */

// #endif /* !_READPASSPHRASE_H_ */
#endif

7
programs/client/readpassphrase/ya.make
Normal file
@ -0,0 +1,7 @@
LIBRARY()

SRCS(
readpassphrase.c
)

END()
@ -8,11 +8,8 @@
#include <string>
#include <utility> /// pair

#if __has_include("config_tools.h")
#include "config_tools.h"
#endif
#if __has_include("config_core.h")
#include "config_core.h"
#if !defined(ARCADIA_BUILD)
# include "config_tools.h"
#endif

#include <Common/StringUtils/StringUtils.h>
@ -22,31 +19,31 @@

/// Universal executable for various clickhouse applications
#if ENABLE_CLICKHOUSE_SERVER || !defined(ENABLE_CLICKHOUSE_SERVER)
#if ENABLE_CLICKHOUSE_SERVER
int mainEntryClickHouseServer(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_CLIENT || !defined(ENABLE_CLICKHOUSE_CLIENT)
#if ENABLE_CLICKHOUSE_CLIENT
int mainEntryClickHouseClient(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_LOCAL || !defined(ENABLE_CLICKHOUSE_LOCAL)
#if ENABLE_CLICKHOUSE_LOCAL
int mainEntryClickHouseLocal(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_BENCHMARK || !defined(ENABLE_CLICKHOUSE_BENCHMARK)
#if ENABLE_CLICKHOUSE_BENCHMARK
int mainEntryClickHouseBenchmark(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG || !defined(ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG)
#if ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG
int mainEntryClickHouseExtractFromConfig(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_COMPRESSOR || !defined(ENABLE_CLICKHOUSE_COMPRESSOR)
#if ENABLE_CLICKHOUSE_COMPRESSOR
int mainEntryClickHouseCompressor(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_FORMAT || !defined(ENABLE_CLICKHOUSE_FORMAT)
#if ENABLE_CLICKHOUSE_FORMAT
int mainEntryClickHouseFormat(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_COPIER || !defined(ENABLE_CLICKHOUSE_COPIER)
#if ENABLE_CLICKHOUSE_COPIER
int mainEntryClickHouseClusterCopier(int argc, char ** argv);
#endif
#if ENABLE_CLICKHOUSE_OBFUSCATOR || !defined(ENABLE_CLICKHOUSE_OBFUSCATOR)
#if ENABLE_CLICKHOUSE_OBFUSCATOR
int mainEntryClickHouseObfuscator(int argc, char ** argv);
#endif

@ -60,31 +57,31 @@ using MainFunc = int (*)(int, char**);
/// Add an item here to register new application
std::pair<const char *, MainFunc> clickhouse_applications[] =
{
#if ENABLE_CLICKHOUSE_LOCAL || !defined(ENABLE_CLICKHOUSE_LOCAL)
#if ENABLE_CLICKHOUSE_LOCAL
{"local", mainEntryClickHouseLocal},
#endif
#if ENABLE_CLICKHOUSE_CLIENT || !defined(ENABLE_CLICKHOUSE_CLIENT)
#if ENABLE_CLICKHOUSE_CLIENT
{"client", mainEntryClickHouseClient},
#endif
#if ENABLE_CLICKHOUSE_BENCHMARK || !defined(ENABLE_CLICKHOUSE_BENCHMARK)
#if ENABLE_CLICKHOUSE_BENCHMARK
{"benchmark", mainEntryClickHouseBenchmark},
#endif
#if ENABLE_CLICKHOUSE_SERVER || !defined(ENABLE_CLICKHOUSE_SERVER)
#if ENABLE_CLICKHOUSE_SERVER
{"server", mainEntryClickHouseServer},
#endif
#if ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG || !defined(ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG)
#if ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG
{"extract-from-config", mainEntryClickHouseExtractFromConfig},
#endif
#if ENABLE_CLICKHOUSE_COMPRESSOR || !defined(ENABLE_CLICKHOUSE_COMPRESSOR)
#if ENABLE_CLICKHOUSE_COMPRESSOR
{"compressor", mainEntryClickHouseCompressor},
#endif
#if ENABLE_CLICKHOUSE_FORMAT || !defined(ENABLE_CLICKHOUSE_FORMAT)
#if ENABLE_CLICKHOUSE_FORMAT
{"format", mainEntryClickHouseFormat},
#endif
#if ENABLE_CLICKHOUSE_COPIER || !defined(ENABLE_CLICKHOUSE_COPIER)
#if ENABLE_CLICKHOUSE_COPIER
{"copier", mainEntryClickHouseClusterCopier},
#endif
#if ENABLE_CLICKHOUSE_OBFUSCATOR || !defined(ENABLE_CLICKHOUSE_OBFUSCATOR)
#if ENABLE_CLICKHOUSE_OBFUSCATOR
{"obfuscator", mainEntryClickHouseObfuscator},
#endif
};
@ -127,9 +124,10 @@ enum class InstructionFail
SSSE3 = 2,
SSE4_1 = 3,
SSE4_2 = 4,
AVX = 5,
AVX2 = 6,
AVX512 = 7
POPCNT = 5,
AVX = 6,
AVX2 = 7,
AVX512 = 8
};

const char * instructionFailToString(InstructionFail fail)
@ -146,6 +144,8 @@ const char * instructionFailToString(InstructionFail fail)
return "SSE4.1";
case InstructionFail::SSE4_2:
return "SSE4.2";
case InstructionFail::POPCNT:
return "POPCNT";
case InstructionFail::AVX:
return "AVX";
case InstructionFail::AVX2:
@ -189,6 +189,16 @@ void checkRequiredInstructionsImpl(volatile InstructionFail & fail)
__asm__ volatile ("pcmpgtq %%xmm0, %%xmm0" : : : "xmm0");
#endif

/// Defined by -msse4.2
#if defined(__POPCNT__)
fail = InstructionFail::POPCNT;
{
uint64_t a = 0;
uint64_t b = 0;
__asm__ volatile ("popcnt %1, %0" : "=r"(a) :"r"(b) :);
}
#endif

#if defined(__AVX__)
fail = InstructionFail::AVX;
__asm__ volatile ("vaddpd %%ymm0, %%ymm0, %%ymm0" : : : "ymm0")
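For context on the probes above: each check writes its enum value into `fail` and then executes one instruction of that set, so a crash pinpoints exactly which extension is missing. A hedged alternative sketch (not what main.cpp does) would instead consult the CPUID feature bit through a compiler builtin:

#include <cstdio>

int main()
{
#if defined(__GNUC__) && defined(__x86_64__)
    /// __builtin_cpu_supports consults CPUID rather than executing POPCNT itself.
    if (__builtin_cpu_supports("popcnt"))
        std::puts("POPCNT supported");
    else
        std::puts("POPCNT missing");
#endif
    return 0;
}
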
@ -1,21 +1,6 @@
set(CLICKHOUSE_SERVER_SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/HTTPHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/HTTPHandlerFactory.cpp
${CMAKE_CURRENT_SOURCE_DIR}/InterserverIOHTTPHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/MetricsTransmitter.cpp
${CMAKE_CURRENT_SOURCE_DIR}/NotFoundHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/PrometheusMetricsWriter.cpp
${CMAKE_CURRENT_SOURCE_DIR}/PrometheusRequestHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/ReplicasStatusHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/StaticRequestHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Server.cpp
${CMAKE_CURRENT_SOURCE_DIR}/TCPHandler.cpp
)

set(CLICKHOUSE_SERVER_SOURCES
${CLICKHOUSE_SERVER_SOURCES}
${CMAKE_CURRENT_SOURCE_DIR}/MySQLHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/MySQLHandlerFactory.cpp
MetricsTransmitter.cpp
Server.cpp
)

set (CLICKHOUSE_SERVER_LINK

@ -53,13 +53,13 @@
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Common/Config/ConfigReloader.h>
#include "HTTPHandlerFactory.h"
#include <Server/HTTPHandlerFactory.h>
#include "MetricsTransmitter.h"
#include <Common/StatusFile.h>
#include "TCPHandlerFactory.h"
#include <Server/TCPHandlerFactory.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/ThreadFuzzer.h>
#include "MySQLHandlerFactory.h"
#include <Server/MySQLHandlerFactory.h>

#if !defined(ARCADIA_BUILD)
# include "config_core.h"

@ -1,6 +1,6 @@
#pragma once

#include "IServer.h"
#include <Server/IServer.h>

#include <daemon/BaseDaemon.h>

@ -11,19 +11,8 @@ PEERDIR(
SRCS(
clickhouse-server.cpp

HTTPHandler.cpp
HTTPHandlerFactory.cpp
InterserverIOHTTPHandler.cpp
MetricsTransmitter.cpp
MySQLHandler.cpp
MySQLHandlerFactory.cpp
NotFoundHandler.cpp
PrometheusMetricsWriter.cpp
PrometheusRequestHandler.cpp
ReplicasStatusHandler.cpp
StaticRequestHandler.cpp
Server.cpp
TCPHandler.cpp
)

END()

@ -1,3 +1,27 @@
RECURSE(
server
PROGRAM(clickhouse)

CFLAGS(
-DENABLE_CLICKHOUSE_CLIENT
-DENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG
-DENABLE_CLICKHOUSE_SERVER
)

PEERDIR(
clickhouse/base/daemon
clickhouse/base/loggers
clickhouse/programs/client/readpassphrase
clickhouse/src
)

SRCS(
main.cpp

client/Client.cpp
client/ConnectionParameters.cpp
client/Suggest.cpp
extract-from-config/ExtractFromConfig.cpp
server/Server.cpp
server/MetricsTransmitter.cpp
)

END()

@ -7,8 +7,8 @@ namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int BAD_ARGUMENTS;
extern const int NOT_IMPLEMENTED;
}

@ -36,8 +36,11 @@ Authentication::Digest Authentication::getPasswordDoubleSHA1() const

case DOUBLE_SHA1_PASSWORD:
return password_hash;

case MAX_TYPE:
break;
}
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
throw Exception("getPasswordDoubleSHA1(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
}

@ -71,8 +74,11 @@ bool Authentication::isCorrectPassword(const String & password_) const

return encodeSHA1(first_sha1) == password_hash;
}

case MAX_TYPE:
break;
}
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
throw Exception("Cannot check if the password is correct for authentication type " + toString(type), ErrorCodes::NOT_IMPLEMENTED);
}

}

@ -5,6 +5,7 @@
#include <Common/OpenSSLHelpers.h>
#include <Poco/SHA1Engine.h>
#include <boost/algorithm/hex.hpp>
#include <boost/algorithm/string/case_conv.hpp>

namespace DB
@ -14,6 +15,7 @@ namespace ErrorCodes
extern const int SUPPORT_IS_DISABLED;
extern const int BAD_ARGUMENTS;
extern const int LOGICAL_ERROR;
extern const int NOT_IMPLEMENTED;
}

@ -35,6 +37,15 @@ public:
/// SHA1(SHA1(password)).
/// This kind of hash is used by the `mysql_native_password` authentication plugin.
DOUBLE_SHA1_PASSWORD,

MAX_TYPE,
};

struct TypeInfo
{
const char * const raw_name;
const String name; /// Lowercased with underscores, e.g. "sha256_password".
static const TypeInfo & get(Type type_);
};

using Digest = std::vector<uint8_t>;
@ -85,6 +96,48 @@ private:
};

inline const Authentication::TypeInfo & Authentication::TypeInfo::get(Type type_)
{
static constexpr auto make_info = [](const char * raw_name_)
{
String init_name = raw_name_;
boost::to_lower(init_name);
return TypeInfo{raw_name_, std::move(init_name)};
};

switch (type_)
{
case NO_PASSWORD:
{
static const auto info = make_info("NO_PASSWORD");
return info;
}
case PLAINTEXT_PASSWORD:
{
static const auto info = make_info("PLAINTEXT_PASSWORD");
return info;
}
case SHA256_PASSWORD:
{
static const auto info = make_info("SHA256_PASSWORD");
return info;
}
case DOUBLE_SHA1_PASSWORD:
{
static const auto info = make_info("DOUBLE_SHA1_PASSWORD");
return info;
}
case MAX_TYPE: break;
}
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type_)), ErrorCodes::LOGICAL_ERROR);
}

inline String toString(Authentication::Type type_)
{
return Authentication::TypeInfo::get(type_).raw_name;
}

inline Authentication::Digest Authentication::encodeSHA256(const std::string_view & text [[maybe_unused]])
{
#if USE_SSL
@ -122,8 +175,10 @@ inline void Authentication::setPassword(const String & password_)

case DOUBLE_SHA1_PASSWORD:
return setPasswordHashBinary(encodeDoubleSHA1(password_));

case MAX_TYPE: break;
}
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
throw Exception("setPassword(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
}

@ -186,8 +241,10 @@ inline void Authentication::setPasswordHashBinary(const Digest & hash)
password_hash = hash;
return;
}

case MAX_TYPE: break;
}
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
throw Exception("setPasswordHashBinary(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
}

}

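A brief usage sketch for the TypeInfo helper introduced above (assuming this header is included; the values follow directly from make_info):

using namespace DB;

const auto & info = Authentication::TypeInfo::get(Authentication::SHA256_PASSWORD);
/// info.raw_name == "SHA256_PASSWORD", info.name == "sha256_password"
String readable = toString(Authentication::SHA256_PASSWORD);  /// "SHA256_PASSWORD"
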
@ -68,15 +68,27 @@ void ExtendedRoleSet::init(const ASTExtendedRoleSet & ast, const AccessControlMa
{
all = ast.all;

auto name_to_id = [id_mode{ast.id_mode}, manager](const String & name) -> UUID
auto name_to_id = [&ast, manager](const String & name) -> UUID
{
if (id_mode)
if (ast.id_mode)
return parse<UUID>(name);
assert(manager);
auto id = manager->find<User>(name);
if (id)
return *id;
return manager->getID<Role>(name);
if (ast.can_contain_users && ast.can_contain_roles)
{
auto id = manager->find<User>(name);
if (id)
return *id;
return manager->getID<Role>(name);
}
else if (ast.can_contain_users)
{
return manager->getID<User>(name);
}
else
{
assert(ast.can_contain_roles);
return manager->getID<Role>(name);
}
};

if (!ast.names.empty() && !all)

@ -143,6 +143,14 @@ const IAccessStorage & MultipleAccessStorage::getStorage(const UUID & id) const
return const_cast<MultipleAccessStorage *>(this)->getStorage(id);
}

void MultipleAccessStorage::addStorage(std::unique_ptr<Storage> nested_storage)
{
/// Note that IStorage::storage_name is not changed. It is ok, as this method
/// is considered a temporary solution allowing third-party Arcadia applications
/// using CH as a library to register their own access storages. Do not remove
/// this method without providing any alternative :)
nested_storages.emplace_back(std::move(nested_storage));
}

AccessEntityPtr MultipleAccessStorage::readImpl(const UUID & id) const
{

@ -25,6 +25,8 @@ public:
const Storage & getStorage(const UUID & id) const;
Storage & getStorage(const UUID & id);

void addStorage(std::unique_ptr<Storage> nested_storage);

Storage & getStorageByIndex(size_t i) { return *(nested_storages[i]); }
const Storage & getStorageByIndex(size_t i) const { return *(nested_storages[i]); }

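A hedged sketch of how a third-party application might call the new hook; the `multiple_storage` variable and the concrete MemoryAccessStorage type are illustrative assumptions, and any IAccessStorage implementation should do:

auto external = std::make_unique<MemoryAccessStorage>();
multiple_storage.addStorage(std::move(external));  /// the storage keeps its own storage_name
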
@ -23,18 +23,10 @@ struct EntropyData
{
using Weight = UInt64;

using HashingMap = HashMap<
Value, Weight,
HashCRC32<Value>,
HashTableGrower<4>,
HashTableAllocatorWithStackMemory<sizeof(std::pair<Value, Weight>) * (1 << 3)>>;
using HashingMap = HashMapWithStackMemory<Value, Weight, HashCRC32<Value>, 4>;

/// For the case of pre-hashed values.
using TrivialMap = HashMap<
Value, Weight,
UInt128TrivialHash,
HashTableGrower<4>,
HashTableAllocatorWithStackMemory<sizeof(std::pair<Value, Weight>) * (1 << 3)>>;
using TrivialMap = HashMapWithStackMemory<Value, Weight, UInt128TrivialHash, 4>;

using Map = std::conditional_t<std::is_same_v<UInt128, Value>, TrivialMap, HashingMap>;

@ -28,12 +28,7 @@ template <typename T>
struct AggregateFunctionGroupUniqArrayData
{
/// When creating, the hash table must be small.
using Set = HashSet<
T,
DefaultHash<T>,
HashTableGrower<4>,
HashTableAllocatorWithStackMemory<sizeof(T) * (1 << 4)>
>;
using Set = HashSetWithStackMemory<T, DefaultHash<T>, 4>;

Set value;
};
@ -126,9 +121,10 @@ public:
/// Generic implementation, it uses serialized representation as object descriptor.
struct AggregateFunctionGroupUniqArrayGenericData
{
static constexpr size_t INIT_ELEMS = 2; /// adjustable
static constexpr size_t ELEM_SIZE = sizeof(HashSetCellWithSavedHash<StringRef, StringRefHash>);
using Set = HashSetWithSavedHash<StringRef, StringRefHash, HashTableGrower<INIT_ELEMS>, HashTableAllocatorWithStackMemory<INIT_ELEMS * ELEM_SIZE>>;
static constexpr size_t INITIAL_SIZE_DEGREE = 3; /// adjustable

using Set = HashSetWithSavedHashWithStackMemory<StringRef, StringRefHash,
INITIAL_SIZE_DEGREE>;

Set value;
};

@ -23,13 +23,8 @@ namespace DB
template <typename T>
struct AggregateFunctionTopKData
{
using Set = SpaceSaving
<
T,
HashCRC32<T>,
HashTableGrower<4>,
HashTableAllocatorWithStackMemory<sizeof(T) * (1 << 4)>
>;
using Set = SpaceSaving<T, HashCRC32<T>>;

Set value;
};

@ -109,13 +104,7 @@ public:
/// Generic implementation, it uses serialized representation as object descriptor.
struct AggregateFunctionTopKGenericData
{
using Set = SpaceSaving
<
StringRef,
StringRefHash,
HashTableGrower<4>,
HashTableAllocatorWithStackMemory<sizeof(StringRef) * (1 << 4)>
>;
using Set = SpaceSaving<StringRef, StringRefHash>;

Set value;
};

@ -33,12 +33,7 @@ struct QuantileExactWeighted
using Hasher = std::conditional_t<std::is_same_v<Value, Decimal128>, Int128Hash, HashCRC32<UnderlyingType>>;

/// When creating, the hash table must be small.
using Map = HashMap<
UnderlyingType, Weight,
Hasher,
HashTableGrower<4>,
HashTableAllocatorWithStackMemory<sizeof(std::pair<Value, Weight>) * (1 << 3)>
>;
using Map = HashMapWithStackMemory<UnderlyingType, Weight, Hasher, 4>;

Map map;

|
@ -58,6 +58,7 @@ add_subdirectory (TableFunctions)
|
||||
add_subdirectory (Processors)
|
||||
add_subdirectory (Formats)
|
||||
add_subdirectory (Compression)
|
||||
add_subdirectory (Server)
|
||||
|
||||
|
||||
set(dbms_headers)
|
||||
@ -145,6 +146,7 @@ add_object_library(clickhouse_storages_distributed Storages/Distributed)
|
||||
add_object_library(clickhouse_storages_mergetree Storages/MergeTree)
|
||||
add_object_library(clickhouse_storages_liveview Storages/LiveView)
|
||||
add_object_library(clickhouse_client Client)
|
||||
add_object_library(clickhouse_server Server)
|
||||
add_object_library(clickhouse_formats Formats)
|
||||
add_object_library(clickhouse_processors Processors)
|
||||
add_object_library(clickhouse_processors_executors Processors/Executors)
|
||||
|
@ -278,13 +278,15 @@ private:

/** Allocator with optimization to place small memory ranges in automatic memory.
*/
template <typename Base, size_t N, size_t Alignment>
template <typename Base, size_t _initial_bytes, size_t Alignment>
class AllocatorWithStackMemory : private Base
{
private:
alignas(Alignment) char stack_memory[N];
alignas(Alignment) char stack_memory[_initial_bytes];

public:
static constexpr size_t initial_bytes = _initial_bytes;

/// Do not use boost::noncopyable to avoid the warning about direct base
/// being inaccessible due to ambiguity, when derived classes are also
/// noncopyable (-Winaccessible-base).
@ -295,10 +297,10 @@ public:

void * alloc(size_t size)
{
if (size <= N)
if (size <= initial_bytes)
{
if constexpr (Base::clear_memory)
memset(stack_memory, 0, N)
memset(stack_memory, 0, initial_bytes);
return stack_memory;
}

@ -307,18 +309,18 @@ public:

void free(void * buf, size_t size)
{
if (size > N)
if (size > initial_bytes)
Base::free(buf, size);
}

void * realloc(void * buf, size_t old_size, size_t new_size)
{
/// Was in stack_memory, will remain there.
if (new_size <= N)
if (new_size <= initial_bytes)
return buf;

/// Already was big enough to not fit in stack_memory.
if (old_size > N)
if (old_size > initial_bytes)
return Base::realloc(buf, old_size, new_size, Alignment);

/// Was in stack memory, but now will not fit there.
@ -330,10 +332,20 @@ public:
protected:
static constexpr size_t getStackThreshold()
{
return N;
return initial_bytes;
}
};

// A constant that gives the number of initially available bytes in
// the allocator. Used to check that this number is in sync with the
// initial size of the array or hash table that uses the allocator.
template<typename TAllocator>
constexpr size_t allocatorInitialBytes = 0;

template<typename Base, size_t initial_bytes, size_t Alignment>
constexpr size_t allocatorInitialBytes<AllocatorWithStackMemory<
Base, initial_bytes, Alignment>> = initial_bytes;

#if !__clang__
#pragma GCC diagnostic pop

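To see how the variable template is meant to work, here is a minimal standalone sketch (simplified stand-ins, not the actual headers above): the allocator advertises its inline capacity, and a container static_asserts that its initial buffer matches.

#include <cstddef>
#include <cstdint>

template <size_t inline_bytes>
struct InlineAllocator {};  // stand-in for AllocatorWithStackMemory

// Primary template: allocators without inline memory report 0 bytes.
template <typename TAllocator>
constexpr size_t allocatorInitialBytesSketch = 0;

// Partial specialization: inline-memory allocators report their capacity.
template <size_t inline_bytes>
constexpr size_t allocatorInitialBytesSketch<InlineAllocator<inline_bytes>> = inline_bytes;

template <typename Cell, size_t initial_count, typename Allocator>
struct TableSketch
{
    static constexpr size_t initial_buffer_bytes = initial_count * sizeof(Cell);
    static_assert(allocatorInitialBytesSketch<Allocator> == 0
        || allocatorInitialBytesSketch<Allocator> == initial_buffer_bytes);
};

// Compiles: 16 cells of 8 bytes match the 128-byte inline buffer;
// InlineAllocator<64> here would fail the static_assert at compile time.
TableSketch<std::int64_t, 16, InlineAllocator<128>> checked;

int main() { (void)checked; return 0; }
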
@ -94,6 +94,12 @@ namespace CurrentMetrics
amount = new_amount;
}

void sub(Value value = 1)
{
what->fetch_sub(value, std::memory_order_relaxed);
amount -= value;
}

/// Subtract value before destructor.
void destroy()
{

@ -43,3 +43,14 @@ public:
this->m_size = 0;
}
};

template <typename Key, typename Mapped, typename Hash,
size_t initial_size_degree>
using ClearableHashMapWithStackMemory = ClearableHashMap<
Key,
Mapped,
Hash,
HashTableGrower<initial_size_degree>,
HashTableAllocatorWithStackMemory<
(1ULL << initial_size_degree)
* sizeof(ClearableHashMapCell<Key, Mapped, Hash>)>>;

@ -84,3 +84,15 @@ public:
this->m_size = 0;
}
};

template <typename Key, typename Hash, size_t initial_size_degree>
using ClearableHashSetWithStackMemory = ClearableHashSet<
Key,
Hash,
HashTableGrower<initial_size_degree>,
HashTableAllocatorWithStackMemory<
(1ULL << initial_size_degree)
* sizeof(
ClearableHashTableCell<
Key,
HashTableCell<Key, Hash, ClearableHashSetState>>)>>;

@ -239,3 +239,14 @@ template <
typename Grower = HashTableGrower<>,
typename Allocator = HashTableAllocator>
using HashMapWithSavedHash = HashMapTable<Key, HashMapCellWithSavedHash<Key, Mapped, Hash>, Hash, Grower, Allocator>;

template <typename Key, typename Mapped, typename Hash,
size_t initial_size_degree>
using HashMapWithStackMemory = HashMapTable<
Key,
HashMapCellWithSavedHash<Key, Mapped, Hash>,
Hash,
HashTableGrower<initial_size_degree>,
HashTableAllocatorWithStackMemory<
(1ULL << initial_size_degree)
* sizeof(HashMapCellWithSavedHash<Key, Mapped, Hash>)>>;

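The intended effect at call sites, sketched with assumed key and mapped types: the old form spelled out the grower and the allocator byte count by hand, while the new alias derives both from a single size degree.

/// Before: grower and inline byte count written out at every use:
using OldMap = HashMapWithSavedHash<UInt64, UInt64, DefaultHash<UInt64>,
    HashTableGrower<4>,
    HashTableAllocatorWithStackMemory<(1ULL << 4) * sizeof(HashMapCellWithSavedHash<UInt64, UInt64, DefaultHash<UInt64>>)>>;

/// After: same table type, byte count derived inside the alias and
/// verified by the static_assert in HashTable:
using NewMap = HashMapWithStackMemory<UInt64, UInt64, DefaultHash<UInt64>, 4>;
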
@ -93,6 +93,14 @@ template
>
using HashSet = HashSetTable<Key, HashTableCell<Key, Hash>, Hash, Grower, Allocator>;

template <typename Key, typename Hash, size_t initial_size_degree>
using HashSetWithStackMemory = HashSet<
Key,
Hash,
HashTableGrower<initial_size_degree>,
HashTableAllocatorWithStackMemory<
(1ULL << initial_size_degree)
* sizeof(HashTableCell<Key, Hash>)>>;

template
<
@ -102,3 +110,12 @@ template
typename Allocator = HashTableAllocator
>
using HashSetWithSavedHash = HashSetTable<Key, HashSetCellWithSavedHash<Key, Hash>, Hash, Grower, Allocator>;

template <typename Key, typename Hash, size_t initial_size_degree>
using HashSetWithSavedHashWithStackMemory = HashSetWithSavedHash<
Key,
Hash,
HashTableGrower<initial_size_degree>,
HashTableAllocatorWithStackMemory<
(1ULL << initial_size_degree)
* sizeof(HashSetCellWithSavedHash<Key, Hash>)>>;

@ -208,6 +208,7 @@ struct HashTableGrower
/// The state of this structure is enough to get the buffer size of the hash table.

UInt8 size_degree = initial_size_degree;
static constexpr auto initial_count = 1ULL << initial_size_degree;

/// The size of the hash table in the cells.
size_t bufSize() const { return 1ULL << size_degree; }
@ -255,6 +256,7 @@ struct HashTableGrower
template <size_t key_bits>
struct HashTableFixedGrower
{
static constexpr auto initial_count = 1ULL << key_bits;
size_t bufSize() const { return 1ULL << key_bits; }
size_t place(size_t x) const { return x; }
/// You could write __builtin_unreachable(), but the compiler does not optimize everything, and it turns out less efficient.
@ -309,6 +311,7 @@ struct ZeroValueStorage<false, Cell>
};

// The HashTable
template
<
typename Key,
@ -324,6 +327,14 @@ class HashTable :
protected Cell::State,
protected ZeroValueStorage<Cell::need_zero_value_storage, Cell> /// empty base optimization
{
public:
// If we use an allocator with inline memory, check that the initial
// size of the hash table is in sync with the amount of this memory.
static constexpr size_t initial_buffer_bytes
= Grower::initial_count * sizeof(Cell);
static_assert(allocatorInitialBytes<Allocator> == 0
|| allocatorInitialBytes<Allocator> == initial_buffer_bytes);

protected:
friend class const_iterator;
friend class iterator;

@ -10,5 +10,5 @@
*/
using HashTableAllocator = Allocator<true /* clear_memory */, true /* mmap_populate */>;

template <size_t N = 64>
using HashTableAllocatorWithStackMemory = AllocatorWithStackMemory<HashTableAllocator, N>;
template <size_t initial_bytes = 64>
using HashTableAllocatorWithStackMemory = AllocatorWithStackMemory<HashTableAllocator, initial_bytes>;

@ -9,6 +9,7 @@
#include <Common/Exception.h>
#include <IO/ReadBufferFromMemory.h>
#include <IO/ReadHelpers.h>
#include <common/logger_useful.h>

namespace DB
@ -19,6 +20,7 @@ namespace ErrorCodes
extern const int FILE_DOESNT_EXIST;
extern const int CANNOT_OPEN_FILE;
extern const int CANNOT_READ_FROM_FILE_DESCRIPTOR;
extern const int CANNOT_CLOSE_FILE;
}

static constexpr auto filename = "/proc/self/statm";
@ -35,7 +37,18 @@ MemoryStatisticsOS::MemoryStatisticsOS()
MemoryStatisticsOS::~MemoryStatisticsOS()
{
if (0 != ::close(fd))
tryLogCurrentException(__PRETTY_FUNCTION__);
{
try
{
throwFromErrno(
"File descriptor for \"" + std::string(filename) + "\" could not be closed. "
"Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE);
}
catch (const ErrnoException &)
{
DB::tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
}

MemoryStatisticsOS::Data MemoryStatisticsOS::get() const

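The destructor above deliberately throws inside a try block so that throwFromErrno can build a rich message, then swallows the exception to keep destruction noexcept. A standalone sketch of the same pattern with standard-library stand-ins (std::system_error in place of the ErrnoException helpers):

#include <cerrno>
#include <cstdio>
#include <string>
#include <system_error>
#include <unistd.h>

static void closeWithLogging(int fd, const char * name) noexcept
{
    if (0 != ::close(fd))
    {
        try
        {
            /// Throwing captures errno into a formatted message...
            throw std::system_error(errno, std::generic_category(),
                std::string("File descriptor for \"") + name + "\" could not be closed");
        }
        catch (const std::system_error & e)
        {
            /// ...but the destructor-style caller only logs it.
            std::fprintf(stderr, "%s\n", e.what());
        }
    }
}

int main() { closeWithLogging(-1, "/proc/self/statm"); return 0; }
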
@ -49,11 +49,13 @@ MemoryTracker::~MemoryTracker()

void MemoryTracker::logPeakMemoryUsage() const
{
const auto * description = description_ptr.load(std::memory_order_relaxed);
LOG_DEBUG(&Logger::get("MemoryTracker"), "Peak memory usage{}: {}.", (description ? " " + std::string(description) : ""), formatReadableSizeWithBinarySuffix(peak));
}

void MemoryTracker::logMemoryUsage(Int64 current) const
{
const auto * description = description_ptr.load(std::memory_order_relaxed);
LOG_DEBUG(&Logger::get("MemoryTracker"), "Current memory usage{}: {}.", (description ? " " + std::string(description) : ""), formatReadableSizeWithBinarySuffix(current));
}

@ -85,7 +87,7 @@ void MemoryTracker::alloc(Int64 size)

std::stringstream message;
message << "Memory tracker";
if (description)
if (const auto * description = description_ptr.load(std::memory_order_relaxed))
message << " " << description;
message << ": fault injected. Would use " << formatReadableSizeWithBinarySuffix(will_be)
<< " (attempt to allocate chunk of " << size << " bytes)"
@ -117,7 +119,7 @@ void MemoryTracker::alloc(Int64 size)

std::stringstream message;
message << "Memory limit";
if (description)
if (const auto * description = description_ptr.load(std::memory_order_relaxed))
message << " " << description;
message << " exceeded: would use " << formatReadableSizeWithBinarySuffix(will_be)
<< " (attempt to allocate chunk of " << size << " bytes)"

@ -35,7 +35,7 @@ private:
CurrentMetrics::Metric metric = CurrentMetrics::end();

/// This description will be used as a prefix in log messages (if it isn't nullptr)
const char * description = nullptr;
std::atomic<const char *> description_ptr = nullptr;

void updatePeak(Int64 will_be);
void logMemoryUsage(Int64 current) const;
@ -114,9 +114,9 @@ public:
metric = metric_;
}

void setDescription(const char * description_)
void setDescription(const char * description)
{
description = description_;
description_ptr.store(description, std::memory_order_relaxed);
}

/// Reset the accumulated data

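The switch from a plain pointer to std::atomic<const char *> matters because setDescription can race with allocation paths that read the description from other threads; only the pointer value itself needs atomicity, so relaxed ordering suffices. A minimal sketch of the access pattern:

#include <atomic>

static std::atomic<const char *> description_ptr{nullptr};

void setDescription(const char * description)
{
    description_ptr.store(description, std::memory_order_relaxed);
}

const char * readDescription()
{
    /// Well-defined even if another thread stores concurrently.
    return description_ptr.load(std::memory_order_relaxed);
}

int main() { setDescription("for user"); (void)readDescription(); return 0; }
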
@ -85,6 +85,11 @@ protected:

static_assert(pad_left <= empty_pod_array_size && "Left Padding exceeds empty_pod_array_size. Is the element size too large?");

// If we are using an allocator with inline memory, the minimal size of
// the array must be in sync with the size of this memory.
static_assert(allocatorInitialBytes<TAllocator> == 0
|| allocatorInitialBytes<TAllocator> == initial_bytes);

char * c_start = null; /// Does not include pad_left.
char * c_end = null;
char * c_end_of_storage = null; /// Does not include pad_right.

@ -7,6 +7,7 @@
#include <IO/ReadHelpers.h>

#include <common/find_symbols.h>
#include <common/logger_useful.h>

#include <cassert>
#include <sys/types.h>
@ -22,6 +23,7 @@ namespace ErrorCodes
{
extern const int FILE_DOESNT_EXIST;
extern const int CANNOT_OPEN_FILE;
extern const int CANNOT_CLOSE_FILE;
extern const int CANNOT_READ_FROM_FILE_DESCRIPTOR;
}

@ -39,6 +41,20 @@ namespace
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
}

inline void emitErrorMsgWithFailedToCloseFile(const std::string & filename)
{
try
{
throwFromErrno(
"File descriptor for \"" + filename + "\" could not be closed. "
"Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE);
}
catch (const ErrnoException &)
{
DB::tryLogCurrentException(__PRETTY_FUNCTION__);
}
}

ssize_t readFromFD(const int fd, const char * filename, char * buf, size_t buf_size)
{
ssize_t res = 0;
@ -100,11 +116,11 @@ ProcfsMetricsProvider::ProcfsMetricsProvider(const pid_t /*tid*/)
ProcfsMetricsProvider::~ProcfsMetricsProvider()
{
if (stats_version >= 3 && 0 != ::close(thread_io_fd))
tryLogCurrentException(__PRETTY_FUNCTION__);
emitErrorMsgWithFailedToCloseFile(thread_io);
if (0 != ::close(thread_stat_fd))
tryLogCurrentException(__PRETTY_FUNCTION__);
emitErrorMsgWithFailedToCloseFile(thread_stat);
if (0 != ::close(thread_schedstat_fd))
tryLogCurrentException(__PRETTY_FUNCTION__);
emitErrorMsgWithFailedToCloseFile(thread_schedstat);
}

@ -67,9 +67,7 @@ private:
template
<
typename TKey,
typename Hash = DefaultHash<TKey>,
typename Grower = HashTableGrower<>,
typename Allocator = HashTableAllocator
typename Hash = DefaultHash<TKey>
>
class SpaceSaving
{
@ -380,7 +378,7 @@ private:
counter_map[counter->key] = counter;
}

using CounterMap = HashMap<TKey, Counter *, Hash, Grower, Allocator>;
using CounterMap = HashMapWithStackMemory<TKey, Counter *, Hash, 4>;

CounterMap counter_map;
std::vector<Counter *> counter_list;

@ -11,15 +11,15 @@ struct ContextHolder
: shared_context(DB::Context::createShared())
, context(DB::Context::createGlobal(shared_context.get()))
{
context.makeGlobalContext();
context.setPath("./");
}

ContextHolder(ContextHolder &&) = default;
};

inline ContextHolder getContext()
inline const ContextHolder & getContext()
{
ContextHolder holder;
holder.context.makeGlobalContext();
holder.context.setPath("./");
static ContextHolder holder;
return holder;
}

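The rewritten getContext relies on a function-local static ("magic static"): it is constructed on the first call, that construction is thread-safe since C++11, and every test now shares one instance instead of each call building a fresh context. The same idiom in isolation:

#include <iostream>

struct Expensive
{
    Expensive() { std::cout << "constructed once\n"; }
};

const Expensive & instance()
{
    static Expensive e;  /// initialized on first use, thread-safe in C++11
    return e;
}

int main()
{
    instance();
    instance();  /// no second construction
    return 0;
}
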
19
src/Common/tests/gtest_log.cpp
Normal file
@ -0,0 +1,19 @@
#include <string>
#include <vector>
#include <common/logger_useful.h>
#include <gtest/gtest.h>

#include <Poco/Logger.h>
#include <Poco/AutoPtr.h>
#include <Poco/NullChannel.h>

TEST(Logger, Log)
{
Poco::Logger::root().setLevel("none");
Poco::Logger::root().setChannel(Poco::AutoPtr<Poco::NullChannel>(new Poco::NullChannel()));
Logger * log = &Logger::get("Log");

/// This test checks that we don't pass this string to fmtlib, because it is the only argument.
EXPECT_NO_THROW(LOG_INFO(log, "Hello {} World"));
}
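As the test's comment says, a lone argument must bypass fmtlib so that stray braces cannot throw. A hedged illustration of the two call shapes (the second line is an assumption following the macro's contract, not part of the test):

LOG_INFO(log, "Hello {} World");        /// single argument: logged verbatim, braces kept
LOG_INFO(log, "Hello {} World", 42);    /// extra arguments: fmt substitution -> "Hello 42 World"
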
@ -18,9 +18,9 @@ static void test1()
{
using namespace DB;

static constexpr size_t initial_size = 8;
static constexpr size_t stack_threshold = 32;
using Array = PODArray<UInt64, initial_size, AllocatorWithStackMemory<Allocator<false>, stack_threshold>>;
static constexpr size_t initial_bytes = 32;
using Array = PODArray<UInt64, initial_bytes,
AllocatorWithStackMemory<Allocator<false>, initial_bytes>>;

bool res = true;

@ -139,9 +139,9 @@ static void test2()
{
using namespace DB;

static constexpr size_t initial_size = 8;
static constexpr size_t stack_threshold = 32;
using Array = PODArray<UInt64, initial_size, AllocatorWithStackMemory<Allocator<false>, stack_threshold>>;
static constexpr size_t initial_bytes = 32;
using Array = PODArray<UInt64, initial_bytes,
AllocatorWithStackMemory<Allocator<false>, initial_bytes>>;

bool res = true;

@ -389,9 +389,9 @@ static void test3()
{
using namespace DB;

static constexpr size_t initial_size = 8;
static constexpr size_t stack_threshold = 32;
using Array = PODArray<UInt64, initial_size, AllocatorWithStackMemory<Allocator<false>, stack_threshold>>;
static constexpr size_t initial_bytes = 32;
using Array = PODArray<UInt64, initial_bytes,
AllocatorWithStackMemory<Allocator<false>, initial_bytes>>;

bool res = true;

@ -18,17 +18,7 @@ PEERDIR(
contrib/restricted/ryu
)

# TODO: stub for config_version.h
CFLAGS (GLOBAL -DDBMS_NAME=\"ClickHouse\")
CFLAGS (GLOBAL -DDBMS_VERSION_MAJOR=0)
CFLAGS (GLOBAL -DDBMS_VERSION_MINOR=0)
CFLAGS (GLOBAL -DDBMS_VERSION_PATCH=0)
CFLAGS (GLOBAL -DVERSION_FULL=\"ClickHouse\")
CFLAGS (GLOBAL -DVERSION_INTEGER=0)
CFLAGS (GLOBAL -DVERSION_NAME=\"ClickHouse\")
CFLAGS (GLOBAL -DVERSION_OFFICIAL=\"\\\(arcadia\\\)\")
CFLAGS (GLOBAL -DVERSION_REVISION=0)
CFLAGS (GLOBAL -DVERSION_STRING=\"Unknown\")
INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc)

SRCS(
ActionLock.cpp

@ -156,7 +156,7 @@ struct Settings : public SettingsCollection<Settings>
M(SettingUInt64, priority, 0, "Priority of the query. 1 - the highest, higher value - lower priority; 0 - do not use priorities.", 0) \
M(SettingInt64, os_thread_priority, 0, "If non zero - set corresponding 'nice' value for query processing threads. Can be used to adjust query priority for OS scheduler.", 0) \
\
M(SettingBool, log_queries, 0, "Log requests and write the log to the system table.", 0) \
M(SettingBool, log_queries, 1, "Log requests and write the log to the system table.", 0) \
M(SettingLogQueriesType, log_queries_min_type, QueryLogElementType::QUERY_START, "query_log minimal type to log, possible values (from low to high): QUERY_START, QUERY_FINISH, EXCEPTION_BEFORE_START, EXCEPTION_WHILE_PROCESSING.", 0) \
M(SettingUInt64, log_queries_cut_to_length, 100000, "If query length is greater than specified threshold (in bytes), then cut query when writing to query log. Also limit length of printed query in ordinary text log.", 0) \
\

@ -21,19 +21,10 @@ class NullAndDoCopyBlockInputStream : public IBlockInputStream
{
public:
NullAndDoCopyBlockInputStream(const BlockInputStreamPtr & input_, BlockOutputStreamPtr output_)
: input(std::move(input_))
, output(std::move(output_))
{
input_streams.push_back(input_);
output_streams.push_back(output_);

for (auto & input_stream : input_streams)
children.push_back(input_stream);
}

NullAndDoCopyBlockInputStream(const BlockInputStreams & input_, BlockOutputStreams & output_)
: input_streams(input_), output_streams(output_)
{
for (auto & input_stream : input_)
children.push_back(input_stream);
children.push_back(input);
}

/// Suppress readPrefix and readSuffix, because they are called by copyData.
@ -53,16 +44,13 @@ protected:
/// If query was cancelled, it will be processed by child streams.
/// Part of the data will be processed.

if (input_streams.size() == 1 && output_streams.size() == 1)
copyData(*input_streams.at(0), *output_streams.at(0));
else
copyData(input_streams, output_streams);
copyData(*input, *output);
return Block();
}

private:
BlockInputStreams input_streams;
BlockOutputStreams output_streams;
BlockInputStreamPtr input;
BlockOutputStreamPtr output;
};

}

@ -5,6 +5,8 @@
#include <Interpreters/ExpressionAnalyzer.h>
#include <Columns/ColumnConst.h>
#include <Interpreters/addTypeConversionToAST.h>
#include <Storages/MergeTree/TTLMode.h>
#include <Interpreters/Context.h>

namespace DB
{
@ -67,6 +69,32 @@ TTLBlockInputStream::TTLBlockInputStream(
default_expr_list, storage.getColumns().getAllPhysical());
defaults_expression = ExpressionAnalyzer{default_expr_list, syntax_result, storage.global_context}.getActions(true);
}

if (storage.hasRowsTTL() && storage.rows_ttl_entry.mode == TTLMode::GROUP_BY)
{
current_key_value.resize(storage.rows_ttl_entry.group_by_keys.size());

ColumnNumbers keys;
for (const auto & key : storage.rows_ttl_entry.group_by_keys)
keys.push_back(header.getPositionByName(key));
agg_key_columns.resize(storage.rows_ttl_entry.group_by_keys.size());

AggregateDescriptions aggregates = storage.rows_ttl_entry.aggregate_descriptions;
for (auto & descr : aggregates)
if (descr.arguments.empty())
for (const auto & name : descr.argument_names)
descr.arguments.push_back(header.getPositionByName(name));
agg_aggregate_columns.resize(storage.rows_ttl_entry.aggregate_descriptions.size());

const Settings & settings = storage.global_context.getSettingsRef();

Aggregator::Params params(header, keys, aggregates,
false, settings.max_rows_to_group_by, settings.group_by_overflow_mode,
SettingUInt64(0), SettingUInt64(0),
settings.max_bytes_before_external_group_by, settings.empty_result_for_aggregation_by_empty_set,
storage.global_context.getTemporaryVolume(), settings.max_threads, settings.min_free_disk_space_for_temporary_data);
aggregator = std::make_unique<Aggregator>(params);
}
}

bool TTLBlockInputStream::isTTLExpired(time_t ttl) const
@ -77,7 +105,8 @@ bool TTLBlockInputStream::isTTLExpired(time_t ttl) const
Block TTLBlockInputStream::readImpl()
{
/// Skip all data if table ttl is expired for part
if (storage.hasRowsTTL() && isTTLExpired(old_ttl_infos.table_ttl.max))
if (storage.hasRowsTTL() && !storage.rows_ttl_entry.where_expression &&
storage.rows_ttl_entry.mode != TTLMode::GROUP_BY && isTTLExpired(old_ttl_infos.table_ttl.max))
{
rows_removed = data_part->rows_count;
return {};
@ -85,7 +114,16 @@ Block TTLBlockInputStream::readImpl()

Block block = children.at(0)->read();
if (!block)
{
if (aggregator && !agg_result.empty())
{
MutableColumns result_columns = header.cloneEmptyColumns();
finalizeAggregates(result_columns);
block = header.cloneWithColumns(std::move(result_columns));
}

return block;
}

if (storage.hasRowsTTL() && (force || isTTLExpired(old_ttl_infos.table_ttl.min)))
removeRowsWithExpiredTableTTL(block);
@ -114,35 +152,146 @@ void TTLBlockInputStream::readSuffixImpl()
void TTLBlockInputStream::removeRowsWithExpiredTableTTL(Block & block)
{
storage.rows_ttl_entry.expression->execute(block);
if (storage.rows_ttl_entry.where_expression)
storage.rows_ttl_entry.where_expression->execute(block);

const IColumn * ttl_column =
block.getByName(storage.rows_ttl_entry.result_column).column.get();

const IColumn * where_result_column = storage.rows_ttl_entry.where_expression ?
block.getByName(storage.rows_ttl_entry.where_result_column).column.get() : nullptr;

const auto & column_names = header.getNames();
MutableColumns result_columns;
result_columns.reserve(column_names.size());

for (auto it = column_names.begin(); it != column_names.end(); ++it)
if (!aggregator)
{
const IColumn * values_column = block.getByName(*it).column.get();
MutableColumnPtr result_column = values_column->cloneEmpty();
result_column->reserve(block.rows());
MutableColumns result_columns;
result_columns.reserve(column_names.size());
for (auto it = column_names.begin(); it != column_names.end(); ++it)
{
const IColumn * values_column = block.getByName(*it).column.get();
MutableColumnPtr result_column = values_column->cloneEmpty();
result_column->reserve(block.rows());

for (size_t i = 0; i < block.rows(); ++i)
{
UInt32 cur_ttl = getTimestampByIndex(ttl_column, i);
bool where_filter_passed = !where_result_column || where_result_column->getBool(i);
if (!isTTLExpired(cur_ttl) || !where_filter_passed)
{
new_ttl_infos.table_ttl.update(cur_ttl);
result_column->insertFrom(*values_column, i);
}
else if (it == column_names.begin())
++rows_removed;
}
result_columns.emplace_back(std::move(result_column));
}
block = header.cloneWithColumns(std::move(result_columns));
}
else
{
MutableColumns result_columns = header.cloneEmptyColumns();
MutableColumns aggregate_columns = header.cloneEmptyColumns();

size_t rows_aggregated = 0;
size_t current_key_start = 0;
size_t rows_with_current_key = 0;
for (size_t i = 0; i < block.rows(); ++i)
{
UInt32 cur_ttl = getTimestampByIndex(ttl_column, i);
if (!isTTLExpired(cur_ttl))
bool where_filter_passed = !where_result_column || where_result_column->getBool(i);
bool ttl_expired = isTTLExpired(cur_ttl) && where_filter_passed;

bool same_as_current = true;
for (size_t j = 0; j < storage.rows_ttl_entry.group_by_keys.size(); ++j)
{
const String & key_column = storage.rows_ttl_entry.group_by_keys[j];
const IColumn * values_column = block.getByName(key_column).column.get();
if (!same_as_current || (*values_column)[i] != current_key_value[j])
{
values_column->get(i, current_key_value[j]);
same_as_current = false;
}
}
if (!same_as_current)
{
if (rows_with_current_key)
calculateAggregates(aggregate_columns, current_key_start, rows_with_current_key);
finalizeAggregates(result_columns);

current_key_start = rows_aggregated;
rows_with_current_key = 0;
}

if (ttl_expired)
{
++rows_with_current_key;
++rows_aggregated;
for (const auto & name : column_names)
{
const IColumn * values_column = block.getByName(name).column.get();
auto & column = aggregate_columns[header.getPositionByName(name)];
column->insertFrom(*values_column, i);
}
}
else
{
new_ttl_infos.table_ttl.update(cur_ttl);
result_column->insertFrom(*values_column, i);
for (const auto & name : column_names)
{
const IColumn * values_column = block.getByName(name).column.get();
auto & column = result_columns[header.getPositionByName(name)];
column->insertFrom(*values_column, i);
}
}
else if (it == column_names.begin())
++rows_removed;
}
result_columns.emplace_back(std::move(result_column));
}

block = header.cloneWithColumns(std::move(result_columns));
if (rows_with_current_key)
calculateAggregates(aggregate_columns, current_key_start, rows_with_current_key);

block = header.cloneWithColumns(std::move(result_columns));
}
}

void TTLBlockInputStream::calculateAggregates(const MutableColumns & aggregate_columns, size_t start_pos, size_t length)
{
Columns aggregate_chunk;
aggregate_chunk.reserve(aggregate_columns.size());
for (const auto & name : header.getNames())
{
const auto & column = aggregate_columns[header.getPositionByName(name)];
ColumnPtr chunk_column = column->cut(start_pos, length);
aggregate_chunk.emplace_back(std::move(chunk_column));
}
aggregator->executeOnBlock(aggregate_chunk, length, agg_result, agg_key_columns,
agg_aggregate_columns, agg_no_more_keys);
}

void TTLBlockInputStream::finalizeAggregates(MutableColumns & result_columns)
{
if (!agg_result.empty())
{
auto aggregated_res = aggregator->convertToBlocks(agg_result, true, 1);
for (auto & agg_block : aggregated_res)
{
for (const auto & it : storage.rows_ttl_entry.group_by_aggregations)
std::get<2>(it)->execute(agg_block);
for (const auto & name : storage.rows_ttl_entry.group_by_keys)
{
const IColumn * values_column = agg_block.getByName(name).column.get();
auto & result_column = result_columns[header.getPositionByName(name)];
result_column->insertRangeFrom(*values_column, 0, agg_block.rows());
}
for (const auto & it : storage.rows_ttl_entry.group_by_aggregations)
{
const IColumn * values_column = agg_block.getByName(get<1>(it)).column.get();
auto & result_column = result_columns[header.getPositionByName(std::get<0>(it))];
result_column->insertRangeFrom(*values_column, 0, agg_block.rows());
}
}
}
agg_result.invalidate();
}

void TTLBlockInputStream::removeValuesWithExpiredColumnTTL(Block & block)

@ -3,6 +3,7 @@
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/IMergeTreeDataPart.h>
#include <Core/Block.h>
#include <Interpreters/Aggregator.h>

#include <common/DateLUT.h>

@ -39,6 +40,13 @@ private:
time_t current_time;
bool force;

std::unique_ptr<Aggregator> aggregator;
std::vector<Field> current_key_value;
AggregatedDataVariants agg_result;
ColumnRawPtrs agg_key_columns;
Aggregator::AggregateColumns agg_aggregate_columns;
bool agg_no_more_keys = false;

IMergeTreeDataPart::TTLInfos old_ttl_infos;
IMergeTreeDataPart::TTLInfos new_ttl_infos;
NameSet empty_columns;

@ -59,6 +67,12 @@ private:
/// Removes rows with expired table ttl and computes new ttl_infos for part
void removeRowsWithExpiredTableTTL(Block & block);

// Calculate aggregates of aggregate_columns into agg_result
void calculateAggregates(const MutableColumns & aggregate_columns, size_t start_pos, size_t length);

/// Finalize agg_result into result_columns
void finalizeAggregates(MutableColumns & result_columns);

/// Updates TTL for moves
void updateMovesTTL(Block & block);

@ -1,9 +1,6 @@
#include <thread>
#include <DataStreams/IBlockInputStream.h>
#include <DataStreams/IBlockOutputStream.h>
#include <DataStreams/copyData.h>
#include <DataStreams/ParallelInputsProcessor.h>
#include <Common/ConcurrentBoundedQueue.h>
#include <Common/ThreadPool.h>

@ -55,79 +52,6 @@ void copyDataImpl(IBlockInputStream & from, IBlockOutputStream & to, TCancelCall

inline void doNothing(const Block &) {}

namespace
{

struct ParallelInsertsHandler
{
using CancellationHook = std::function<void()>;

explicit ParallelInsertsHandler(BlockOutputStreams & output_streams, CancellationHook cancellation_hook_, size_t num_threads)
: outputs(output_streams.size()), cancellation_hook(std::move(cancellation_hook_))
{
exceptions.resize(num_threads);

for (auto & output : output_streams)
outputs.push(output.get());
}

void onBlock(Block & block, size_t /*thread_num*/)
{
IBlockOutputStream * out = nullptr;

outputs.pop(out);
out->write(block);
outputs.push(out);
}

void onFinishThread(size_t /*thread_num*/) {}
void onFinish() {}

void onException(std::exception_ptr & exception, size_t thread_num)
{
exceptions[thread_num] = exception;
cancellation_hook();
}

void rethrowFirstException()
{
for (auto & exception : exceptions)
if (exception)
std::rethrow_exception(exception);
}

ConcurrentBoundedQueue<IBlockOutputStream *> outputs;
std::vector<std::exception_ptr> exceptions;
CancellationHook cancellation_hook;
};

}

static void copyDataImpl(BlockInputStreams & inputs, BlockOutputStreams & outputs)
{
for (auto & output : outputs)
output->writePrefix();

using Processor = ParallelInputsProcessor<ParallelInsertsHandler>;
Processor * processor_ptr = nullptr;

ParallelInsertsHandler handler(outputs, [&processor_ptr]() { processor_ptr->cancel(false); }, inputs.size());
ParallelInputsProcessor<ParallelInsertsHandler> processor(inputs, nullptr, inputs.size(), handler);
processor_ptr = &processor;

processor.process();
processor.wait();
handler.rethrowFirstException();

/// readPrefix is called in ParallelInputsProcessor.
for (auto & input : inputs)
input->readSuffix();

for (auto & output : outputs)
output->writeSuffix();
}

void copyData(IBlockInputStream & from, IBlockOutputStream & to, std::atomic<bool> * is_cancelled)
{
auto is_cancelled_pred = [is_cancelled] ()
@ -138,11 +62,6 @@ void copyData(IBlockInputStream & from, IBlockOutputStream & to, std::atomic<boo
copyDataImpl(from, to, is_cancelled_pred, doNothing);
}

void copyData(BlockInputStreams & inputs, BlockOutputStreams & outputs)
{
copyDataImpl(inputs, outputs);
}

void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function<bool()> & is_cancelled)
{
copyDataImpl(from, to, is_cancelled, doNothing);

@ -16,8 +16,6 @@ class Block;
*/
void copyData(IBlockInputStream & from, IBlockOutputStream & to, std::atomic<bool> * is_cancelled = nullptr);

void copyData(BlockInputStreams & inputs, BlockOutputStreams & outputs);

void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function<bool()> & is_cancelled);

void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function<bool()> & is_cancelled,

@ -302,37 +302,33 @@ void CacheDictionary::getItemsString(

    /// Request new values sync.
    if (!cache_not_found_ids.empty())
    {
        std::vector<Key> required_ids;
        required_ids.reserve(cache_not_found_ids.size() + cache_expired_ids.size());
        std::transform(
            std::begin(cache_not_found_ids), std::end(cache_not_found_ids),
            std::back_inserter(required_ids), [](auto & pair) { return pair.first; });
        std::transform(
            std::begin(cache_expired_ids), std::end(cache_expired_ids),
            std::back_inserter(required_ids), [](auto & pair) { return pair.first; });

        auto on_cell_updated = [&] (const auto id, const auto cell_idx)
        {
            const auto attribute_value = attribute_array[cell_idx];

            map[id] = String{attribute_value};
            total_length += (attribute_value.size + 1) * cache_not_found_ids[id].size();
        };

        auto on_id_not_found = [&] (const auto id, const auto)
        {
            for (const auto row : cache_not_found_ids[id])
                total_length += get_default(row).size + 1;
        };

        auto update_unit_ptr = std::make_shared<UpdateUnit>(required_ids, on_cell_updated, on_id_not_found);

        tryPushToUpdateQueueOrThrow(update_unit_ptr);
        waitForCurrentUpdateFinish(update_unit_ptr);
    }

    /// Request new values sync.
    /// We have to request both cache_not_found_ids and cache_expired_ids.
    std::vector<Key> required_ids;
    required_ids.reserve(cache_not_found_ids.size() + cache_expired_ids.size());
    std::transform(
        std::begin(cache_not_found_ids), std::end(cache_not_found_ids),
        std::back_inserter(required_ids), [](auto & pair) { return pair.first; });
    std::transform(
        std::begin(cache_expired_ids), std::end(cache_expired_ids),
        std::back_inserter(required_ids), [](auto & pair) { return pair.first; });

    auto on_cell_updated = [&] (const auto id, const auto cell_idx)
    {
        const auto attribute_value = attribute_array[cell_idx];

        map[id] = String{attribute_value};
        total_length += (attribute_value.size + 1) * cache_not_found_ids[id].size();
    };

    auto on_id_not_found = [&] (const auto id, const auto)
    {
        for (const auto row : cache_not_found_ids[id])
            total_length += get_default(row).size + 1;
    };

    auto update_unit_ptr = std::make_shared<UpdateUnit>(required_ids, on_cell_updated, on_id_not_found);

    tryPushToUpdateQueueOrThrow(update_unit_ptr);
    waitForCurrentUpdateFinish(update_unit_ptr);

    out->getChars().reserve(total_length);

    for (const auto row : ext::range(0, ext::size(ids)))

@ -32,11 +32,12 @@ namespace
    BITS32 = 5,
};

// The following condition must always be true:
// any_cursor_position < min(END_OF_VARINT, END_OF_GROUP)
// This inequality helps to check conditions in SimpleReader.
constexpr UInt64 END_OF_VARINT = static_cast<UInt64>(-1);
constexpr UInt64 END_OF_GROUP = static_cast<UInt64>(-2);
// The following conditions must always be true:
// any_cursor_position > END_OF_VARINT
// any_cursor_position > END_OF_GROUP
// These inequalities help checking conditions in ProtobufReader::SimpleReader.
constexpr Int64 END_OF_VARINT = -1;
constexpr Int64 END_OF_GROUP = -2;

Int64 decodeZigZag(UInt64 n) { return static_cast<Int64>((n >> 1) ^ (~(n & 1) + 1)); }
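
ZigZag encoding maps signed integers onto unsigned ones so that small magnitudes stay small (0, -1, 1, -2, 2, ... map to 0, 1, 2, 3, 4, ...). A self-contained sanity check of the decoder, using a local copy of the same formula (illustrative, not part of the diff):

#include <cassert>
#include <cstdint>

// Local copy of the decodeZigZag formula above, for a standalone check.
int64_t decode_zigzag(uint64_t n) { return static_cast<int64_t>((n >> 1) ^ (~(n & 1) + 1)); }

int main()
{
    assert(decode_zigzag(0) == 0);
    assert(decode_zigzag(1) == -1);
    assert(decode_zigzag(2) == 1);
    assert(decode_zigzag(3) == -2);
    assert(decode_zigzag(4) == 2);
    return 0;
}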
@ -77,7 +78,7 @@ void ProtobufReader::SimpleReader::endMessage(bool ignore_errors)
    if (!current_message_level)
        return;

    UInt64 root_message_end = (current_message_level == 1) ? current_message_end : parent_message_ends.front();
    Int64 root_message_end = (current_message_level == 1) ? current_message_end : parent_message_ends.front();
    if (cursor != root_message_end)
    {
        if (cursor < root_message_end)

@ -95,6 +96,9 @@ void ProtobufReader::SimpleReader::endMessage(bool ignore_errors)
void ProtobufReader::SimpleReader::startNestedMessage()
{
    assert(current_message_level >= 1);
    if ((cursor > field_end) && (field_end != END_OF_GROUP))
        throwUnknownFormat();

    // Start reading a nested message which is located inside a length-delimited field
    // of another message.
    parent_message_ends.emplace_back(current_message_end);

@ -146,7 +150,7 @@ bool ProtobufReader::SimpleReader::readFieldNumber(UInt32 & field_number)
        throwUnknownFormat();
    }

    if (cursor >= current_message_end)
    if ((cursor >= current_message_end) && (current_message_end != END_OF_GROUP))
        return false;

    UInt64 varint = readVarint();

@ -196,11 +200,17 @@ bool ProtobufReader::SimpleReader::readFieldNumber(UInt32 & field_number)

bool ProtobufReader::SimpleReader::readUInt(UInt64 & value)
{
    if (field_end == END_OF_VARINT)
    {
        value = readVarint();
        field_end = cursor;
        return true;
    }

    if (unlikely(cursor >= field_end))
        return false;

    value = readVarint();
    if (field_end == END_OF_VARINT)
        field_end = cursor;
    return true;
}

@ -227,6 +237,7 @@ bool ProtobufReader::SimpleReader::readFixed(T & value)
{
    if (unlikely(cursor >= field_end))
        return false;

    readBinary(&value, sizeof(T));
    return true;
}

@ -124,12 +124,12 @@ private:
    void ignoreGroup();

    ReadBuffer & in;
    UInt64 cursor;
    Int64 cursor;
    size_t current_message_level;
    UInt64 current_message_end;
    std::vector<UInt64> parent_message_ends;
    UInt64 field_end;
    UInt64 last_string_pos;
    Int64 current_message_end;
    std::vector<Int64> parent_message_ends;
    Int64 field_end;
    Int64 last_string_pos;
};
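
Switching the cursor and sentinels to Int64 makes the stated inequalities hold trivially, since every real cursor position is non-negative. A tiny illustrative check with local copies of the constants (not part of the diff):

#include <cstdint>

constexpr int64_t end_of_varint = -1;  /// local copy of END_OF_VARINT
constexpr int64_t end_of_group = -2;   /// local copy of END_OF_GROUP

/// Any non-negative cursor position compares greater than both sentinels.
static_assert(int64_t{0} > end_of_varint && int64_t{0} > end_of_group,
              "sentinels must stay below every valid cursor position");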

class IConverter

129
src/Functions/URL/port.cpp
Normal file
@ -0,0 +1,129 @@
#include <Functions/FunctionFactory.h>
#include <Functions/IFunctionImpl.h>
#include <Common/StringUtils/StringUtils.h>
#include <DataTypes/DataTypesNumber.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnConst.h>
#include "domain.h"


namespace DB
{

namespace ErrorCodes
{
    extern const int ILLEGAL_COLUMN;
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

struct FunctionPort : public IFunction
{
    static constexpr auto name = "port";
    static FunctionPtr create(const Context &) { return std::make_shared<FunctionPort>(); }

    String getName() const override { return name; }
    bool isVariadic() const override { return true; }
    size_t getNumberOfArguments() const override { return 0; }
    bool useDefaultImplementationForConstants() const override { return true; }
    ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }

    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
    {
        if (arguments.size() != 1 && arguments.size() != 2)
            throw Exception("Number of arguments for function " + getName() + " doesn't match: passed "
                + std::to_string(arguments.size()) + ", should be 1 or 2",
                ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

        if (!WhichDataType(arguments[0].type).isString())
            throw Exception("Illegal type " + arguments[0].type->getName() + " of first argument of function " + getName() + ". Must be String.",
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

        if (arguments.size() == 2 && !WhichDataType(arguments[1].type).isUInt16())
            throw Exception("Illegal type " + arguments[1].type->getName() + " of second argument of function " + getName() + ". Must be UInt16.",
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

        return std::make_shared<DataTypeUInt16>();
    }

    void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t) override
    {
        UInt16 default_port = 0;
        if (arguments.size() == 2)
        {
            const auto * port_column = checkAndGetColumn<ColumnConst>(block.getByPosition(arguments[1]).column.get());
            if (!port_column)
                throw Exception("Second argument for function " + getName() + " must be constant UInt16", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
            default_port = port_column->getValue<UInt16>();
        }

        const ColumnPtr url_column = block.getByPosition(arguments[0]).column;
        if (const ColumnString * url_strs = checkAndGetColumn<ColumnString>(url_column.get()))
        {
            auto col_res = ColumnVector<UInt16>::create();
            typename ColumnVector<UInt16>::Container & vec_res = col_res->getData();
            vec_res.resize(url_column->size());

            vector(default_port, url_strs->getChars(), url_strs->getOffsets(), vec_res);
            block.getByPosition(result).column = std::move(col_res);
        }
        else
            throw Exception(
                "Illegal column " + block.getByPosition(arguments[0]).column->getName() + " of argument of function " + getName(),
                ErrorCodes::ILLEGAL_COLUMN);
    }

private:
    static void vector(UInt16 default_port, const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray<UInt16> & res)
    {
        size_t size = offsets.size();

        ColumnString::Offset prev_offset = 0;
        for (size_t i = 0; i < size; ++i)
        {
            res[i] = extractPort(default_port, data, prev_offset, offsets[i] - prev_offset - 1);
            prev_offset = offsets[i];
        }
    }

    static UInt16 extractPort(UInt16 default_port, const ColumnString::Chars & buf, size_t offset, size_t size)
    {
        const char * p = reinterpret_cast<const char *>(&buf[0]) + offset;
        const char * end = p + size;

        StringRef host = getURLHost(p, size);
        if (!host.size)
            return default_port;
        if (host.size == size)
            return default_port;

        p = host.data + host.size;
        if (*p++ != ':')
            return default_port;

        /// Accumulate the digits after ':'. Start from zero so that an explicit
        /// default does not leak into the accumulated value.
        Int64 port = 0;
        const char * digits_begin = p;
        while (p < end)
        {
            if (*p == '/')
                break;
            if (!isNumericASCII(*p))
                return default_port;

            port = (port * 10) + (*p - '0');
            if (port < 0 || port > UInt16(-1))
                return default_port;
            ++p;
        }
        /// No digits after ':': fall back to the default.
        if (p == digits_begin)
            return default_port;
        return port;
    }
};

void registerFunctionPort(FunctionFactory & factory)
{
    factory.registerFunction<FunctionPort>();
}

}
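
For orientation, the expected behavior of the resulting SQL function, sketched with assumed values (not from the diff):

/// Assumed examples (illustrative only):
///   port('http://example.com:8080/path')  -> 8080
///   port('http://example.com/path')       -> 0    (no port, implicit default)
///   port('http://example.com/path', 80)   -> 80   (no port, explicit default)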

@ -8,6 +8,7 @@ void registerFunctionDomain(FunctionFactory & factory);
void registerFunctionDomainWithoutWWW(FunctionFactory & factory);
void registerFunctionFirstSignificantSubdomain(FunctionFactory & factory);
void registerFunctionTopLevelDomain(FunctionFactory & factory);
void registerFunctionPort(FunctionFactory & factory);
void registerFunctionPath(FunctionFactory & factory);
void registerFunctionPathFull(FunctionFactory & factory);
void registerFunctionQueryString(FunctionFactory & factory);

@ -33,6 +34,7 @@ void registerFunctionsURL(FunctionFactory & factory)
    registerFunctionDomainWithoutWWW(factory);
    registerFunctionFirstSignificantSubdomain(factory);
    registerFunctionTopLevelDomain(factory);
    registerFunctionPort(factory);
    registerFunctionPath(factory);
    registerFunctionPathFull(factory);
    registerFunctionQueryString(factory);

@ -153,10 +153,8 @@ bool FunctionArrayDistinct::executeNumber(
    if (nullable_col)
        src_null_map = &nullable_col->getNullMapData();

    using Set = ClearableHashSet<T,
        DefaultHash<T>,
        HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(T)>>;
    using Set = ClearableHashSetWithStackMemory<T, DefaultHash<T>,
        INITIAL_SIZE_DEGREE>;

    Set set;
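
This and the following hunks collapse the spelled-out grower/stack-allocator pattern into *WithStackMemory aliases. The alias definitions themselves are not part of this diff, but given the pattern they replace, they plausibly look like this sketch:

/// Assumed shape of the new alias (not shown in this diff; illustrative only):
template <typename Key, typename Hash, size_t initial_size_degree>
using ClearableHashSetWithStackMemory = ClearableHashSet<
    Key,
    Hash,
    HashTableGrower<initial_size_degree>,
    HashTableAllocatorWithStackMemory<(1ULL << initial_size_degree) * sizeof(Key)>>;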

@ -201,10 +199,8 @@ bool FunctionArrayDistinct::executeString(

    ColumnString & res_data_column_string = typeid_cast<ColumnString &>(res_data_col);

    using Set = ClearableHashSet<StringRef,
        StringRefHash,
        HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(StringRef)>>;
    using Set = ClearableHashSetWithStackMemory<StringRef, StringRefHash,
        INITIAL_SIZE_DEGREE>;

    const PaddedPODArray<UInt8> * src_null_map = nullptr;

@ -249,8 +245,8 @@ void FunctionArrayDistinct::executeHashed(
    ColumnArray::Offsets & res_offsets,
    const ColumnNullable * nullable_col)
{
    using Set = ClearableHashSet<UInt128, UInt128TrivialHash, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(UInt128)>>;
    using Set = ClearableHashSetWithStackMemory<UInt128, UInt128TrivialHash,
        INITIAL_SIZE_DEGREE>;

    const PaddedPODArray<UInt8> * src_null_map = nullptr;
@ -64,36 +64,41 @@ private:
template <typename T>
struct MethodOneNumber
{
    using Set = ClearableHashMap<T, UInt32, DefaultHash<T>, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(T)>>;
    using Set = ClearableHashMapWithStackMemory<T, UInt32, DefaultHash<T>,
        INITIAL_SIZE_DEGREE>;

    using Method = ColumnsHashing::HashMethodOneNumber<typename Set::value_type, UInt32, T, false>;
};

struct MethodString
{
    using Set = ClearableHashMap<StringRef, UInt32, StringRefHash, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(StringRef)>>;
    using Set = ClearableHashMapWithStackMemory<StringRef, UInt32, StringRefHash,
        INITIAL_SIZE_DEGREE>;

    using Method = ColumnsHashing::HashMethodString<typename Set::value_type, UInt32, false, false>;
};

struct MethodFixedString
{
    using Set = ClearableHashMap<StringRef, UInt32, StringRefHash, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(StringRef)>>;
    using Set = ClearableHashMapWithStackMemory<StringRef, UInt32, StringRefHash,
        INITIAL_SIZE_DEGREE>;

    using Method = ColumnsHashing::HashMethodFixedString<typename Set::value_type, UInt32, false, false>;
};

struct MethodFixed
{
    using Set = ClearableHashMap<UInt128, UInt32, UInt128HashCRC32, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(UInt128)>>;
    using Set = ClearableHashMapWithStackMemory<UInt128, UInt32, UInt128HashCRC32,
        INITIAL_SIZE_DEGREE>;

    using Method = ColumnsHashing::HashMethodKeysFixed<typename Set::value_type, UInt128, UInt32, false, false, false>;
};

struct MethodHashed
{
    using Set = ClearableHashMap<UInt128, UInt32, UInt128TrivialHash, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(UInt128)>>;
    using Set = ClearableHashMapWithStackMemory<UInt128, UInt32, UInt128TrivialHash,
        INITIAL_SIZE_DEGREE>;

    using Method = ColumnsHashing::HashMethodHashed<typename Set::value_type, UInt32, false>;
};

@ -308,12 +308,9 @@ void FunctionArrayEnumerateRankedExtended<Derived>::executeMethodImpl(
    const size_t depth_to_look = arrays_depths.max_array_depth;
    const auto & offsets = *offsets_by_depth[depth_to_look - 1];

    using Map = ClearableHashMap<
        UInt128,
        UInt32,
        UInt128TrivialHash,
        HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(UInt128)>>;
    using Map = ClearableHashMapWithStackMemory<UInt128, UInt32,
        UInt128TrivialHash, INITIAL_SIZE_DEGREE>;

    Map indices;

    std::vector<size_t> indices_by_depth(depth_to_look);

@ -418,16 +418,15 @@ void FunctionArrayIntersect::executeImpl(Block & block, const ColumnNumbers & ar
    TypeListNativeNumbers::forEach(NumberExecutor(arrays, not_nullable_nested_return_type, result_column));
    TypeListDecimalNumbers::forEach(DecimalExecutor(arrays, not_nullable_nested_return_type, result_column));

    using DateMap = ClearableHashMap<DataTypeDate::FieldType, size_t, DefaultHash<DataTypeDate::FieldType>,
        HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(DataTypeDate::FieldType)>>;
    using DateMap = ClearableHashMapWithStackMemory<DataTypeDate::FieldType,
        size_t, DefaultHash<DataTypeDate::FieldType>, INITIAL_SIZE_DEGREE>;

    using DateTimeMap = ClearableHashMap<DataTypeDateTime::FieldType, size_t, DefaultHash<DataTypeDateTime::FieldType>,
        HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(DataTypeDateTime::FieldType)>>;
    using DateTimeMap = ClearableHashMapWithStackMemory<
        DataTypeDateTime::FieldType, size_t,
        DefaultHash<DataTypeDateTime::FieldType>, INITIAL_SIZE_DEGREE>;

    using StringMap = ClearableHashMap<StringRef, size_t, StringRefHash, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(StringRef)>>;
    using StringMap = ClearableHashMapWithStackMemory<StringRef, size_t,
        StringRefHash, INITIAL_SIZE_DEGREE>;

    if (!result_column)
    {

@ -455,8 +454,8 @@ void FunctionArrayIntersect::executeImpl(Block & block, const ColumnNumbers & ar
template <typename T, size_t>
void FunctionArrayIntersect::NumberExecutor::operator()()
{
    using Map = ClearableHashMap<T, size_t, DefaultHash<T>, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(T)>>;
    using Map = ClearableHashMapWithStackMemory<T, size_t, DefaultHash<T>,
        INITIAL_SIZE_DEGREE>;

    if (!result && typeid_cast<const DataTypeNumber<T> *>(data_type.get()))
        result = execute<Map, ColumnVector<T>, true>(arrays, ColumnVector<T>::create());

@ -465,8 +464,8 @@ void FunctionArrayIntersect::NumberExecutor::operator()()
template <typename T, size_t>
void FunctionArrayIntersect::DecimalExecutor::operator()()
{
    using Map = ClearableHashMap<T, size_t, DefaultHash<T>, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(T)>>;
    using Map = ClearableHashMapWithStackMemory<T, size_t, DefaultHash<T>,
        INITIAL_SIZE_DEGREE>;

    if (!result)
        if (auto * decimal = typeid_cast<const DataTypeDecimal<T> *>(data_type.get()))

@ -66,36 +66,41 @@ private:
template <typename T>
struct MethodOneNumber
{
    using Set = ClearableHashSet<T, DefaultHash<T>, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(T)>>;
    using Set = ClearableHashSetWithStackMemory<T, DefaultHash<T>,
        INITIAL_SIZE_DEGREE>;

    using Method = ColumnsHashing::HashMethodOneNumber<typename Set::value_type, void, T, false>;
};

struct MethodString
{
    using Set = ClearableHashSet<StringRef, StringRefHash, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(StringRef)>>;
    using Set = ClearableHashSetWithStackMemory<StringRef, StringRefHash,
        INITIAL_SIZE_DEGREE>;

    using Method = ColumnsHashing::HashMethodString<typename Set::value_type, void, false, false>;
};

struct MethodFixedString
{
    using Set = ClearableHashSet<StringRef, StringRefHash, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(StringRef)>>;
    using Set = ClearableHashSetWithStackMemory<StringRef, StringRefHash,
        INITIAL_SIZE_DEGREE>;

    using Method = ColumnsHashing::HashMethodFixedString<typename Set::value_type, void, false, false>;
};

struct MethodFixed
{
    using Set = ClearableHashSet<UInt128, UInt128HashCRC32, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(UInt128)>>;
    using Set = ClearableHashSetWithStackMemory<UInt128, UInt128HashCRC32,
        INITIAL_SIZE_DEGREE>;

    using Method = ColumnsHashing::HashMethodKeysFixed<typename Set::value_type, UInt128, void, false, false, false>;
};

struct MethodHashed
{
    using Set = ClearableHashSet<UInt128, UInt128TrivialHash, HashTableGrower<INITIAL_SIZE_DEGREE>,
        HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(UInt128)>>;
    using Set = ClearableHashSetWithStackMemory<UInt128, UInt128TrivialHash,
        INITIAL_SIZE_DEGREE>;

    using Method = ColumnsHashing::HashMethodHashed<typename Set::value_type, void, false>;
};

@ -425,6 +425,7 @@ SRCS(
    URL/path.cpp
    URL/pathFull.cpp
    URL/protocol.cpp
    URL/port.cpp
    URL/queryStringAndFragment.cpp
    URL/queryString.cpp
    URL/registerFunctionsURL.cpp

@ -283,7 +283,9 @@ static void parseComplexEscapeSequence(Vector & s, ReadBuffer & buf)
    if (buf.eof())
        throw Exception("Cannot parse escape sequence", ErrorCodes::CANNOT_PARSE_ESCAPE_SEQUENCE);

    if (*buf.position() == 'x')
    char char_after_backslash = *buf.position();

    if (char_after_backslash == 'x')
    {
        ++buf.position();
        /// escape sequence of the form \xAA

@ -291,7 +293,7 @@ static void parseComplexEscapeSequence(Vector & s, ReadBuffer & buf)
        readPODBinary(hex_code, buf);
        s.push_back(unhex2(hex_code));
    }
    else if (*buf.position() == 'N')
    else if (char_after_backslash == 'N')
    {
        /// Support for NULLs: \N sequence must be parsed as empty string.
        ++buf.position();

@ -299,7 +301,22 @@ static void parseComplexEscapeSequence(Vector & s, ReadBuffer & buf)
    else
    {
        /// The usual escape sequence of a single character.
        s.push_back(parseEscapeSequence(*buf.position()));
        char decoded_char = parseEscapeSequence(char_after_backslash);

        /// For convenience when using LIKE and regular expressions,
        /// we keep the backslash when the user writes something like 'Hello 100\%':
        /// it is parsed as Hello 100\% instead of Hello 100%.
        if (decoded_char != '\\'
            && decoded_char != '\''
            && decoded_char != '"'
            && decoded_char != '`' /// MySQL style identifiers
            && decoded_char != '/' /// JavaScript in HTML
            && !isControlASCII(decoded_char))
        {
            s.push_back('\\');
        }

        s.push_back(decoded_char);
        ++buf.position();
    }
}
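
The net effect of the new branch, shown as assumed input/output pairs (illustrative, not from the diff):

/// Assumed examples (illustrative only):
///   "100\%"  parses to "100\%"    /// backslash kept: '%' matters to LIKE and regexps
///   "\n"     parses to a newline  /// control escapes decode exactly as before
///   "\'"     parses to "'"        /// quotes still drop the backslash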

@ -37,9 +37,6 @@ target_link_libraries (parse_int_perf2 PRIVATE clickhouse_common_io)
add_executable (read_write_int read_write_int.cpp)
target_link_libraries (read_write_int PRIVATE clickhouse_common_io)

add_executable (mempbrk mempbrk.cpp)
target_link_libraries (mempbrk PRIVATE clickhouse_common_io)

add_executable (o_direct_and_dirty_pages o_direct_and_dirty_pages.cpp)
target_link_libraries (o_direct_and_dirty_pages PRIVATE clickhouse_common_io)

@ -1,90 +0,0 @@
#include <string>
#include <iostream>
#include <iomanip>

#include <Common/Stopwatch.h>

#include <Core/Types.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromFileDescriptor.h>

#include <common/find_symbols.h>

namespace DB
{
namespace ErrorCodes
{
    extern const int CANNOT_PARSE_ESCAPE_SEQUENCE;
}
}


namespace test
{
static void readEscapedString(DB::String & s, DB::ReadBuffer & buf)
{
    s = "";
    while (!buf.eof())
    {
        const char * next_pos = find_first_symbols<'\b', '\f', '\n', '\r', '\t', '\0', '\\'>(buf.position(), buf.buffer().end());

        s.append(buf.position(), next_pos - buf.position());
        buf.position() += next_pos - buf.position();

        if (!buf.hasPendingData())
            continue;

        if (*buf.position() == '\t' || *buf.position() == '\n')
            return;

        if (*buf.position() == '\\')
        {
            ++buf.position();
            if (buf.eof())
                throw DB::Exception("Cannot parse escape sequence", DB::ErrorCodes::CANNOT_PARSE_ESCAPE_SEQUENCE);
            s += DB::parseEscapeSequence(*buf.position());
            ++buf.position();
        }
    }
}
}


int main(int, char **)
{
    try
    {
        DB::ReadBufferFromFileDescriptor in(STDIN_FILENO);
        // DB::WriteBufferFromFileDescriptor out(STDOUT_FILENO);
        std::string s;
        size_t rows = 0;

        Stopwatch watch;

        while (!in.eof())
        {
            test::readEscapedString(s, in);
            in.ignore();

            ++rows;

            /* DB::writeEscapedString(s, out);
            DB::writeChar('\n', out);*/
        }

        watch.stop();
        std::cerr << std::fixed << std::setprecision(2)
            << "Read " << rows << " rows (" << in.count() / 1000000.0 << " MB) in " << watch.elapsedSeconds() << " sec., "
            << rows / watch.elapsedSeconds() << " rows/sec. (" << in.count() / watch.elapsedSeconds() / 1000000 << " MB/s.)"
            << std::endl;
    }
    catch (const DB::Exception & e)
    {
        std::cerr << e.what() << ", " << e.displayText() << std::endl;
        return 1;
    }

    return 0;
}
Some files were not shown because too many files have changed in this diff.