Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-26 17:41:59 +00:00

Commit 7a88ca31cc: Merge branch 'master' into async-insert
@@ -290,6 +290,12 @@ if (COMPILER_GCC OR COMPILER_CLANG)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
 endif ()
 
+# falign-functions=32 prevents random performance regressions with code changes, thus providing
+# more stable benchmarks.
+if (COMPILER_GCC OR COMPILER_CLANG)
+    set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
+endif ()
+
 # Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
 option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
@@ -271,9 +271,13 @@ struct integer<Bits, Signed>::_impl
         /// As to_Integral does a static_cast to int64_t, it may result in UB.
         /// The necessary check here is that long double has enough significant (mantissa) bits to store the
         /// int64_t max value precisely.
+
+        //TODO Be compatible with Apple aarch64
+#if not (defined(__APPLE__) && defined(__aarch64__))
         static_assert(LDBL_MANT_DIG >= 64,
                       "On your system long double has less than 64 precision bits,"
                       "which may result in UB when initializing double from int64_t");
+#endif
 
         if ((rhs > 0 && rhs < static_cast<long double>(max_int)) || (rhs < 0 && rhs > static_cast<long double>(min_int)))
         {
@@ -12,7 +12,8 @@
 ///
 /// NOTE: it should be used with caution.
 #define SCOPE_EXIT_MEMORY(...) SCOPE_EXIT( \
-    MemoryTracker::LockExceptionInThread lock_memory_tracker; \
+    MemoryTracker::LockExceptionInThread \
+        lock_memory_tracker(VariableContext::Global); \
     __VA_ARGS__; \
 )
 
@@ -56,7 +57,8 @@
 #define SCOPE_EXIT_MEMORY_SAFE(...) SCOPE_EXIT( \
     try \
     { \
-        MemoryTracker::LockExceptionInThread lock_memory_tracker; \
+        MemoryTracker::LockExceptionInThread \
+            lock_memory_tracker(VariableContext::Global); \
         __VA_ARGS__; \
     } \
     catch (...) \
@@ -1,7 +1,7 @@
 if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
     set (ARCH_AMD64 1)
 endif ()
-if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
+if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)")
     set (ARCH_AARCH64 1)
 endif ()
 if (ARCH_AARCH64 OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm")
@@ -4,6 +4,9 @@ set (DEFAULT_LIBS "${DEFAULT_LIBS} ${COVERAGE_OPTION} -lc -lm -lpthread -ldl")
 
 if (COMPILER_GCC)
     set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc_eh")
+    if (ARCH_AARCH64)
+        set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc")
+    endif ()
 endif ()
 
 message(STATUS "Default libraries: ${DEFAULT_LIBS}")
cmake/darwin/toolchain-aarch64.cmake (new file, 14 lines)
@@ -0,0 +1,14 @@
+set (CMAKE_SYSTEM_NAME "Darwin")
+set (CMAKE_SYSTEM_PROCESSOR "aarch64")
+set (CMAKE_C_COMPILER_TARGET "aarch64-apple-darwin")
+set (CMAKE_CXX_COMPILER_TARGET "aarch64-apple-darwin")
+set (CMAKE_ASM_COMPILER_TARGET "aarch64-apple-darwin")
+set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64")
+
+set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
+
+set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
+set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
+
+set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
+set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
@@ -64,7 +64,8 @@ if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY)
     ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR
     ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "ppc64le" ) OR
     ( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR
-    ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" )
+    ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) OR
+    ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "aarch64" )
 )
     set (_ldap_supported_platform TRUE)
 endif ()
@@ -1,3 +1,7 @@
+if (OS_DARWIN AND ARCH_AARCH64)
+    set (ENABLE_ROCKSDB OFF CACHE INTERNAL "")
+endif()
+
 option(ENABLE_ROCKSDB "Enable ROCKSDB" ${ENABLE_LIBRARIES})
 
 if (NOT ENABLE_ROCKSDB)
contrib/CMakeLists.txt (vendored, 6 lines changed)
@@ -96,14 +96,8 @@ if (USE_INTERNAL_ZLIB_LIBRARY)
     add_subdirectory (${INTERNAL_ZLIB_NAME})
     # We should use same defines when including zlib.h as used when zlib compiled
     target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
-    if (TARGET zlibstatic)
-        target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
-    endif ()
     if (ARCH_AMD64 OR ARCH_AARCH64)
         target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK)
-        if (TARGET zlibstatic)
-            target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK)
-        endif ()
     endif ()
 endif ()
 
contrib/NuRaft (vendored)
@@ -1 +1 @@
-Subproject commit d2feb5978b979729a07c3ca76eaa4ab94cef4ceb
+Subproject commit 377f8e77491d9f66ce8e32e88aae19dffe8dc4d7

contrib/boost (vendored)
@@ -1 +1 @@
-Subproject commit ee24fa55bc46e4d2ce7d0d052cc5a0d9b1be8c36
+Subproject commit a8d43d3142cc6b26fc55bec33f7f6edb1156ab7a

contrib/datasketches-cpp (vendored)
@@ -1 +1 @@
-Subproject commit f915d35b2de676683493c86c585141a1e1c83334
+Subproject commit 45885c0c8c0807bb9480886d60ca7042000a4c43
@@ -1,10 +1,13 @@
-if (SANITIZE OR NOT (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE) OR NOT (OS_LINUX OR OS_FREEBSD OR OS_DARWIN))
+if (SANITIZE OR NOT (
+    ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE)) OR
+    (OS_DARWIN AND CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
+))
     if (ENABLE_JEMALLOC)
         message (${RECONFIGURE_MESSAGE_LEVEL}
-                 "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64 or ppc64le on linux or freebsd.")
+                 "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds.")
-    endif()
+    endif ()
     set (ENABLE_JEMALLOC OFF)
-else()
+else ()
     option (ENABLE_JEMALLOC "Enable jemalloc allocator" ${ENABLE_LIBRARIES})
 endif ()
 
@@ -34,9 +37,9 @@ if (OS_LINUX)
     # avoid spurious latencies and additional work associated with
     # MADV_DONTNEED. See
     # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation.
-    set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:10000")
+    set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000")
 else()
-    set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:10000")
+    set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000")
 endif()
 # CACHE variable is empty, to allow changing defaults without necessity
 # to purge cache
@@ -42,7 +42,7 @@
  * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
  * bits are the same as bit 47.
  */
-#define LG_VADDR 48
+#define LG_VADDR 64
 
 /* Defined if C11 atomics are available. */
 #define JEMALLOC_C11_ATOMICS 1
@@ -101,11 +101,6 @@
  */
 #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1
 
-/*
- * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
- */
-#define JEMALLOC_HAVE_CLOCK_REALTIME 1
-
 /*
  * Defined if _malloc_thread_cleanup() exists.  At least in the case of
  * FreeBSD, pthread_key_create() allocates, which if used during malloc
@@ -181,14 +176,14 @@
 /* #undef LG_QUANTUM */
 
 /* One page is 2^LG_PAGE bytes. */
-#define LG_PAGE 16
+#define LG_PAGE 14
 
 /*
  * One huge page is 2^LG_HUGEPAGE bytes.  Note that this is defined even if the
  * system does not explicitly support huge pages; system calls that require
  * explicit huge page support are separately configured.
  */
-#define LG_HUGEPAGE 29
+#define LG_HUGEPAGE 21
 
 /*
  * If defined, adjacent virtual memory mappings with identical attributes
@@ -356,7 +351,7 @@
 /* #undef JEMALLOC_EXPORT */
 
 /* config.malloc_conf options string. */
-#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"
+#define JEMALLOC_CONFIG_MALLOC_CONF ""
 
 /* If defined, jemalloc takes the malloc/free/etc. symbol names. */
 /* #undef JEMALLOC_IS_MALLOC */
@@ -66,7 +66,7 @@
 #cmakedefine WITH_SASL_OAUTHBEARER 1
 #cmakedefine WITH_SASL_CYRUS 1
 // crc32chw
-#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32))
+#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) && !(defined(__aarch64__) && defined(__APPLE__))
 #define WITH_CRC32C_HW 1
 #endif
 // regex
contrib/openldap-cmake/darwin_aarch64/include/lber_types.h (new file, 63 lines)
@@ -0,0 +1,63 @@
+/* include/lber_types.h.  Generated from lber_types.hin by configure.  */
+/* $OpenLDAP$ */
+/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
+ *
+ * Copyright 1998-2020 The OpenLDAP Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ */
+
+/*
+ * LBER types
+ */
+
+#ifndef _LBER_TYPES_H
+#define _LBER_TYPES_H
+
+#include <ldap_cdefs.h>
+
+LDAP_BEGIN_DECL
+
+/* LBER boolean, enum, integers (32 bits or larger) */
+#define LBER_INT_T int
+
+/* LBER tags (32 bits or larger) */
+#define LBER_TAG_T long
+
+/* LBER socket descriptor */
+#define LBER_SOCKET_T int
+
+/* LBER lengths (32 bits or larger) */
+#define LBER_LEN_T long
+
+/* ------------------------------------------------------------ */
+
+/* booleans, enumerations, and integers */
+typedef LBER_INT_T ber_int_t;
+
+/* signed and unsigned versions */
+typedef signed LBER_INT_T ber_sint_t;
+typedef unsigned LBER_INT_T ber_uint_t;
+
+/* tags */
+typedef unsigned LBER_TAG_T ber_tag_t;
+
+/* "socket" descriptors */
+typedef LBER_SOCKET_T ber_socket_t;
+
+/* lengths */
+typedef unsigned LBER_LEN_T ber_len_t;
+
+/* signed lengths */
+typedef signed LBER_LEN_T ber_slen_t;
+
+LDAP_END_DECL
+
+#endif /* _LBER_TYPES_H */
contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h (new file, 74 lines)
@@ -0,0 +1,74 @@
+/* include/ldap_config.h.  Generated from ldap_config.hin by configure.  */
+/* $OpenLDAP$ */
+/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
+ *
+ * Copyright 1998-2020 The OpenLDAP Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ */
+
+/*
+ * This file works in conjunction with OpenLDAP configure system.
+ * If you do no like the values below, adjust your configure options.
+ */
+
+#ifndef _LDAP_CONFIG_H
+#define _LDAP_CONFIG_H
+
+/* directory separator */
+#ifndef LDAP_DIRSEP
+#ifndef _WIN32
+#define LDAP_DIRSEP "/"
+#else
+#define LDAP_DIRSEP "\\"
+#endif
+#endif
+
+/* directory for temporary files */
+#if defined(_WIN32)
+# define LDAP_TMPDIR "C:\\."	/* we don't have much of a choice */
+#elif defined( _P_tmpdir )
+# define LDAP_TMPDIR _P_tmpdir
+#elif defined( P_tmpdir )
+# define LDAP_TMPDIR P_tmpdir
+#elif defined( _PATH_TMPDIR )
+# define LDAP_TMPDIR _PATH_TMPDIR
+#else
+# define LDAP_TMPDIR LDAP_DIRSEP "tmp"
+#endif
+
+/* directories */
+#ifndef LDAP_BINDIR
+#define LDAP_BINDIR "/tmp/ldap-prefix/bin"
+#endif
+#ifndef LDAP_SBINDIR
+#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin"
+#endif
+#ifndef LDAP_DATADIR
+#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap"
+#endif
+#ifndef LDAP_SYSCONFDIR
+#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap"
+#endif
+#ifndef LDAP_LIBEXECDIR
+#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec"
+#endif
+#ifndef LDAP_MODULEDIR
+#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap"
+#endif
+#ifndef LDAP_RUNDIR
+#define LDAP_RUNDIR "/tmp/ldap-prefix/var"
+#endif
+#ifndef LDAP_LOCALEDIR
+#define LDAP_LOCALEDIR ""
+#endif
+
+
+#endif /* _LDAP_CONFIG_H */
contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h (new file, 61 lines)
@@ -0,0 +1,61 @@
+/* include/ldap_features.h.  Generated from ldap_features.hin by configure.  */
+/* $OpenLDAP$ */
+/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
+ *
+ * Copyright 1998-2020 The OpenLDAP Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ */
+
+/*
+ * LDAP Features
+ */
+
+#ifndef _LDAP_FEATURES_H
+#define _LDAP_FEATURES_H 1
+
+/* OpenLDAP API version macros */
+#define LDAP_VENDOR_VERSION 20501
+#define LDAP_VENDOR_VERSION_MAJOR 2
+#define LDAP_VENDOR_VERSION_MINOR 5
+#define LDAP_VENDOR_VERSION_PATCH X
+
+/*
+** WORK IN PROGRESS!
+**
+** OpenLDAP reentrancy/thread-safeness should be dynamically
+** checked using ldap_get_option().
+**
+** The -lldap implementation is not thread-safe.
+**
+** The -lldap_r implementation is:
+**   LDAP_API_FEATURE_THREAD_SAFE (basic thread safety)
+** but also be:
+**   LDAP_API_FEATURE_SESSION_THREAD_SAFE
+**   LDAP_API_FEATURE_OPERATION_THREAD_SAFE
+**
+** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE
+** can be used to determine if -lldap_r is available at compile
+** time.  You must define LDAP_THREAD_SAFE if and only if you
+** link with -lldap_r.
+**
+** If you fail to define LDAP_THREAD_SAFE when linking with
+** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap,
+** provided header definitions and declarations may be incorrect.
+**
+*/
+
+/* is -lldap_r available or not */
+#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1
+
+/* LDAP v2 Referrals */
+/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */
+
+#endif /* LDAP_FEATURES */
contrib/openldap-cmake/darwin_aarch64/include/portable.h (new file, 1169 lines)
File diff suppressed because it is too large.
contrib/poco (vendored)
@@ -1 +1 @@
-Subproject commit 83beecccb09eec0c9fd2669cacea03ede1d9f138
+Subproject commit b7d9ec16ee33ca76643d5fcd907ea9a33285640a
@@ -233,3 +233,10 @@ else ()
 
     message (STATUS "Using Poco::Foundation: ${LIBRARY_POCO_FOUNDATION} ${INCLUDE_POCO_FOUNDATION}")
 endif ()
+
+if(OS_DARWIN AND ARCH_AARCH64)
+    target_compile_definitions (_poco_foundation
+        PRIVATE
+            POCO_NO_STAT64
+    )
+endif()
@@ -142,14 +142,14 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
   endif(HAS_ALTIVEC)
 endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
 
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64")
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
   CHECK_C_COMPILER_FLAG("-march=armv8-a+crc+crypto" HAS_ARMV8_CRC)
   if(HAS_ARMV8_CRC)
     message(STATUS " HAS_ARMV8_CRC yes")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
  endif(HAS_ARMV8_CRC)
-endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64")
+endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
 
 
 include(CheckCXXSourceCompiles)
contrib/zlib-ng (vendored)
@@ -1 +1 @@
-Subproject commit 6fd1846c8b8f59436fe2dd752d0f316ddbb64df6
+Subproject commit 7f254522fd676ff4e906c6d4e9b30d4df4214c2d
debian/clickhouse-common-static.install (vendored, 1 line changed)
@@ -3,4 +3,3 @@ usr/bin/clickhouse-odbc-bridge
 usr/bin/clickhouse-library-bridge
 usr/bin/clickhouse-extract-from-config
 usr/share/bash-completion/completions
-etc/security/limits.d/clickhouse.conf
debian/clickhouse-server.config (vendored, deleted, 16 lines)
@@ -1,16 +0,0 @@
-#!/bin/sh -e
-
-test -f /usr/share/debconf/confmodule && . /usr/share/debconf/confmodule
-
-db_fget clickhouse-server/default-password seen || true
-password_seen="$RET"
-
-if [ "$1" = "reconfigure" ]; then
-    password_seen=false
-fi
-
-if [ "$password_seen" != "true" ]; then
-    db_input high clickhouse-server/default-password || true
-    db_go || true
-fi
-db_go || true
debian/clickhouse-server.postinst (vendored, 8 lines changed)
@@ -23,11 +23,13 @@ if [ ! -f "/etc/debian_version" ]; then
 fi
 
 if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then
+
+    ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}"
+
     if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
         # if old rc.d service present - remove it
         if [ -x "/etc/init.d/clickhouse-server" ] && [ -x "/usr/sbin/update-rc.d" ]; then
             /usr/sbin/update-rc.d clickhouse-server remove
-            echo "ClickHouse init script has migrated to systemd. Please manually stop old server and restart the service: sudo killall clickhouse-server && sleep 5 && sudo service clickhouse-server restart"
         fi
 
         /bin/systemctl daemon-reload
@@ -38,10 +40,8 @@ if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then
         if [ -x "/usr/sbin/update-rc.d" ]; then
             /usr/sbin/update-rc.d clickhouse-server defaults 19 19 >/dev/null || exit $?
         else
-            echo # TODO [ "$OS" = "rhel" ] || [ "$OS" = "centos" ] || [ "$OS" = "fedora" ]
+            echo # Other OS
         fi
     fi
 fi
-
-${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}"
 fi
debian/clickhouse-server.preinst (vendored, deleted, 8 lines)
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-if [ "$1" = "upgrade" ]; then
-    # Return etc/cron.d/clickhouse-server to original state
-    service clickhouse-server disable_cron ||:
-fi
-
-#DEBHELPER#
debian/clickhouse-server.prerm (vendored, deleted, 6 lines)
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-if [ "$1" = "upgrade" ] || [ "$1" = "remove" ]; then
-    # Return etc/cron.d/clickhouse-server to original state
-    service clickhouse-server disable_cron ||:
-fi
debian/clickhouse-server.templates (vendored, deleted, 3 lines)
@@ -1,3 +0,0 @@
-Template: clickhouse-server/default-password
-Type: password
-Description: Enter password for default user:

debian/clickhouse.limits (vendored, deleted, 2 lines)
@@ -1,2 +0,0 @@
-clickhouse soft nofile 262144
-clickhouse hard nofile 262144
debian/rules (vendored, 3 lines changed)
@@ -113,9 +113,6 @@ override_dh_install:
     ln -sf clickhouse-server.docs debian/clickhouse-client.docs
     ln -sf clickhouse-server.docs debian/clickhouse-common-static.docs
 
-    mkdir -p $(DESTDIR)/etc/security/limits.d
-    cp debian/clickhouse.limits $(DESTDIR)/etc/security/limits.d/clickhouse.conf
-
     # systemd compatibility
     mkdir -p $(DESTDIR)/etc/systemd/system/
     cp debian/clickhouse-server.service $(DESTDIR)/etc/systemd/system/
debian/watch (vendored, 2 lines changed)
@@ -1,6 +1,6 @@
 version=4
 
 opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)-stable\.tar\.gz%clickhouse-$1.tar.gz%" \
-    https://github.com/yandex/clickhouse/tags \
+    https://github.com/ClickHouse/ClickHouse/tags \
     (?:.*?/)?v?(\d[\d.]*)-stable\.tar\.gz debian uupdate
 
@@ -312,6 +312,8 @@ function run_tests
     01533_collate_in_nullable
     01542_collate_in_array
     01543_collate_in_tuple
+    01798_uniq_theta_sketch
+    01799_long_uniq_theta_sketch
     _orc_
     arrow
     avro
@@ -367,6 +369,9 @@ function run_tests
     # JSON functions
     01666_blns
 
+    # Requires postgresql-client
+    01802_test_postgresql_protocol_with_row_policy
+
     # Depends on AWS
     01801_s3_cluster
 )
@@ -1,6 +1,7 @@
 <yandex>
     <http_port remove="remove"/>
     <mysql_port remove="remove"/>
+    <postgresql_port remove="remove"/>
     <interserver_http_port remove="remove"/>
     <tcp_with_proxy_port remove="remove"/>
     <keeper_server remove="remove"/>
@@ -66,7 +66,12 @@ reportStageEnd('parse')
 subst_elems = root.findall('substitutions/substitution')
 available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... }
 for e in subst_elems:
-    available_parameters[e.find('name').text] = [v.text for v in e.findall('values/value')]
+    name = e.find('name').text
+    values = [v.text for v in e.findall('values/value')]
+    if not values:
+        raise Exception(f'No values given for substitution {{{name}}}')
+
+    available_parameters[name] = values
 
 # Takes parallel lists of templates, substitutes them with all combos of
 # parameters. The set of parameters is determined based on the first list.
@@ -21,14 +21,14 @@ function start()
     -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
     --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
     --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
-    --mysql_port 19004 \
+    --mysql_port 19004 --postgresql_port 19005 \
     --keeper_server.tcp_port 19181 --keeper_server.server_id 2
 
     sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \
     -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \
     --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \
     --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
-    --mysql_port 29004 \
+    --mysql_port 29004 --postgresql_port 29005 \
     --keeper_server.tcp_port 29181 --keeper_server.server_id 3
 fi
 
@@ -28,7 +28,8 @@ RUN apt-get update -y \
             tree \
             unixodbc \
             wget \
-            mysql-client=5.7*
+            mysql-client=5.7* \
+            postgresql-client
 
 RUN pip3 install numpy scipy pandas
 
@@ -44,7 +44,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
     -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
     --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
     --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
-    --mysql_port 19004 \
+    --mysql_port 19004 --postgresql_port 19005 \
     --keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
     --macros.replica r2 # It doesn't work :(
 
@@ -52,7 +52,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
     -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \
     --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \
     --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
-    --mysql_port 29004 \
+    --mysql_port 29004 --postgresql_port 29005 \
     --keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
     --macros.shard s2 # It doesn't work :(
 
@@ -112,10 +112,13 @@ if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
 fi
 tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
 tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
+tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
 
 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
     pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
     pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
     mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
     mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
+    tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
+    tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
 fi
@@ -5,12 +5,13 @@ toc_title: Build on Mac OS X
 
 # How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
 
-Build should work on x86_64 (Intel) based macOS 10.15 (Catalina) and higher with recent Xcode's native AppleClang, or Homebrew's vanilla Clang or GCC compilers.
+Build should work on x86_64 (Intel) and arm64 (Apple Silicon) based macOS 10.15 (Catalina) and higher with recent Xcode's native AppleClang, or Homebrew's vanilla Clang or GCC compilers.
 
 ## Install Homebrew {#install-homebrew}
 
 ``` bash
-$ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+# ...and follow the printed instructions on any additional steps required to complete the installation.
 ```
 
 ## Install Xcode and Command Line Tools {#install-xcode-and-command-line-tools}
@@ -22,8 +23,8 @@ Open it at least once to accept the end-user license agreement and automatically
 Then, make sure that the latest Command Line Tools are installed and selected in the system:
 
 ``` bash
-$ sudo rm -rf /Library/Developer/CommandLineTools
-$ sudo xcode-select --install
+sudo rm -rf /Library/Developer/CommandLineTools
+sudo xcode-select --install
 ```
 
 Reboot.
@@ -31,14 +32,15 @@ Reboot.
 ## Install Required Compilers, Tools, and Libraries {#install-required-compilers-tools-and-libraries}
 
 ``` bash
-$ brew update
-$ brew install cmake ninja libtool gettext llvm gcc
+brew update
+brew install cmake ninja libtool gettext llvm gcc
 ```
 
 ## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
 
 ``` bash
-$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git # or https://github.com/ClickHouse/ClickHouse.git
+git clone --recursive git@github.com:ClickHouse/ClickHouse.git
+# ...alternatively, you can use https://github.com/ClickHouse/ClickHouse.git as the repo URL.
 ```
 
 ## Build ClickHouse {#build-clickhouse}
@@ -46,37 +48,37 @@ $ git clone --recursive git@github.com:ClickHouse/ClickHouse.git # or https://github.com/ClickHouse/ClickHouse.git
 To build using Xcode's native AppleClang compiler:
 
 ``` bash
-$ cd ClickHouse
-$ rm -rf build
-$ mkdir build
-$ cd build
-$ cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
-$ cmake --build . --config RelWithDebInfo
-$ cd ..
+cd ClickHouse
+rm -rf build
+mkdir build
+cd build
+cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
+cmake --build . --config RelWithDebInfo
+cd ..
 ```
 
 To build using Homebrew's vanilla Clang compiler:
 
 ``` bash
-$ cd ClickHouse
-$ rm -rf build
-$ mkdir build
-$ cd build
-$ cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER==$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
-$ cmake --build . --config RelWithDebInfo
-$ cd ..
+cd ClickHouse
+rm -rf build
+mkdir build
+cd build
+cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
+cmake --build . --config RelWithDebInfo
+cd ..
 ```
 
 To build using Homebrew's vanilla GCC compiler:
 
 ``` bash
-$ cd ClickHouse
-$ rm -rf build
-$ mkdir build
-$ cd build
-$ cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-10 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-10 -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
-$ cmake --build . --config RelWithDebInfo
-$ cd ..
+cd ClickHouse
+rm -rf build
+mkdir build
+cd build
+cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-10 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-10 -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
+cmake --build . --config RelWithDebInfo
+cd ..
 ```
 
 ## Caveats {#caveats}
@@ -115,7 +117,7 @@ To do so, create the `/Library/LaunchDaemons/limit.maxfiles.plist` file with the
 Execute the following command:
 
 ``` bash
-$ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist
+sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist
 ```
 
 Reboot.
@@ -90,6 +90,7 @@ The following settings can be specified in configuration file for given endpoint
 - `endpoint` — Specifies prefix of an endpoint. Mandatory.
 - `access_key_id` and `secret_access_key` — Specifies credentials to use with given endpoint. Optional.
 - `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and Amazon EC2 metadata for given endpoint. Optional, default value is `false`.
+- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`.
 - `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
 - `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional.
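For context, such an endpoint configuration is picked up when a query addresses a matching URL; a minimal sketch using the `s3` table function (the URL, bucket, and column schema here are hypothetical, and credentials are assumed to come from the matching endpoint config):

``` sql
-- Count rows in hypothetical TSV objects under the configured endpoint prefix.
SELECT count()
FROM s3('https://storage.example.com/my-bucket/data/*.tsv', 'TSV', 'name String, value UInt32');
```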
@@ -102,11 +103,13 @@ The following settings can be specified in configuration file for given endpoint
     <!-- <access_key_id>ACCESS_KEY_ID</access_key_id> -->
     <!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> -->
     <!-- <use_environment_credentials>false</use_environment_credentials> -->
+    <!-- <use_insecure_imds_request>false</use_insecure_imds_request> -->
     <!-- <header>Authorization: Bearer SOME-TOKEN</header> -->
     <!-- <server_side_encryption_customer_key_base64>BASE64-ENCODED-KEY</server_side_encryption_customer_key_base64> -->
     </endpoint-name>
 </s3>
 ```
 
 ## Usage {#usage-examples}
 
 Suppose we have several files in TSV format with the following URIs on HDFS:
@@ -149,6 +152,7 @@ ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_p
 CREATE TABLE big_table (name String, value UInt32)
 ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
 ```
 
 ## See also
 
 - [S3 table function](../../../sql-reference/table-functions/s3.md)
@@ -767,6 +767,7 @@ Required parameters:
 
 Optional parameters:
 - `use_environment_credentials` — Reads AWS credentials from the Environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`.
+- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`.
 - `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL.
 - `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`.
 - `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`.
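For illustration, a table uses such an S3 disk through a storage policy; a minimal sketch, assuming a policy named `s3_policy` referencing the disk above has been declared in the server configuration:

``` sql
-- Hypothetical MergeTree table placed on an S3-backed storage policy.
-- The policy name 's3_policy' is an assumption; it must match the
-- <storage_configuration> section of the server config.
CREATE TABLE s3_backed_table
(
    id UInt64,
    value String
)
ENGINE = MergeTree()
ORDER BY id
SETTINGS storage_policy = 's3_policy';
```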
@@ -18,11 +18,17 @@ Engine parameters:
 - `num_layers` – Parallelism layer. Physically, the table will be represented as `num_layers` of independent buffers. Recommended value: 16.
 - `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes`, and `max_bytes` – Conditions for flushing data from the buffer.
 
+Optional engine parameters:
+
+- `flush_time`, `flush_rows`, `flush_bytes` – Conditions for flushing data from the buffer that happen only in the background (omitted or zero means no `flush*` parameters).
+
 Data is flushed from the buffer and written to the destination table if all the `min*` conditions or at least one `max*` condition are met.
 
-- `min_time`, `max_time` – Condition for the time in seconds from the moment of the first write to the buffer.
-- `min_rows`, `max_rows` – Condition for the number of rows in the buffer.
-- `min_bytes`, `max_bytes` – Condition for the number of bytes in the buffer.
+Also, if at least one `flush*` condition is met, a flush is initiated in the background. This differs from `max*` in that `flush*` allows you to configure background flushes separately, to avoid adding latency for `INSERT` (into `Buffer`) queries. A sketch of the resulting parameter list is shown below.
+
+- `min_time`, `max_time`, `flush_time` – Condition for the time in seconds from the moment of the first write to the buffer.
+- `min_rows`, `max_rows`, `flush_rows` – Condition for the number of rows in the buffer.
+- `min_bytes`, `max_bytes`, `flush_bytes` – Condition for the number of bytes in the buffer.
 
 During the write operation, data is inserted to a `num_layers` number of random buffers. Or, if the data part to insert is large enough (greater than `max_rows` or `max_bytes`), it is written directly to the destination table, omitting the buffer.
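To make the parameter list concrete, here is a hedged sketch of a Buffer table with the new background-only thresholds; the database, table, and values are illustrative, and it assumes the optional `flush_time`, `flush_rows`, `flush_bytes` follow `max_bytes` in the engine signature:

``` sql
-- Buffer for the hypothetical table merge.hits: 16 layers;
-- flush when all min* (10 s, 10k rows, 10 MB) or any max*
-- (100 s, 1M rows, 100 MB) condition is met; background-only
-- flushes at 60 s / 100k rows / 50 MB without delaying INSERTs.
CREATE TABLE merge.hits_buffer AS merge.hits
ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000, 60, 100000, 50000000);
```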
docs/en/interfaces/third-party/gui.md (vendored, 9 lines changed)
@@ -169,24 +169,21 @@ Features:
 
 ### SeekTable {#seektable}
 
-[SeekTable](https://www.seektable.com) is a self-service BI tool for data exploration and operational reporting. SeekTable is available both as a cloud service and a self-hosted version. SeekTable reports may be embedded into any web-app.
+[SeekTable](https://www.seektable.com) is a self-service BI tool for data exploration and operational reporting. It is available both as a cloud service and a self-hosted version. Reports from SeekTable may be embedded into any web-app.
 
 Features:
 
 - Business users-friendly reports builder.
 - Powerful report parameters for SQL filtering and report-specific query customizations.
 - Can connect to ClickHouse both with a native TCP/IP endpoint and a HTTP(S) interface (2 different drivers).
-- It is possible to use all power of CH SQL dialect in dimensions/measures definitions
+- It is possible to use all power of ClickHouse SQL dialect in dimensions/measures definitions.
 - [Web API](https://www.seektable.com/help/web-api-integration) for automated reports generation.
-- Supports reports development flow with account data [backup/restore](https://www.seektable.com/help/self-hosted-backup-restore), data models (cubes) / reports configuration is a human-readable XML and can be stored under version control.
+- Supports reports development flow with account data [backup/restore](https://www.seektable.com/help/self-hosted-backup-restore); data models (cubes) / reports configuration is a human-readable XML and can be stored under version control system.
 
 SeekTable is [free](https://www.seektable.com/help/cloud-pricing) for personal/individual usage.
 
 [How to configure ClickHouse connection in SeekTable.](https://www.seektable.com/help/clickhouse-pivot-table)
 
 
 ### Chadmin {#chadmin}
 
 [Chadmin](https://github.com/bun4uk/chadmin) is a simple UI where you can visualize your currently running queries on your ClickHouse cluster and info about them and kill them if you want.
 
-[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/) <!--hide-->
@@ -38,3 +38,4 @@ We recommend using this function in almost all scenarios.
 - [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
 - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
 - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
+- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)
@@ -49,3 +49,4 @@ Compared to the [uniq](../../../sql-reference/aggregate-functions/reference/uniq
 - [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
 - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
 - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
+- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)
@ -23,3 +23,4 @@ The function takes a variable number of parameters. Parameters can be `Tuple`, `

- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqcombined)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqhll12)
- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)
@ -37,3 +37,4 @@ We don’t recommend using this function. In most cases, use the [uniq](../../..

- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)
@ -0,0 +1,39 @@

---
toc_priority: 195
---

# uniqThetaSketch {#agg_function-uniqthetasketch}

Calculates the approximate number of different argument values, using the [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html).

``` sql
uniqThetaSketch(x[, ...])
```

**Arguments**

The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.

**Returned value**

- A [UInt64](../../../sql-reference/data-types/int-uint.md)-type number.

**Implementation details**

Function:

- Calculates a hash for all parameters in the aggregate, then uses it in calculations.

- Uses the [KMV](https://datasketches.apache.org/docs/Theta/InverseEstimate.html) algorithm to approximate the number of different argument values.

    A 4096 (2^12) 64-bit sketch is used. The size of the state is about 41 KB.

- The relative error is 3.125% (95% confidence); see the [relative error table](https://datasketches.apache.org/docs/Theta/ThetaErrorTable.html) for details.

**See Also**

- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
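As a usage illustration (not part of the original page, and assuming the server build includes Theta sketch support), a minimal query could look like the following; since the function is approximate, the estimate may deviate slightly from the true count of 1000:

``` sql
-- Hypothetical usage sketch: approximate count of distinct values.
-- numbers(1000000) yields one million rows; number % 1000 takes 1000 distinct values,
-- so the returned estimate should be close to 1000 within the documented relative error.
SELECT uniqThetaSketch(number % 1000) AS approx_distinct
FROM numbers(1000000);
```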
@ -16,7 +16,7 @@ The following operations with [partitions](../../../engines/table-engines/merget

- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) — Resets the value of a specified column in a partition.
- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) — Resets the specified secondary index in a partition.
- [FREEZE PARTITION](#alter_freeze-partition) — Creates a backup of a partition.
- [FETCH PARTITION\|PART](#alter_fetch-partition) — Downloads a part or partition from another server.
- [MOVE PARTITION\|PART](#alter_move-partition) — Moves a partition or data part to another disk or volume (see the sketch after this list).

<!-- -->
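As a brief sketch of the MOVE operation above (the table, partition, and disk names are hypothetical, not from the original page):

``` sql
-- Hypothetical sketch: move one partition of the `visits` table to a disk named 'fast_ssd'.
ALTER TABLE visits MOVE PARTITION 201902 TO DISK 'fast_ssd';
```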
@ -198,29 +198,35 @@ ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr

The query works similarly to `CLEAR COLUMN`, but it resets an index instead of column data.

## FETCH PARTITION|PART {#alter_fetch-partition}

``` sql
ALTER TABLE table_name FETCH PARTITION|PART partition_expr FROM 'path-in-zookeeper'
```

Downloads a partition or part from another server. This query works only for replicated tables.

The query does the following:

1.  Downloads the partition (or part) from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper.
2.  Then the query puts the downloaded data to the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to add the data to the table.

For example:

1. FETCH PARTITION

``` sql
ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits';
ALTER TABLE users ATTACH PARTITION 201902;
```

2. FETCH PART

``` sql
ALTER TABLE users FETCH PART 201901_2_2_0 FROM '/clickhouse/tables/01-01/visits';
ALTER TABLE users ATTACH PART 201901_2_2_0;
```

Note that:

- The `ALTER ... FETCH PARTITION|PART` query isn’t replicated. It places the part or partition to the `detached` directory only on the local server.
- The `ALTER TABLE ... ATTACH` query is replicated. It adds the data to all replicas. The data is added to one of the replicas from the `detached` directory, and to the others from neighboring replicas.

Before downloading, the system checks if the partition exists and the table structure matches. The most appropriate replica is selected automatically from the healthy replicas.
@ -5,39 +5,81 @@ toc_title: ROW POLICY

# CREATE ROW POLICY {#create-row-policy-statement}

Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.

Syntax:

``` sql
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1
    [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2 ...]
    [FOR SELECT] USING condition
    [AS {PERMISSIVE | RESTRICTIVE}]
    [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}]
```

## USING Clause {#create-row-policy-using}

Specifies a condition to filter rows. A user can see a row if the condition evaluates to non-zero for that row.
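For instance, a minimal sketch of a condition-based policy (the table, column, and role names here are hypothetical, not from the original page):

``` sql
-- Hypothetical sketch: members of sales_role see only rows whose region is 'emea'.
CREATE ROW POLICY emea_only ON mydb.orders FOR SELECT USING region = 'emea' TO sales_role;
```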
## TO Clause {#create-row-policy-to}

In the `TO` section you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.

Keyword `ALL` means all the ClickHouse users, including the current user. Keywords `ALL EXCEPT` allow you to exclude some users from the list of all users, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`

!!! note "Note"
    If there are no row policies defined for a table, then any user can `SELECT` all the rows from the table. Defining one or more row policies for the table makes access to the table depend on the row policies, no matter whether those policies are defined for the current user or not. For example, the following policy

    `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter`

    forbids the users `mira` and `peter` to see the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all.

    If that's not desirable, it can be fixed by adding one more row policy, like the following:

    `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter`

## AS Clause {#create-row-policy-as}

It's allowed to have more than one policy enabled on the same table for the same user at the same time, so we need a way to combine the conditions from multiple policies.

By default, policies are combined using the boolean `OR` operator. For example, the following policies

``` sql
CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio
```

enable the user `peter` to see rows with either `b=1` or `c=2`.

The `AS` clause specifies how a policy should be combined with other policies. Policies can be either permissive or restrictive. By default, policies are permissive, which means they are combined using the boolean `OR` operator.

A policy can alternatively be defined as restrictive. Restrictive policies are combined using the boolean `AND` operator.

Here is the general formula:

```
row_is_visible = (one or more of the permissive policies' conditions are non-zero) AND
                 (all of the restrictive policies' conditions are non-zero)
```

For example, the following policies

``` sql
CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
```

enable the user `peter` to see rows only if both `b=1` AND `c=2` hold.

## ON CLUSTER Clause {#create-row-policy-on-cluster}

Allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).

## Examples

`CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost`

`CREATE ROW POLICY filter2 ON mydb.mytable USING a<1000 AND b=5 TO ALL EXCEPT mira`

`CREATE ROW POLICY filter3 ON mydb.mytable USING 1 TO admin`
@ -50,15 +50,32 @@ Creates a table with the same result as that of the [table function](../../../sq

### From SELECT query {#from-select-query}

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name[(name1 [type1], name2 [type2], ...)] ENGINE = engine AS SELECT ...
```

Creates a table with a structure like the result of the `SELECT` query, with the `engine` engine, and fills it with data from `SELECT`. You can also explicitly specify a description of the columns.

If the table already exists and `IF NOT EXISTS` is specified, the query won’t do anything.

There can be other clauses after the `ENGINE` clause in the query. See detailed documentation on how to create tables in the descriptions of [table engines](../../../engines/table-engines/index.md#table_engines).

**Example**

Query:

``` sql
CREATE TABLE t1 (x String) ENGINE = Memory AS SELECT 1;
SELECT x, toTypeName(x) FROM t1;
```

Result:

```text
┌─x─┬─toTypeName(x)─┐
│ 1 │ String        │
└───┴───────────────┘
```

## NULL Or NOT NULL Modifiers {#null-modifiers}

`NULL` and `NOT NULL` modifiers after data type in column definition allow or do not allow it to be [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable).
@ -91,7 +91,7 @@ Hierarchy of privileges:

- `ALTER ADD CONSTRAINT`
- `ALTER DROP CONSTRAINT`
- `ALTER TTL`
- `ALTER MATERIALIZE TTL`
- `ALTER SETTINGS`
- `ALTER MOVE PARTITION`
- `ALTER FETCH PARTITION`
@ -102,9 +102,9 @@ Hierarchy of privileges:

- [CREATE](#grant-create)
    - `CREATE DATABASE`
    - `CREATE TABLE`
        - `CREATE TEMPORARY TABLE`
    - `CREATE VIEW`
    - `CREATE DICTIONARY`
- [DROP](#grant-drop)
    - `DROP DATABASE`
    - `DROP TABLE`
@ -150,7 +150,7 @@ Hierarchy of privileges:

- `SYSTEM RELOAD`
- `SYSTEM RELOAD CONFIG`
- `SYSTEM RELOAD DICTIONARY`
- `SYSTEM RELOAD EMBEDDED DICTIONARIES`
- `SYSTEM MERGES`
- `SYSTEM TTL MERGES`
- `SYSTEM FETCHES`
|
|||||||
- `ALTER ADD CONSTRAINT`. Level: `TABLE`. Aliases: `ADD CONSTRAINT`
|
- `ALTER ADD CONSTRAINT`. Level: `TABLE`. Aliases: `ADD CONSTRAINT`
|
||||||
- `ALTER DROP CONSTRAINT`. Level: `TABLE`. Aliases: `DROP CONSTRAINT`
|
- `ALTER DROP CONSTRAINT`. Level: `TABLE`. Aliases: `DROP CONSTRAINT`
|
||||||
- `ALTER TTL`. Level: `TABLE`. Aliases: `ALTER MODIFY TTL`, `MODIFY TTL`
|
- `ALTER TTL`. Level: `TABLE`. Aliases: `ALTER MODIFY TTL`, `MODIFY TTL`
|
||||||
- `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL`
|
- `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL`
|
||||||
- `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
|
- `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
|
||||||
- `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
|
- `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
|
||||||
- `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `FETCH PARTITION`
|
- `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `ALTER FETCH PART`, `FETCH PARTITION`, `FETCH PART`
|
||||||
- `ALTER FREEZE PARTITION`. Level: `TABLE`. Aliases: `FREEZE PARTITION`
|
- `ALTER FREEZE PARTITION`. Level: `TABLE`. Aliases: `FREEZE PARTITION`
|
||||||
- `ALTER VIEW` Level: `GROUP`
|
- `ALTER VIEW` Level: `GROUP`
|
||||||
- `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
|
- `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
|
||||||
@ -304,9 +304,9 @@ Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [A
|
|||||||
- `CREATE`. Level: `GROUP`
|
- `CREATE`. Level: `GROUP`
|
||||||
- `CREATE DATABASE`. Level: `DATABASE`
|
- `CREATE DATABASE`. Level: `DATABASE`
|
||||||
- `CREATE TABLE`. Level: `TABLE`
|
- `CREATE TABLE`. Level: `TABLE`
|
||||||
|
- `CREATE TEMPORARY TABLE`. Level: `GLOBAL`
|
||||||
- `CREATE VIEW`. Level: `VIEW`
|
- `CREATE VIEW`. Level: `VIEW`
|
||||||
- `CREATE DICTIONARY`. Level: `DICTIONARY`
|
- `CREATE DICTIONARY`. Level: `DICTIONARY`
|
||||||
- `CREATE TEMPORARY TABLE`. Level: `GLOBAL`
|
|
||||||
|
|
||||||
**Notes**
|
**Notes**
|
||||||
|
|
||||||
@ -401,7 +401,7 @@ Allows a user to execute [SYSTEM](../../sql-reference/statements/system.md) quer
|
|||||||
- `SYSTEM RELOAD`. Level: `GROUP`
|
- `SYSTEM RELOAD`. Level: `GROUP`
|
||||||
- `SYSTEM RELOAD CONFIG`. Level: `GLOBAL`. Aliases: `RELOAD CONFIG`
|
- `SYSTEM RELOAD CONFIG`. Level: `GLOBAL`. Aliases: `RELOAD CONFIG`
|
||||||
- `SYSTEM RELOAD DICTIONARY`. Level: `GLOBAL`. Aliases: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES`
|
- `SYSTEM RELOAD DICTIONARY`. Level: `GLOBAL`. Aliases: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES`
|
||||||
- `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: R`ELOAD EMBEDDED DICTIONARIES`
|
- `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: `RELOAD EMBEDDED DICTIONARIES`
|
||||||
- `SYSTEM MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES`
|
- `SYSTEM MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES`
|
||||||
- `SYSTEM TTL MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES`
|
- `SYSTEM TTL MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES`
|
||||||
- `SYSTEM FETCHES`. Level: `TABLE`. Aliases: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES`
|
- `SYSTEM FETCHES`. Level: `TABLE`. Aliases: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES`
|
||||||
|
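To illustrate how privileges like those above are granted in practice, here is a minimal sketch (the user name is hypothetical, not from the original page):

``` sql
-- Hypothetical sketch: CREATE TEMPORARY TABLE has GLOBAL level, so it is granted on *.*
GRANT CREATE TEMPORARY TABLE ON *.* TO john;
-- A TABLE-level privilege, by contrast, can be granted on a specific table:
GRANT ALTER FETCH PARTITION ON db.table TO john;
```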
@ -5,13 +5,18 @@ toc_title: OPTIMIZE

# OPTIMIZE Statement {#misc_operations-optimize}

This query tries to initialize an unscheduled merge of data parts for tables.

!!! warning "Warning"
    `OPTIMIZE` can’t fix the `Too many parts` error.

**Syntax**

``` sql
OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]]
```

The `OPTIMIZE` query is supported for the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported.

When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled).
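For example, a minimal sketch on a replicated table (the table name is hypothetical; setting `replication_alter_partitions_sync` to `2` makes the query wait for all replicas):

``` sql
-- Hypothetical sketch: force an unscheduled merge and wait until every replica executes it.
SET replication_alter_partitions_sync = 2;
OPTIMIZE TABLE db.replicated_table FINAL;
```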
@ -21,12 +26,13 @@ When `OPTIMIZE` is used with the [ReplicatedMergeTin

- If you specify `DEDUPLICATE`, then completely identical rows (unless the `BY` clause is specified) will be deduplicated (all columns are compared); this makes sense only for the MergeTree engine.

## BY expression {#by-expression}

If you want to perform deduplication on a custom set of columns rather than on all of them, you can specify the list of columns explicitly or use any combination of [`*`](../../sql-reference/statements/select/index.md#asterisk), [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) or [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier) expressions. The explicitly written or implicitly expanded list of columns must include all columns specified in the row ordering expression (both primary and sorting keys) and the partitioning expression (partitioning key).

!!! note "Note"
    Notice that `*` behaves just like in `SELECT`: `MATERIALIZED` and `ALIAS` columns are not used for expansion.
    Also, it is an error to specify an empty list of columns, to write an expression that results in an empty list of columns, or to deduplicate by an `ALIAS` column.

``` sql
OPTIMIZE TABLE table DEDUPLICATE; -- the old one
```

@ -39,9 +45,10 @@ OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex') EXCEPT co

``` sql
OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex') EXCEPT (colX, colY);
```

**Examples**

Create a table:

``` sql
CREATE TABLE example (
    primary_key Int32,
```

@ -56,31 +63,31 @@ PARTITION BY partition_key

``` sql
ORDER BY (primary_key, secondary_key);
```

The 'old' deduplicate: all columns are taken into account, i.e. a row is removed only if all values in all columns are equal to the corresponding values in the previous row.

``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE;
```

Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED`: the `primary_key`, `secondary_key`, `value`, `partition_key`, and `materialized_value` columns.

``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE BY *;
```

Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED` and explicitly not `materialized_value`: the `primary_key`, `secondary_key`, `value`, and `partition_key` columns.

``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE BY * EXCEPT materialized_value;
```

Deduplicate explicitly by the `primary_key`, `secondary_key`, and `partition_key` columns.

``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE BY primary_key, secondary_key, partition_key;
```

Deduplicate by any column matching a regex: the `primary_key`, `secondary_key`, and `partition_key` columns.

``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE BY COLUMNS('.*_key');
```

@ -39,4 +39,20 @@ toc_title: "Поставщики облачных услуг ClickHouse"

- access-rights support, one-key recovery, multi-layer network protection, cloud disk encryption;
- full integration with cloud logging systems, databases, and data-processing tools;
- a built-in platform for database monitoring and management;
- technical support from database experts.

## SberCloud {#sbercloud}

The [SberCloud.Advanced](https://sbercloud.ru/ru/advanced) cloud platform:

- provides more than 50 high-tech services;
- lets you quickly create and efficiently manage IT infrastructure, applications, and internet services;
- radically minimizes the resources required to run corporate IT systems;
- significantly reduces the time to market for new products.

SberCloud.Advanced provides [MapReduce Service (MRS)](https://docs.sbercloud.ru/mrs/ug/topics/ug__clickhouse.html), a reliable, secure, and easy-to-use enterprise-grade platform for storing, processing, and analyzing big data. MRS lets you quickly create and manage ClickHouse clusters.

- A ClickHouse instance consists of three ZooKeeper nodes and several ClickHouse nodes. The dedicated replica mode is used to ensure high reliability with double copies of data.
- MRS offers flexible scaling for rapidly growing services in scenarios where the cluster's storage capacity or CPU resources are insufficient. When ClickHouse nodes are added to a cluster, MRS provides a one-click data-balancing tool. You can choose the data-balancing mode and time based on the characteristics of the service to ensure its availability.
- MRS uses a high-availability deployment architecture based on Elastic Load Balance (ELB), a service that automatically distributes traffic across several backend nodes. With ELB, data is written to local tables and read from distributed tables on different nodes. This architecture improves the fault tolerance of the cluster and guarantees high availability of applications.
@ -753,7 +753,8 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'

Optional parameters:

- `use_environment_credentials` — a flag that indicates whether AWS credentials should be read from the network environment, as well as from the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN` environment variables if they exist. Default value: `false`.
- `use_insecure_imds_request` — a flag that indicates whether a less secure connection should be used when querying IMDS to obtain credentials from Amazon EC2 metadata. Default value: `false`.
- `proxy` — proxy server configuration for the S3 endpoint. Each `uri` element inside the `proxy` block must contain a proxy server URL.
- `connect_timeout_ms` — socket connection timeout in milliseconds. Default value: 10 seconds.
- `request_timeout_ms` — request execution timeout in milliseconds. Default value: 5 seconds.

@ -121,6 +121,7 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe

- `--user, -u` — the user name; ‘default’ by default.
- `--password` — the password; an empty string by default.
- `--query, -q` — the query to execute, for non-interactive mode.
- `--queries-file, -qf` — the path to a file with queries to execute. Only one of the `query` and `queries-file` options can be specified.
- `--database, -d` — select the current database. Without it, the value is taken from the server settings (the ‘default’ database, by default).
- `--multiline, -m` — if specified, allow multi-line queries and do not send the query on pressing Enter.
- `--multiquery, -n` — if specified, allow executing multiple queries separated by semicolons.

@ -130,6 +131,7 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe

- `--stacktrace` — if specified, also print the stack trace when an exception occurs.
- `--config-file` — the name of the configuration file.
- `--secure` — if specified, a secure channel is used.
- `--history_file` — the path to the command history file.
- `--param_<name>` — the value of a parameter for a [query with parameters](#cli-queries-with-parameters).

Starting from version 20.5, `clickhouse-client` has automatic syntax highlighting (always enabled).
@ -166,4 +166,19 @@ toc_title: "Визуальные интерфейсы от сторонних р

[How to configure ClickHouse in Looker.](https://docs.looker.com/setup-and-management/database-config/clickhouse)

### SeekTable {#seektable}

[SeekTable](https://www.seektable.com) is a self-service BI tool for data exploration and operational reporting. It is available both as a cloud service and as a self-hosted version. Reports from SeekTable can be embedded into any web app.

Key features:

- A convenient report designer.
- Flexible report SQL configuration and query building for custom reports.
- Integrates with ClickHouse both through its native TCP/IP endpoint and through the HTTP(S) interface (two different drivers).
- Supports the full power of the ClickHouse SQL dialect for building queries across various dimensions and measures.
- A [Web API](https://www.seektable.com/help/web-api-integration) for automated report generation.
- Supports a report development workflow with account data [backup/restore](https://www.seektable.com/help/self-hosted-backup-restore); data model (cube) and report configurations are human-readable XML and can be stored in a version control system.

SeekTable is [free](https://www.seektable.com/help/cloud-pricing) for personal/individual usage.

[How to configure a ClickHouse connection in SeekTable.](https://www.seektable.com/help/clickhouse-pivot-table)
@ -5,7 +5,7 @@ toc_title: "Политика доступа"

# CREATE ROW POLICY {#create-row-policy-statement}

Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter that determines which rows a user can read from a table.

Syntax:

@ -13,33 +13,74 @@ toc_title: "Политика доступа"

``` sql
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1
    [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2 ...]
    [AS {PERMISSIVE | RESTRICTIVE}]
    [FOR SELECT] USING condition
    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```

## USING Clause {#create-row-policy-using}

The `USING` clause specifies a condition to filter rows. A user can see a row if the condition evaluates to non-zero for that row.

## TO Clause {#create-row-policy-to}

The `TO` section lists the users and roles the policy should apply to. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.

The keyword `ALL` means all ClickHouse users, including the current one. The keywords `ALL EXCEPT` allow you to exclude some users from the list of all users, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`

!!! note "Note"
    If no row policies are defined for a table, any user can SELECT all of its rows. Defining one or more row policies for the table makes access to the table governed by those policies, for all users (including those for whom no policies were defined). For example, the following policy

    `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter`

    forbids the users `mira` and `peter` to see rows with `b != 1`, and also forbids all other users (e.g., the user `paul`) to see any rows of `mydb.table1` at all.

    If that's not desirable, the behavior can be fixed by defining an additional policy:

    `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter`

## AS Clause {#create-row-policy-as}

More than one policy can be active for the same table and the same user at the same time, so we need a way to combine policies.

By default, policies are combined using the boolean `OR` operator. For example, the policies

``` sql
CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio
```

allow the user named `peter` to see rows for which either `b=1` or `c=2` holds.

The `AS` clause specifies how a policy should be combined with other policies. Policies can be either permissive (`PERMISSIVE`) or restrictive (`RESTRICTIVE`). By default, policies are created permissive (`PERMISSIVE`); such policies are combined using the boolean `OR` operator.

Restrictive (`RESTRICTIVE`) policies are combined using the boolean `AND` operator.

The general formula looks like this:

```
row_is_visible = (one or more of the permissive policies' conditions evaluated to non-zero) AND
                 (all of the restrictive policies' conditions evaluated to non-zero)
```

For example, the policies

``` sql
CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
```

allow the user named `peter` to see rows only if both `b=1` and `c=2` hold.

## ON CLUSTER Clause {#create-row-policy-on-cluster}

The `ON CLUSTER` clause allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).

## Examples

`CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost`

`CREATE ROW POLICY filter2 ON mydb.mytable USING a<1000 AND b=5 TO ALL EXCEPT mira`

`CREATE ROW POLICY filter3 ON mydb.mytable USING 1 TO admin`

<!--hide-->
@ -46,15 +46,32 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function()

### From SELECT query {#from-select-query}

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name[(name1 [type1], name2 [type2], ...)] ENGINE = engine AS SELECT ...
```

Creates a table with a structure like the result of the `SELECT` query, with the `engine` engine, and fills it with data from the `SELECT`. You can also explicitly specify a description of the columns.

If the table already exists and `IF NOT EXISTS` is specified, the query does nothing.

Other clauses may follow the `ENGINE` clause in the query, depending on the engine. See the detailed documentation on creating tables in the descriptions of [table engines](../../../engines/table-engines/index.md#table_engines).

**Example**

Query:

``` sql
CREATE TABLE t1 (x String) ENGINE = Memory AS SELECT 1;
SELECT x, toTypeName(x) FROM t1;
```

Result:

```text
┌─x─┬─toTypeName(x)─┐
│ 1 │ String        │
└───┴───────────────┘
```

## NULL or NOT NULL Modifiers {#null-modifiers}

The `NULL` or `NOT NULL` modifier specified after the data type in a column definition allows or disallows the data type to be [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable).
@ -93,7 +93,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION

- `ALTER ADD CONSTRAINT`
- `ALTER DROP CONSTRAINT`
- `ALTER TTL`
- `ALTER MATERIALIZE TTL`
- `ALTER SETTINGS`
- `ALTER MOVE PARTITION`
- `ALTER FETCH PARTITION`

@ -104,9 +104,9 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION

- [CREATE](#grant-create)
    - `CREATE DATABASE`
    - `CREATE TABLE`
        - `CREATE TEMPORARY TABLE`
    - `CREATE VIEW`
    - `CREATE DICTIONARY`
- [DROP](#grant-drop)
    - `DROP DATABASE`
    - `DROP TABLE`

@ -152,7 +152,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION

- `SYSTEM RELOAD`
- `SYSTEM RELOAD CONFIG`
- `SYSTEM RELOAD DICTIONARY`
- `SYSTEM RELOAD EMBEDDED DICTIONARIES`
- `SYSTEM MERGES`
- `SYSTEM TTL MERGES`
- `SYSTEM FETCHES`

@ -279,7 +279,7 @@ GRANT INSERT(x,y) ON db.table TO john

- `ALTER ADD CONSTRAINT`. Level: `TABLE`. Aliases: `ADD CONSTRAINT`
- `ALTER DROP CONSTRAINT`. Level: `TABLE`. Aliases: `DROP CONSTRAINT`
- `ALTER TTL`. Level: `TABLE`. Aliases: `ALTER MODIFY TTL`, `MODIFY TTL`
- `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL`
- `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING`
- `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART`
- `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `FETCH PARTITION`

@ -307,9 +307,9 @@ GRANT INSERT(x,y) ON db.table TO john

- `CREATE`. Level: `GROUP`
    - `CREATE DATABASE`. Level: `DATABASE`
    - `CREATE TABLE`. Level: `TABLE`
        - `CREATE TEMPORARY TABLE`. Level: `GLOBAL`
    - `CREATE VIEW`. Level: `VIEW`
    - `CREATE DICTIONARY`. Level: `DICTIONARY`

**Notes**

@ -407,7 +407,7 @@ GRANT INSERT(x,y) ON db.table TO john

- `SYSTEM RELOAD`. Level: `GROUP`
    - `SYSTEM RELOAD CONFIG`. Level: `GLOBAL`. Aliases: `RELOAD CONFIG`
    - `SYSTEM RELOAD DICTIONARY`. Level: `GLOBAL`. Aliases: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES`
    - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: `RELOAD EMBEDDED DICTIONARIES`
- `SYSTEM MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES`
- `SYSTEM TTL MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES`
- `SYSTEM FETCHES`. Level: `TABLE`. Aliases: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES`
@ -5,19 +5,83 @@ toc_title: OPTIMIZE

# OPTIMIZE {#misc_operations-optimize}

This query tries to initialize an unscheduled merge of data parts for tables.

!!! warning "Warning"
    `OPTIMIZE` cannot eliminate the cause of the `Too many parts` error.

**Syntax**

``` sql
OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]]
```

The query can be applied to tables of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, [MaterializedView](../../engines/table-engines/special/materializedview.md), and [Buffer](../../engines/table-engines/special/buffer.md). Other table engines are not supported.

If the `OPTIMIZE` query is applied to tables of the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family, ClickHouse creates a merge task and waits for it to be executed on all nodes (if the `replication_alter_partitions_sync` setting is enabled).

- By default, if the `OPTIMIZE` query fails to perform a merge, ClickHouse does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
- If you specify `PARTITION`, only the specified partition is optimized. [How to set the partition expression](alter/index.md#alter-how-to-specify-part-expr).
- If you specify `FINAL`, optimization is performed even when all the data is already in a single part. Moreover, the merge is forced even if concurrent merges are in progress.
- If you specify `DEDUPLICATE`, completely identical rows are collapsed (values in all columns are compared); this makes sense only for the MergeTree engine.

## BY expression {#by-expression}

To perform deduplication on a custom set of columns, you can explicitly specify a list of columns or use any combination of the [`*`](../../sql-reference/statements/select/index.md#asterisk) wildcard and the [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) and [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier) expressions.

The list of columns for deduplication must include all columns specified in the sorting conditions (the primary key and the sorting key), as well as in the partitioning condition (the partitioning key).

!!! note "Note"
    Note that the `*` wildcard is handled just as in `SELECT` queries: `MATERIALIZED` and `ALIAS` columns are not included in the expansion.
    Specifying an empty list of columns, or an expression that yields an empty list of columns, or deduplicating by an `ALIAS` column, results in a server error.

**Examples**

Consider the table:

``` sql
CREATE TABLE example (
    primary_key Int32,
    secondary_key Int32,
    value UInt32,
    partition_key UInt32,
    materialized_value UInt32 MATERIALIZED 12345,
    aliased_value UInt32 ALIAS 2,
    PRIMARY KEY primary_key
) ENGINE=MergeTree
PARTITION BY partition_key;
```

The previous way of deduplicating, where all columns are taken into account: a row is removed only if all values in all columns are equal to the corresponding values in the previous row.

``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE;
```

Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED`: `primary_key`, `secondary_key`, `value`, `partition_key`, and `materialized_value`.

``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE BY *;
```

Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED`, and explicitly not `materialized_value`: the `primary_key`, `secondary_key`, `value`, and `partition_key` columns.

``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE BY * EXCEPT materialized_value;
```

Deduplicate by the `primary_key`, `secondary_key`, and `partition_key` columns.

``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE BY primary_key, secondary_key, partition_key;
```

Deduplicate by any column matching a regular expression: the `primary_key`, `secondary_key`, and `partition_key` columns.

``` sql
OPTIMIZE TABLE example FINAL DEDUPLICATE BY COLUMNS('.*_key');
```
@@ -7,6 +7,8 @@
 #include <IO/ConnectionTimeouts.h>
 #include <Poco/Util/AbstractConfiguration.h>
 #include <Common/Exception.h>
+#include <Common/isLocalAddress.h>
+#include <Common/DNSResolver.h>
 #include <common/setTerminalEcho.h>
 #include <ext/scope_guard.h>
@@ -60,7 +62,9 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
 #endif
 }
 
-    compression = config.getBool("compression", true) ? Protocol::Compression::Enable : Protocol::Compression::Disable;
+    /// By default compression is disabled if address looks like localhost.
+    compression = config.getBool("compression", !isLocalAddress(DNSResolver::instance().resolveHost(host)))
+        ? Protocol::Compression::Enable : Protocol::Compression::Disable;
 
     timeouts = ConnectionTimeouts(
         Poco::Timespan(config.getInt("connect_timeout", DBMS_DEFAULT_CONNECT_TIMEOUT_SEC), 0),
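For readers skimming this hunk, a minimal sketch of the new default (illustrative helper only, not part of the patch; it assumes the `isLocalAddress` and `DNSResolver` signatures used in the hunk above):

```cpp
#include <string>
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/DNSResolver.h>
#include <Common/isLocalAddress.h>

// Illustrative sketch of the effective default after this change: an explicit
// "compression" key in the client config always wins; otherwise compression
// is enabled exactly when the host does not resolve to a local address, so
// localhost connections skip the compression overhead.
static bool compressionEnabled(const Poco::Util::AbstractConfiguration & config, const std::string & host)
{
    const bool is_remote = !DB::isLocalAddress(DB::DNSResolver::instance().resolveHost(host));
    return config.getBool("compression", is_remote);
}
```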
@@ -562,20 +562,32 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
 
     bool stdin_is_a_tty = isatty(STDIN_FILENO);
     bool stdout_is_a_tty = isatty(STDOUT_FILENO);
-    bool is_interactive = stdin_is_a_tty && stdout_is_a_tty;
+
+    /// dpkg or apt installers can ask for non-interactive work explicitly.
+    const char * debian_frontend_var = getenv("DEBIAN_FRONTEND");
+    bool noninteractive = debian_frontend_var && debian_frontend_var == std::string_view("noninteractive");
+
+    bool is_interactive = !noninteractive && stdin_is_a_tty && stdout_is_a_tty;
+
+    /// We can ask password even if stdin is closed/redirected but /dev/tty is available.
+    bool can_ask_password = !noninteractive && stdout_is_a_tty;
 
     if (has_password_for_default_user)
     {
-        fmt::print(HILITE "Password for default user is already specified. To remind or reset, see {} and {}." END_HILITE,
+        fmt::print(HILITE "Password for default user is already specified. To remind or reset, see {} and {}." END_HILITE "\n",
             users_config_file.string(), users_d.string());
     }
-    else if (!is_interactive)
+    else if (!can_ask_password)
     {
-        fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE,
+        fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE "\n",
             users_config_file.string(), users_d.string());
     }
     else
     {
+        /// NOTE: When installing a debian package with dpkg -i, stdin is not a terminal but we are still able to enter a password.
+        /// A more sophisticated method with /dev/tty is used inside the `readpassphrase` function.
+
         char buf[1000] = {};
         std::string password;
         if (auto * result = readpassphrase("Enter password for default user: ", buf, sizeof(buf), 0))
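A rough sketch of the /dev/tty trick the NOTE above refers to (assumption: this mirrors the general approach of readpassphrase(3), not the vendored implementation itself):

```cpp
#include <fcntl.h>
#include <unistd.h>

// When stdin is redirected (e.g. dpkg -i), a prompt can still reach the user
// by opening the controlling terminal directly; fall back to stdin otherwise.
int openPromptFd()
{
    int fd = open("/dev/tty", O_RDWR);
    if (fd < 0)
        fd = STDIN_FILENO; /// possibly redirected, but the best we have
    return fd;
}
```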
@ -603,7 +615,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
|
|||||||
"</yandex>\n";
|
"</yandex>\n";
|
||||||
out.sync();
|
out.sync();
|
||||||
out.finalize();
|
out.finalize();
|
||||||
fmt::print("Password for default user is saved in file {}.\n", password_file);
|
fmt::print(HILITE "Password for default user is saved in file {}." END_HILITE "\n", password_file);
|
||||||
#else
|
#else
|
||||||
out << "<yandex>\n"
|
out << "<yandex>\n"
|
||||||
" <users>\n"
|
" <users>\n"
|
||||||
@ -614,12 +626,12 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
|
|||||||
"</yandex>\n";
|
"</yandex>\n";
|
||||||
out.sync();
|
out.sync();
|
||||||
out.finalize();
|
out.finalize();
|
||||||
fmt::print("Password for default user is saved in plaintext in file {}.\n", password_file);
|
fmt::print(HILITE "Password for default user is saved in plaintext in file {}." END_HILITE "\n", password_file);
|
||||||
#endif
|
#endif
|
||||||
has_password_for_default_user = true;
|
has_password_for_default_user = true;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
fmt::print("Password for default user is empty string. See {} and {} to change it.\n",
|
fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE "\n",
|
||||||
users_config_file.string(), users_d.string());
|
users_config_file.string(), users_d.string());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -644,7 +656,6 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
|
|||||||
" This is optional. Taskstats accounting will be disabled."
|
" This is optional. Taskstats accounting will be disabled."
|
||||||
" To enable taskstats accounting you may add the required capability later manually.\"",
|
" To enable taskstats accounting you may add the required capability later manually.\"",
|
||||||
"/tmp/test_setcap.sh", fs::canonical(main_bin_path).string());
|
"/tmp/test_setcap.sh", fs::canonical(main_bin_path).string());
|
||||||
fmt::print(" {}\n", command);
|
|
||||||
executeScript(command);
|
executeScript(command);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@@ -54,9 +54,10 @@ void ODBCHandler::processError(HTTPServerResponse & response, const std::string
 void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
 {
     HTMLForm params(request);
+    LOG_TRACE(log, "Request URI: {}", request.getURI());
+
     if (mode == "read")
         params.read(request.getStream());
-    LOG_TRACE(log, "Request URI: {}", request.getURI());
 
     if (mode == "read" && !params.has("query"))
     {
@@ -64,11 +65,6 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
         return;
     }
 
-    if (!params.has("columns"))
-    {
-        processError(response, "No 'columns' in request URL");
-        return;
-    }
 
     if (!params.has("connection_string"))
     {
@@ -76,6 +72,16 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
         return;
     }
 
+    if (!params.has("sample_block"))
+    {
+        processError(response, "No 'sample_block' in request URL");
+        return;
+    }
+
+    std::string format = params.get("format", "RowBinary");
+    std::string connection_string = params.get("connection_string");
+    LOG_TRACE(log, "Connection string: '{}'", connection_string);
+
     UInt64 max_block_size = DEFAULT_BLOCK_SIZE;
     if (params.has("max_block_size"))
     {
@@ -88,24 +94,19 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
         max_block_size = parse<size_t>(max_block_size_str);
     }
 
-    std::string columns = params.get("columns");
+    std::string sample_block_string = params.get("sample_block");
     std::unique_ptr<Block> sample_block;
     try
     {
-        sample_block = parseColumns(std::move(columns));
+        sample_block = parseColumns(std::move(sample_block_string));
     }
     catch (const Exception & ex)
     {
-        processError(response, "Invalid 'columns' parameter in request body '" + ex.message() + "'");
-        LOG_WARNING(log, ex.getStackTraceString());
+        processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'");
+        LOG_ERROR(log, ex.getStackTraceString());
         return;
     }
 
-    std::string format = params.get("format", "RowBinary");
-
-    std::string connection_string = params.get("connection_string");
-    LOG_TRACE(log, "Connection string: '{}'", connection_string);
-
     WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout);
 
     try
@@ -19,7 +19,18 @@ namespace ErrorCodes
 
 std::string getIdentifierQuote(nanodbc::connection & connection)
 {
-    return connection.get_info<std::string>(SQL_IDENTIFIER_QUOTE_CHAR);
+    std::string quote;
+    try
+    {
+        quote = connection.get_info<std::string>(SQL_IDENTIFIER_QUOTE_CHAR);
+    }
+    catch (...)
+    {
+        LOG_WARNING(&Poco::Logger::get("ODBCGetIdentifierQuote"), "Cannot fetch identifier quote. Default double quote is used. Reason: {}", getCurrentExceptionMessage(false));
+        return "\"";
+    }
+
+    return quote;
 }
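For context, a hedged sketch of how a quote character fetched this way is typically applied to an identifier (illustrative helper, not part of the patch):

```cpp
#include <string>

// Illustrative only: wrap an identifier in the quote character the ODBC
// driver reports, with the same '"' fallback the function above now uses.
std::string quoteIdentifier(const std::string & name, const std::string & quote = "\"")
{
    return quote + name + quote;
}
```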
@@ -7,7 +7,20 @@
 -->
 <yandex>
     <logger>
-        <!-- Possible levels: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105 -->
+        <!-- Possible levels [1]:
+
+            - none (turns off logging)
+            - fatal
+            - critical
+            - error
+            - warning
+            - notice
+            - information
+            - debug
+            - trace
+
+            [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
+        -->
         <level>trace</level>
         <log>/var/log/clickhouse-server/clickhouse-server.log</log>
         <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
@ -76,7 +89,7 @@
|
|||||||
<!-- Compatibility with PostgreSQL protocol.
|
<!-- Compatibility with PostgreSQL protocol.
|
||||||
ClickHouse will pretend to be PostgreSQL for applications connecting to this port.
|
ClickHouse will pretend to be PostgreSQL for applications connecting to this port.
|
||||||
-->
|
-->
|
||||||
<!-- <postgresql_port>9005</postgresql_port> -->
|
<postgresql_port>9005</postgresql_port>
|
||||||
|
|
||||||
<!-- HTTP API with TLS (HTTPS).
|
<!-- HTTP API with TLS (HTTPS).
|
||||||
You have to configure certificate to enable this interface.
|
You have to configure certificate to enable this interface.
|
||||||
|
@@ -62,7 +62,7 @@ enum class AccessType
     enabled implicitly by the grant ALTER_TABLE */\
     M(ALTER_SETTINGS, "ALTER SETTING, ALTER MODIFY SETTING, MODIFY SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\
     M(ALTER_MOVE_PARTITION, "ALTER MOVE PART, MOVE PARTITION, MOVE PART", TABLE, ALTER_TABLE) \
-    M(ALTER_FETCH_PARTITION, "FETCH PARTITION", TABLE, ALTER_TABLE) \
+    M(ALTER_FETCH_PARTITION, "ALTER FETCH PART, FETCH PARTITION", TABLE, ALTER_TABLE) \
     M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \
     \
     M(ALTER_TABLE, "", GROUP, ALTER) \
@@ -130,6 +130,7 @@ enum class AccessType
     M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \
     M(SYSTEM_RELOAD_SYMBOLS, "RELOAD SYMBOLS", GLOBAL, SYSTEM_RELOAD) \
     M(SYSTEM_RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARIES, RELOAD DICTIONARY, RELOAD DICTIONARIES", GLOBAL, SYSTEM_RELOAD) \
+    M(SYSTEM_RELOAD_MODEL, "SYSTEM RELOAD MODELS, RELOAD MODEL, RELOAD MODELS", GLOBAL, SYSTEM_RELOAD) \
     M(SYSTEM_RELOAD_EMBEDDED_DICTIONARIES, "RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM_RELOAD) /* implicitly enabled by the grant SYSTEM_RELOAD_DICTIONARY ON *.* */\
     M(SYSTEM_RELOAD, "", GROUP, SYSTEM) \
     M(SYSTEM_MERGES, "SYSTEM STOP MERGES, SYSTEM START MERGES, STOP_MERGES, START MERGES", TABLE, SYSTEM) \
@@ -96,7 +96,7 @@ public:
                           UInt32 num_scale_ = 0, UInt32 denom_scale_ = 0)
         : Base(argument_types_, {}), num_scale(num_scale_), denom_scale(denom_scale_) {}
 
-    DataTypePtr getReturnType() const final { return std::make_shared<DataTypeNumber<Float64>>(); }
+    DataTypePtr getReturnType() const override { return std::make_shared<DataTypeNumber<Float64>>(); }
 
     bool allocatesMemoryInArena() const override { return false; }
src/AggregateFunctions/AggregateFunctionSumCount.cpp (new file, 49 lines)
@@ -0,0 +1,49 @@
+#include <AggregateFunctions/AggregateFunctionFactory.h>
+#include <AggregateFunctions/AggregateFunctionSumCount.h>
+#include <AggregateFunctions/Helpers.h>
+#include <AggregateFunctions/FactoryHelpers.h>
+#include "registerAggregateFunctions.h"
+
+namespace DB
+{
+namespace ErrorCodes
+{
+    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+}
+
+namespace
+{
+bool allowType(const DataTypePtr& type) noexcept
+{
+    const WhichDataType t(type);
+    return t.isInt() || t.isUInt() || t.isFloat() || t.isDecimal();
+}
+
+AggregateFunctionPtr createAggregateFunctionSumCount(const std::string & name, const DataTypes & argument_types, const Array & parameters)
+{
+    assertNoParameters(name, parameters);
+    assertUnary(name, argument_types);
+
+    AggregateFunctionPtr res;
+    DataTypePtr data_type = argument_types[0];
+    if (!allowType(data_type))
+        throw Exception("Illegal type " + data_type->getName() + " of argument for aggregate function " + name,
+                        ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+
+    if (isDecimal(data_type))
+        res.reset(createWithDecimalType<AggregateFunctionSumCount>(
+            *data_type, argument_types, getDecimalScale(*data_type)));
+    else
+        res.reset(createWithNumericType<AggregateFunctionSumCount>(*data_type, argument_types));
+
+    return res;
+}
+}
+
+void registerAggregateFunctionSumCount(AggregateFunctionFactory & factory)
+{
+    factory.registerFunction("sumCount", createAggregateFunctionSumCount);
+}
+
+}
src/AggregateFunctions/AggregateFunctionSumCount.h (new file, 55 lines)
@@ -0,0 +1,55 @@
+#pragma once
+
+#include <type_traits>
+#include <DataTypes/DataTypeTuple.h>
+#include <AggregateFunctions/AggregateFunctionAvg.h>
+
+namespace DB
+{
+template <typename T>
+using DecimalOrNumberDataType = std::conditional_t<IsDecimalNumber<T>, DataTypeDecimal<AvgFieldType<T>>, DataTypeNumber<AvgFieldType<T>>>;
+
+template <typename T>
+class AggregateFunctionSumCount final : public AggregateFunctionAvgBase<AvgFieldType<T>, UInt64, AggregateFunctionSumCount<T>>
+{
+public:
+    using Base = AggregateFunctionAvgBase<AvgFieldType<T>, UInt64, AggregateFunctionSumCount<T>>;
+
+    AggregateFunctionSumCount(const DataTypes & argument_types_, UInt32 num_scale_ = 0)
+        : Base(argument_types_, num_scale_), scale(num_scale_) {}
+
+    DataTypePtr getReturnType() const override
+    {
+        DataTypes types;
+        if constexpr (IsDecimalNumber<T>)
+            types.emplace_back(std::make_shared<DecimalOrNumberDataType<T>>(DecimalOrNumberDataType<T>::maxPrecision(), scale));
+        else
+            types.emplace_back(std::make_shared<DecimalOrNumberDataType<T>>());
+
+        types.emplace_back(std::make_shared<DataTypeUInt64>());
+
+        return std::make_shared<DataTypeTuple>(types);
+    }
+
+    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const final
+    {
+        assert_cast<DecimalOrVectorCol<AvgFieldType<T>> &>((assert_cast<ColumnTuple &>(to)).getColumn(0)).getData().push_back(
+            this->data(place).numerator);
+
+        assert_cast<ColumnUInt64 &>((assert_cast<ColumnTuple &>(to)).getColumn(1)).getData().push_back(
+            this->data(place).denominator);
+    }
+
+    void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final
+    {
+        this->data(place).numerator += static_cast<const DecimalOrVectorCol<T> &>(*columns[0]).getData()[row_num];
+        ++this->data(place).denominator;
+    }
+
+    String getName() const final { return "sumCount"; }
+
+private:
+    UInt32 scale;
+};
+
+}
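The new function folds a sum and a row count into one pass over the data; a minimal standalone sketch of the same aggregation logic, with hypothetical plain types rather than the ClickHouse classes above:

```cpp
#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical standalone analogue of sumCount: a single pass accumulates
// both the numerator (sum) and the denominator (count), returned as a pair,
// mirroring the tuple (sum, count) that the aggregate function emits.
std::pair<double, uint64_t> sumCountSketch(const std::vector<double> & values)
{
    double sum = 0.0;
    uint64_t count = 0;
    for (double v : values)
    {
        sum += v;   // numerator
        ++count;    // denominator
    }
    return {sum, count};
}
```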
@@ -132,6 +132,12 @@ void registerAggregateFunctionsUniq(AggregateFunctionFactory & factory)
 
     factory.registerFunction("uniqExact",
         {createAggregateFunctionUniq<true, AggregateFunctionUniqExactData, AggregateFunctionUniqExactData<String>>, properties});
+
+#if USE_DATASKETCHES
+    factory.registerFunction("uniqThetaSketch",
+        {createAggregateFunctionUniq<AggregateFunctionUniqThetaSketchData, AggregateFunctionUniqThetaSketchData>, properties});
+#endif
 }
 
 }
@@ -22,6 +22,7 @@
 
 #include <AggregateFunctions/UniquesHashSet.h>
 #include <AggregateFunctions/IAggregateFunction.h>
+#include <AggregateFunctions/ThetaSketchData.h>
 #include <AggregateFunctions/UniqVariadicHash.h>
 
 
@@ -124,6 +125,19 @@ struct AggregateFunctionUniqExactData<String>
 };
 
 
+/// uniqThetaSketch
+#if USE_DATASKETCHES
+
+struct AggregateFunctionUniqThetaSketchData
+{
+    using Set = ThetaSketchData<UInt64>;
+    Set set;
+
+    static String getName() { return "uniqThetaSketch"; }
+};
+
+#endif
+
 namespace detail
 {
 
@@ -189,6 +203,12 @@ struct OneAdder
             data.set.insert(key);
         }
     }
+#if USE_DATASKETCHES
+    else if constexpr (std::is_same_v<Data, AggregateFunctionUniqThetaSketchData>)
+    {
+        data.set.insertOriginal(column.getDataAt(row_num));
+    }
+#endif
 }
 };
src/AggregateFunctions/ThetaSketchData.h (new file, 119 lines)
@@ -0,0 +1,119 @@
+#pragma once
+
+#if !defined(ARCADIA_BUILD)
+#    include <Common/config.h>
+#endif
+
+#if USE_DATASKETCHES
+
+#include <boost/noncopyable.hpp>
+#include <memory>
+#include <theta_sketch.hpp> // Y_IGNORE
+#include <theta_union.hpp> // Y_IGNORE
+
+
+namespace DB
+{
+
+template <typename Key>
+class ThetaSketchData : private boost::noncopyable
+{
+private:
+    std::unique_ptr<datasketches::update_theta_sketch> sk_update;
+    std::unique_ptr<datasketches::theta_union> sk_union;
+
+    inline datasketches::update_theta_sketch * getSkUpdate()
+    {
+        if (!sk_update)
+            sk_update = std::make_unique<datasketches::update_theta_sketch>(datasketches::update_theta_sketch::builder().build());
+        return sk_update.get();
+    }
+
+    inline datasketches::theta_union * getSkUnion()
+    {
+        if (!sk_union)
+            sk_union = std::make_unique<datasketches::theta_union>(datasketches::theta_union::builder().build());
+        return sk_union.get();
+    }
+
+public:
+    using value_type = Key;
+
+    ThetaSketchData() = default;
+    ~ThetaSketchData() = default;
+
+    /// Insert the original value without hashing, as `datasketches::update_theta_sketch.update` will do the hash internally.
+    void insertOriginal(const StringRef & value)
+    {
+        getSkUpdate()->update(value.data, value.size);
+    }
+
+    /// Note that `datasketches::update_theta_sketch.update` will do the hash again.
+    void insert(Key value)
+    {
+        getSkUpdate()->update(value);
+    }
+
+    UInt64 size() const
+    {
+        if (sk_union)
+            return static_cast<UInt64>(sk_union->get_result().get_estimate());
+        else if (sk_update)
+            return static_cast<UInt64>(sk_update->get_estimate());
+        else
+            return 0;
+    }
+
+    void merge(const ThetaSketchData & rhs)
+    {
+        datasketches::theta_union * u = getSkUnion();
+
+        if (sk_update)
+        {
+            u->update(*sk_update);
+            sk_update.reset(nullptr);
+        }
+
+        if (rhs.sk_update)
+            u->update(*rhs.sk_update);
+        else if (rhs.sk_union)
+            u->update(rhs.sk_union->get_result());
+    }
+
+    /// You can only call this for an empty object.
+    void read(DB::ReadBuffer & in)
+    {
+        datasketches::compact_theta_sketch::vector_bytes bytes;
+        readVectorBinary(bytes, in);
+        if (!bytes.empty())
+        {
+            auto sk = datasketches::compact_theta_sketch::deserialize(bytes.data(), bytes.size());
+            getSkUnion()->update(sk);
+        }
+    }
+
+    void write(DB::WriteBuffer & out) const
+    {
+        if (sk_update)
+        {
+            auto bytes = sk_update->compact().serialize();
+            writeVectorBinary(bytes, out);
+        }
+        else if (sk_union)
+        {
+            auto bytes = sk_union->get_result().serialize();
+            writeVectorBinary(bytes, out);
+        }
+        else
+        {
+            datasketches::compact_theta_sketch::vector_bytes bytes;
+            writeVectorBinary(bytes, out);
+        }
+    }
+};
+
+
+}
+
+#endif
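A quick sanity sketch of the new container's API as declared above (hedged: it assumes the DataSketches library is available, `USE_DATASKETCHES` is set, and `UInt64` is pulled in transitively; the values and the expected estimate are illustrative):

```cpp
#include <AggregateFunctions/ThetaSketchData.h>
#include <iostream>

int main()
{
    DB::ThetaSketchData<UInt64> a;
    DB::ThetaSketchData<UInt64> b;

    a.insert(1);
    a.insert(2);
    b.insert(2);
    b.insert(3);

    a.merge(b);                      // a now approximates the union {1, 2, 3}
    std::cout << a.size() << '\n';   // approximate distinct count, ~3
}
```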
@@ -25,6 +25,7 @@ void registerAggregateFunctionsAny(AggregateFunctionFactory &);
 void registerAggregateFunctionsStatisticsStable(AggregateFunctionFactory &);
 void registerAggregateFunctionsStatisticsSimple(AggregateFunctionFactory &);
 void registerAggregateFunctionSum(AggregateFunctionFactory &);
+void registerAggregateFunctionSumCount(AggregateFunctionFactory &);
 void registerAggregateFunctionSumMap(AggregateFunctionFactory &);
 void registerAggregateFunctionsUniq(AggregateFunctionFactory &);
 void registerAggregateFunctionUniqCombined(AggregateFunctionFactory &);
@@ -83,6 +84,7 @@ void registerAggregateFunctions()
     registerAggregateFunctionsStatisticsStable(factory);
     registerAggregateFunctionsStatisticsSimple(factory);
     registerAggregateFunctionSum(factory);
+    registerAggregateFunctionSumCount(factory);
     registerAggregateFunctionSumMap(factory);
     registerAggregateFunctionsUniq(factory);
     registerAggregateFunctionUniqCombined(factory);
@@ -37,7 +37,7 @@ class IXDBCBridgeHelper : public IBridgeHelper
 public:
     explicit IXDBCBridgeHelper(ContextPtr context_) : IBridgeHelper(context_) {}
 
-    virtual std::vector<std::pair<std::string, std::string>> getURLParams(const std::string & cols, UInt64 max_block_size) const = 0;
+    virtual std::vector<std::pair<std::string, std::string>> getURLParams(UInt64 max_block_size) const = 0;
 
     virtual Poco::URI getColumnsInfoURI() const = 0;
 
@@ -138,12 +138,11 @@ protected:
         return uri;
     }
 
-    URLParams getURLParams(const std::string & cols, UInt64 max_block_size) const override
+    URLParams getURLParams(UInt64 max_block_size) const override
     {
         std::vector<std::pair<std::string, std::string>> result;
 
         result.emplace_back("connection_string", connection_string); /// already validated
-        result.emplace_back("columns", cols);
         result.emplace_back("max_block_size", std::to_string(max_block_size));
 
         return result;
@@ -139,6 +139,8 @@ public:
     UInt16 getPort() const;
     const String & getDefaultDatabase() const;
 
+    Protocol::Compression getCompression() const { return compression; }
+
     /// If last flag is true, you need to call sendExternalTablesData after.
     void sendQuery(
         const ConnectionTimeouts & timeouts,
@@ -122,7 +122,7 @@ namespace
         else if (auto * data_uint64 = getIndexesData<UInt64>(column))
             return mapUniqueIndexImpl(*data_uint64);
         else
-            throw Exception("Indexes column for getUniqueIndex must be ColumnUInt, got" + column.getName(),
+            throw Exception("Indexes column for getUniqueIndex must be ColumnUInt, got " + column.getName(),
                             ErrorCodes::LOGICAL_ERROR);
     }
 }
@@ -151,7 +151,7 @@ void ColumnLowCardinality::insertFrom(const IColumn & src, size_t n)
     const auto * low_cardinality_src = typeid_cast<const ColumnLowCardinality *>(&src);
 
     if (!low_cardinality_src)
-        throw Exception("Expected ColumnLowCardinality, got" + src.getName(), ErrorCodes::ILLEGAL_COLUMN);
+        throw Exception("Expected ColumnLowCardinality, got " + src.getName(), ErrorCodes::ILLEGAL_COLUMN);
 
     size_t position = low_cardinality_src->getIndexes().getUInt(n);
@@ -66,7 +66,7 @@ ColumnPtr selectIndexImpl(const Column & column, const IColumn & indexes, size_t
     else if (auto * data_uint64 = detail::getIndexesData<UInt64>(indexes))
         return column.template indexImpl<UInt64>(*data_uint64, limit);
     else
-        throw Exception("Indexes column for IColumn::select must be ColumnUInt, got" + indexes.getName(),
+        throw Exception("Indexes column for IColumn::select must be ColumnUInt, got " + indexes.getName(),
                         ErrorCodes::LOGICAL_ERROR);
 }
@@ -88,7 +88,6 @@ void checkColumn(
     if (num_collisions <= max_collisions_to_print)
     {
         collisions_str << "Collision:\n";
-        collisions_str << print_for_row(it->second) << '\n';
         collisions_str << print_for_row(i) << std::endl;
     }
@@ -150,7 +150,7 @@ void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_
     ///
     /// And in this case the exception will not be logged, so let's block the
     /// MemoryTracker until the exception will be logged.
-    MemoryTracker::LockExceptionInThread lock_memory_tracker;
+    MemoryTracker::LockExceptionInThread lock_memory_tracker(VariableContext::Global);
 
     try
     {
@@ -24,8 +24,8 @@ namespace
     ///
     /// - when it is explicitly blocked with LockExceptionInThread
     ///
-    /// - to avoid std::terminate(), when stack unwinding is currently in progress
-    ///   in this thread.
+    /// - when there are uncaught exception objects in the current thread
+    ///   (to avoid std::terminate())
     ///
     /// NOTE: that since C++11 destructor marked with noexcept by default, and
     /// this means that any throw from destructor (that is not marked with
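The comment block above describes when the memory tracker must not throw; a minimal sketch of that RAII pattern with hypothetical simplified types (the real guard is the `MemoryTracker::LockExceptionInThread` shown in the surrounding hunks):

```cpp
#include <exception>

// Hypothetical, simplified sketch of a thread-local "do not throw from the
// memory tracker" guard: while an instance is alive, allocation accounting
// may log an overshoot but must not throw.
thread_local bool memory_exception_blocked = false;

struct LockMemoryExceptionInThreadSketch
{
    LockMemoryExceptionInThreadSketch() { memory_exception_blocked = true; }
    ~LockMemoryExceptionInThreadSketch() { memory_exception_blocked = false; }
};

bool mayThrowMemoryExceptionSketch()
{
    // Blocked explicitly, or unsafe because an exception is already in
    // flight in this thread (throwing then would call std::terminate()).
    return !memory_exception_blocked && std::uncaught_exceptions() == 0;
}
```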
@@ -146,6 +146,9 @@
     M(StorageBufferPassedTimeMaxThreshold, "") \
     M(StorageBufferPassedRowsMaxThreshold, "") \
     M(StorageBufferPassedBytesMaxThreshold, "") \
+    M(StorageBufferPassedTimeFlushThreshold, "") \
+    M(StorageBufferPassedRowsFlushThreshold, "") \
+    M(StorageBufferPassedBytesFlushThreshold, "") \
     M(StorageBufferLayerLockReadersWaitMilliseconds, "Time for waiting for Buffer layer during reading") \
     M(StorageBufferLayerLockWritersWaitMilliseconds, "Time for waiting free Buffer layer to write to (can be used to tune Buffer layers)") \
     \
@@ -184,6 +184,10 @@ static void * getCallerAddress(const ucontext_t & context)
 #    else
     return reinterpret_cast<void *>(context.uc_mcontext.gregs[REG_RIP]);
 #    endif
 
+#elif defined(__APPLE__) && defined(__aarch64__)
+    return reinterpret_cast<void *>(context.uc_mcontext->__ss.__pc);
+
 #elif defined(__aarch64__)
     return reinterpret_cast<void *>(context.uc_mcontext.pc);
 #elif defined(__powerpc64__)
@@ -116,6 +116,7 @@ struct Request
     virtual ~Request() = default;
     virtual String getPath() const = 0;
     virtual void addRootPath(const String & /* root_path */) {}
+    virtual size_t bytesSize() const { return 0; }
 };
 
 struct Response;
@@ -131,6 +132,7 @@ struct Response
     Response & operator=(const Response &) = default;
     virtual ~Response() = default;
     virtual void removeRootPath(const String & /* root_path */) {}
+    virtual size_t bytesSize() const { return 0; }
 };
 
 struct WatchResponse : virtual Response
@@ -140,6 +142,8 @@ struct WatchResponse : virtual Response
     String path;
 
     void removeRootPath(const String & root_path) override;
+
+    size_t bytesSize() const override { return path.size() + sizeof(type) + sizeof(state); }
 };
 
 using WatchCallback = std::function<void(const WatchResponse &)>;
@@ -154,6 +158,9 @@ struct CreateRequest : virtual Request
 
     void addRootPath(const String & root_path) override;
     String getPath() const override { return path; }
+
+    size_t bytesSize() const override { return path.size() + data.size()
+        + sizeof(is_ephemeral) + sizeof(is_sequential) + acls.size() * sizeof(ACL); }
 };
 
 struct CreateResponse : virtual Response
@@ -161,6 +168,8 @@ struct CreateResponse : virtual Response
     String path_created;
 
     void removeRootPath(const String & root_path) override;
+
+    size_t bytesSize() const override { return path_created.size(); }
 };
 
 struct RemoveRequest : virtual Request
@@ -170,6 +179,8 @@ struct RemoveRequest : virtual Request
 
     void addRootPath(const String & root_path) override;
     String getPath() const override { return path; }
+
+    size_t bytesSize() const override { return path.size() + sizeof(version); }
 };
 
 struct RemoveResponse : virtual Response
@@ -182,11 +193,15 @@ struct ExistsRequest : virtual Request
 
     void addRootPath(const String & root_path) override;
     String getPath() const override { return path; }
+
+    size_t bytesSize() const override { return path.size(); }
 };
 
 struct ExistsResponse : virtual Response
 {
     Stat stat;
+
+    size_t bytesSize() const override { return sizeof(Stat); }
 };
 
 struct GetRequest : virtual Request
@@ -195,12 +210,16 @@ struct GetRequest : virtual Request
 
     void addRootPath(const String & root_path) override;
     String getPath() const override { return path; }
+
+    size_t bytesSize() const override { return path.size(); }
 };
 
 struct GetResponse : virtual Response
 {
     String data;
     Stat stat;
+
+    size_t bytesSize() const override { return data.size() + sizeof(stat); }
 };
 
 struct SetRequest : virtual Request
@@ -211,11 +230,15 @@ struct SetRequest : virtual Request
 
     void addRootPath(const String & root_path) override;
     String getPath() const override { return path; }
+
+    size_t bytesSize() const override { return data.size() + data.size() + sizeof(version); }
 };
 
 struct SetResponse : virtual Response
 {
     Stat stat;
+
+    size_t bytesSize() const override { return sizeof(stat); }
 };
 
 struct ListRequest : virtual Request
@@ -224,12 +247,22 @@ struct ListRequest : virtual Request
 
     void addRootPath(const String & root_path) override;
     String getPath() const override { return path; }
+
+    size_t bytesSize() const override { return path.size(); }
 };
 
 struct ListResponse : virtual Response
 {
     std::vector<String> names;
     Stat stat;
+
+    size_t bytesSize() const override
+    {
+        size_t size = sizeof(stat);
+        for (const auto & name : names)
+            size += name.size();
+        return size;
+    }
 };
 
 struct CheckRequest : virtual Request
@@ -239,6 +272,8 @@ struct CheckRequest : virtual Request
 
     void addRootPath(const String & root_path) override;
     String getPath() const override { return path; }
+
+    size_t bytesSize() const override { return path.size() + sizeof(version); }
 };
 
 struct CheckResponse : virtual Response
@@ -251,6 +286,14 @@ struct MultiRequest : virtual Request
 
     void addRootPath(const String & root_path) override;
     String getPath() const override { return {}; }
+
+    size_t bytesSize() const override
+    {
+        size_t size = 0;
+        for (const auto & request : requests)
+            size += request->bytesSize();
+        return size;
+    }
 };
 
 struct MultiResponse : virtual Response
@@ -258,6 +301,14 @@ struct MultiResponse : virtual Response
     Responses responses;
 
     void removeRootPath(const String & root_path) override;
+
+    size_t bytesSize() const override
+    {
+        size_t size = 0;
+        for (const auto & response : responses)
+            size += response->bytesSize();
+        return size;
+    }
 };
 
 /// This response may be received only as an element of responses in MultiResponse.
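These accessors make it cheap to account the approximate memory held by queued operations; a hedged sketch of how a caller might aggregate them (hypothetical container, not code from this commit):

```cpp
#include <cstddef>
#include <vector>

// Hypothetical aggregation over the new bytesSize() accessors: estimate the
// memory held by a queue of pending requests or responses.
template <typename Ptr>
size_t totalBytes(const std::vector<Ptr> & queue)
{
    size_t total = 0;
    for (const auto & item : queue)
        total += item->bytesSize();   // each subclass reports its own payload
    return total;
}
```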
@@ -421,26 +421,38 @@ std::pair<ResponsePtr, Undo> TestKeeperMultiRequest::process(TestKeeper::Contain
 
     try
     {
-        for (const auto & request : requests)
+        auto request_it = requests.begin();
+        response.error = Error::ZOK;
+        while (request_it != requests.end())
         {
-            const TestKeeperRequest & concrete_request = dynamic_cast<const TestKeeperRequest &>(*request);
+            const TestKeeperRequest & concrete_request = dynamic_cast<const TestKeeperRequest &>(**request_it);
+            ++request_it;
             auto [ cur_response, undo_action ] = concrete_request.process(container, zxid);
             response.responses.emplace_back(cur_response);
             if (cur_response->error != Error::ZOK)
             {
                 response.error = cur_response->error;
+                break;
+            }
 
-                for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
-                    if (*it)
-                        (*it)();
+            undo_actions.emplace_back(std::move(undo_action));
+        }
 
-                return { std::make_shared<MultiResponse>(response), {} };
+        if (response.error != Error::ZOK)
+        {
+            for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
+                if (*it)
+                    (*it)();
+
+            while (request_it != requests.end())
+            {
+                const TestKeeperRequest & concrete_request = dynamic_cast<const TestKeeperRequest &>(**request_it);
+                ++request_it;
+                response.responses.emplace_back(concrete_request.createResponse());
+                response.responses.back()->error = Error::ZRUNTIMEINCONSISTENCY;
             }
-            else
-                undo_actions.emplace_back(std::move(undo_action));
         }
 
-        response.error = Error::ZOK;
         return { std::make_shared<MultiResponse>(response), {} };
     }
     catch (...)
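A compact sketch of the apply-or-undo control flow the rewritten loop implements (hedged: generic stand-in types, not the TestKeeper classes; on the first failure, accumulated undo actions run in reverse and the untried operations are reported as not executed):

```cpp
#include <functional>
#include <vector>

struct OpResult { bool ok = false; std::function<void()> undo; };

// Generic stand-in: apply ops in order; on first failure, roll back what
// succeeded (in reverse order) and mark the rest as failed, analogous to
// ZRUNTIMEINCONSISTENCY for the untried sub-requests of a multi-request.
std::vector<bool> applyAll(std::vector<std::function<OpResult()>> & ops)
{
    std::vector<bool> status;
    std::vector<std::function<void()>> undo_actions;
    size_t i = 0;

    for (; i < ops.size(); ++i)
    {
        OpResult r = ops[i]();
        status.push_back(r.ok);
        if (!r.ok)
            break;                            // first failure stops the batch
        undo_actions.emplace_back(std::move(r.undo));
    }

    if (i < ops.size())                       // a failure happened
    {
        for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
            if (*it)
                (*it)();                      // undo in reverse order
        for (++i; i < ops.size(); ++i)
            status.push_back(false);          // remaining ops never ran
    }
    return status;
}
```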
@@ -455,6 +455,39 @@ ZooKeeperResponsePtr ZooKeeperCheckRequest::makeResponse() const { return std::m
 ZooKeeperResponsePtr ZooKeeperMultiRequest::makeResponse() const { return std::make_shared<ZooKeeperMultiResponse>(requests); }
 ZooKeeperResponsePtr ZooKeeperCloseRequest::makeResponse() const { return std::make_shared<ZooKeeperCloseResponse>(); }
 
+void ZooKeeperSessionIDRequest::writeImpl(WriteBuffer & out) const
+{
+    Coordination::write(internal_id, out);
+    Coordination::write(session_timeout_ms, out);
+    Coordination::write(server_id, out);
+}
+
+void ZooKeeperSessionIDRequest::readImpl(ReadBuffer & in)
+{
+    Coordination::read(internal_id, in);
+    Coordination::read(session_timeout_ms, in);
+    Coordination::read(server_id, in);
+}
+
+Coordination::ZooKeeperResponsePtr ZooKeeperSessionIDRequest::makeResponse() const
+{
+    return std::make_shared<ZooKeeperSessionIDResponse>();
+}
+
+void ZooKeeperSessionIDResponse::readImpl(ReadBuffer & in)
+{
+    Coordination::read(internal_id, in);
+    Coordination::read(session_id, in);
+    Coordination::read(server_id, in);
+}
+
+void ZooKeeperSessionIDResponse::writeImpl(WriteBuffer & out) const
+{
+    Coordination::write(internal_id, out);
+    Coordination::write(session_id, out);
+    Coordination::write(server_id, out);
+}
+
 void ZooKeeperRequestFactory::registerRequest(OpNum op_num, Creator creator)
 {
     if (!op_num_to_request.try_emplace(op_num, creator).second)
@@ -511,6 +544,7 @@ ZooKeeperRequestFactory::ZooKeeperRequestFactory()
     registerZooKeeperRequest<OpNum::List, ZooKeeperListRequest>(*this);
     registerZooKeeperRequest<OpNum::Check, ZooKeeperCheckRequest>(*this);
     registerZooKeeperRequest<OpNum::Multi, ZooKeeperMultiRequest>(*this);
+    registerZooKeeperRequest<OpNum::SessionID, ZooKeeperSessionIDRequest>(*this);
 }
 
 }
@@ -84,6 +84,8 @@ struct ZooKeeperSyncRequest final : ZooKeeperRequest
     void readImpl(ReadBuffer & in) override;
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }
+
+    size_t bytesSize() const override { return ZooKeeperRequest::bytesSize() + path.size(); }
 };
 
 struct ZooKeeperSyncResponse final : ZooKeeperResponse
@@ -92,6 +94,8 @@ struct ZooKeeperSyncResponse final : ZooKeeperResponse
     void readImpl(ReadBuffer & in) override;
     void writeImpl(WriteBuffer & out) const override;
     OpNum getOpNum() const override { return OpNum::Sync; }
+
+    size_t bytesSize() const override { return path.size(); }
 };
 
 struct ZooKeeperHeartbeatResponse final : ZooKeeperResponse
@@ -128,6 +132,9 @@ struct ZooKeeperAuthRequest final : ZooKeeperRequest
 
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }
+
+    size_t bytesSize() const override { return ZooKeeperRequest::bytesSize() + sizeof(xid) +
+        sizeof(type) + scheme.size() + data.size(); }
 };
 
 struct ZooKeeperAuthResponse final : ZooKeeperResponse
@@ -136,6 +143,8 @@ struct ZooKeeperAuthResponse final : ZooKeeperResponse
     void writeImpl(WriteBuffer &) const override {}
 
     OpNum getOpNum() const override { return OpNum::Auth; }
+
+    size_t bytesSize() const override { return ZooKeeperResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
 };
 
 struct ZooKeeperCloseRequest final : ZooKeeperRequest
@@ -172,6 +181,8 @@ struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest
 
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }
+
+    size_t bytesSize() const override { return CreateRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); }
 };
 
 struct ZooKeeperCreateResponse final : CreateResponse, ZooKeeperResponse
@@ -181,6 +192,8 @@ struct ZooKeeperCreateResponse final : CreateResponse, ZooKeeperResponse
     void writeImpl(WriteBuffer & out) const override;
 
     OpNum getOpNum() const override { return OpNum::Create; }
+
+    size_t bytesSize() const override { return CreateResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
 };
 
 struct ZooKeeperRemoveRequest final : RemoveRequest, ZooKeeperRequest
@@ -194,6 +207,8 @@ struct ZooKeeperRemoveRequest final : RemoveRequest, ZooKeeperRequest
 
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }
+
+    size_t bytesSize() const override { return RemoveRequest::bytesSize() + sizeof(xid); }
 };
 
 struct ZooKeeperRemoveResponse final : RemoveResponse, ZooKeeperResponse
@@ -201,6 +216,8 @@ struct ZooKeeperRemoveResponse final : RemoveResponse, ZooKeeperResponse
     void readImpl(ReadBuffer &) override {}
     void writeImpl(WriteBuffer &) const override {}
     OpNum getOpNum() const override { return OpNum::Remove; }
+
+    size_t bytesSize() const override { return RemoveResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
 };
 
 struct ZooKeeperExistsRequest final : ExistsRequest, ZooKeeperRequest
@@ -211,6 +228,8 @@ struct ZooKeeperExistsRequest final : ExistsRequest, ZooKeeperRequest
 
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return !has_watch; }
+
+    size_t bytesSize() const override { return ExistsRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); }
 };
 
 struct ZooKeeperExistsResponse final : ExistsResponse, ZooKeeperResponse
@@ -218,6 +237,8 @@ struct ZooKeeperExistsResponse final : ExistsResponse, ZooKeeperResponse
     void readImpl(ReadBuffer & in) override;
     void writeImpl(WriteBuffer & out) const override;
     OpNum getOpNum() const override { return OpNum::Exists; }
+
+    size_t bytesSize() const override { return ExistsResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
 };
 
 struct ZooKeeperGetRequest final : GetRequest, ZooKeeperRequest
@@ -228,6 +249,8 @@ struct ZooKeeperGetRequest final : GetRequest, ZooKeeperRequest
 
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return !has_watch; }
+
+    size_t bytesSize() const override { return GetRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); }
 };
 
 struct ZooKeeperGetResponse final : GetResponse, ZooKeeperResponse
@@ -235,6 +258,8 @@ struct ZooKeeperGetResponse final : GetResponse, ZooKeeperResponse
     void readImpl(ReadBuffer & in) override;
     void writeImpl(WriteBuffer & out) const override;
     OpNum getOpNum() const override { return OpNum::Get; }
+
+    size_t bytesSize() const override { return GetResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
 };
 
 struct ZooKeeperSetRequest final : SetRequest, ZooKeeperRequest
@@ -247,6 +272,8 @@ struct ZooKeeperSetRequest final : SetRequest, ZooKeeperRequest
     void readImpl(ReadBuffer & in) override;
     ZooKeeperResponsePtr makeResponse() const override;
     bool isReadRequest() const override { return false; }
+
+    size_t bytesSize() const override { return SetRequest::bytesSize() + sizeof(xid); }
 };
 
 struct ZooKeeperSetResponse final : SetResponse, ZooKeeperResponse
@@ -254,6 +281,8 @@ struct ZooKeeperSetResponse final : SetResponse, ZooKeeperResponse
|
|||||||
void readImpl(ReadBuffer & in) override;
|
void readImpl(ReadBuffer & in) override;
|
||||||
void writeImpl(WriteBuffer & out) const override;
|
void writeImpl(WriteBuffer & out) const override;
|
||||||
OpNum getOpNum() const override { return OpNum::Set; }
|
OpNum getOpNum() const override { return OpNum::Set; }
|
||||||
|
|
||||||
|
size_t bytesSize() const override { return SetResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
|
||||||
};
|
};
|
||||||
|
|
||||||
struct ZooKeeperListRequest : ListRequest, ZooKeeperRequest
|
struct ZooKeeperListRequest : ListRequest, ZooKeeperRequest
|
||||||
@ -263,6 +292,8 @@ struct ZooKeeperListRequest : ListRequest, ZooKeeperRequest
|
|||||||
void readImpl(ReadBuffer & in) override;
|
void readImpl(ReadBuffer & in) override;
|
||||||
ZooKeeperResponsePtr makeResponse() const override;
|
ZooKeeperResponsePtr makeResponse() const override;
|
||||||
bool isReadRequest() const override { return !has_watch; }
|
bool isReadRequest() const override { return !has_watch; }
|
||||||
|
|
||||||
|
size_t bytesSize() const override { return ListRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); }
|
||||||
};
|
};
|
||||||
|
|
||||||
struct ZooKeeperSimpleListRequest final : ZooKeeperListRequest
|
struct ZooKeeperSimpleListRequest final : ZooKeeperListRequest
|
||||||
@ -275,6 +306,8 @@ struct ZooKeeperListResponse : ListResponse, ZooKeeperResponse
|
|||||||
void readImpl(ReadBuffer & in) override;
|
void readImpl(ReadBuffer & in) override;
|
||||||
void writeImpl(WriteBuffer & out) const override;
|
void writeImpl(WriteBuffer & out) const override;
|
||||||
OpNum getOpNum() const override { return OpNum::List; }
|
OpNum getOpNum() const override { return OpNum::List; }
|
||||||
|
|
||||||
|
size_t bytesSize() const override { return ListResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
|
||||||
};
|
};
|
||||||
|
|
||||||
struct ZooKeeperSimpleListResponse final : ZooKeeperListResponse
|
struct ZooKeeperSimpleListResponse final : ZooKeeperListResponse
|
||||||
@ -293,6 +326,8 @@ struct ZooKeeperCheckRequest final : CheckRequest, ZooKeeperRequest
|
|||||||
|
|
||||||
ZooKeeperResponsePtr makeResponse() const override;
|
ZooKeeperResponsePtr makeResponse() const override;
|
||||||
bool isReadRequest() const override { return !has_watch; }
|
bool isReadRequest() const override { return !has_watch; }
|
||||||
|
|
||||||
|
size_t bytesSize() const override { return CheckRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); }
|
||||||
};
|
};
|
||||||
|
|
||||||
struct ZooKeeperCheckResponse final : CheckResponse, ZooKeeperResponse
|
struct ZooKeeperCheckResponse final : CheckResponse, ZooKeeperResponse
|
||||||
@ -300,6 +335,8 @@ struct ZooKeeperCheckResponse final : CheckResponse, ZooKeeperResponse
|
|||||||
void readImpl(ReadBuffer &) override {}
|
void readImpl(ReadBuffer &) override {}
|
||||||
void writeImpl(WriteBuffer &) const override {}
|
void writeImpl(WriteBuffer &) const override {}
|
||||||
OpNum getOpNum() const override { return OpNum::Check; }
|
OpNum getOpNum() const override { return OpNum::Check; }
|
||||||
|
|
||||||
|
size_t bytesSize() const override { return CheckResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
|
||||||
};
|
};
|
||||||
|
|
||||||
/// This response may be received only as an element of responses in MultiResponse.
|
/// This response may be received only as an element of responses in MultiResponse.
|
||||||
@ -309,6 +346,8 @@ struct ZooKeeperErrorResponse final : ErrorResponse, ZooKeeperResponse
|
|||||||
void writeImpl(WriteBuffer & out) const override;
|
void writeImpl(WriteBuffer & out) const override;
|
||||||
|
|
||||||
OpNum getOpNum() const override { return OpNum::Error; }
|
OpNum getOpNum() const override { return OpNum::Error; }
|
||||||
|
|
||||||
|
size_t bytesSize() const override { return ErrorResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
|
||||||
};
|
};
|
||||||
|
|
||||||
struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest
|
struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest
|
||||||
@ -323,6 +362,8 @@ struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest
|
|||||||
|
|
||||||
ZooKeeperResponsePtr makeResponse() const override;
|
ZooKeeperResponsePtr makeResponse() const override;
|
||||||
bool isReadRequest() const override;
|
bool isReadRequest() const override;
|
||||||
|
|
||||||
|
size_t bytesSize() const override { return MultiRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); }
|
||||||
};
|
};
|
||||||
|
|
||||||
struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse
|
struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse
|
||||||
@ -346,6 +387,41 @@ struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse
|
|||||||
|
|
||||||
void writeImpl(WriteBuffer & out) const override;
|
void writeImpl(WriteBuffer & out) const override;
|
||||||
|
|
||||||
|
size_t bytesSize() const override { return MultiResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Fake internal coordination (keeper) response. Never received from client
|
||||||
|
/// and never send to client.
|
||||||
|
struct ZooKeeperSessionIDRequest final : ZooKeeperRequest
|
||||||
|
{
|
||||||
|
int64_t internal_id;
|
||||||
|
int64_t session_timeout_ms;
|
||||||
|
/// Who requested this session
|
||||||
|
int32_t server_id;
|
||||||
|
|
||||||
|
Coordination::OpNum getOpNum() const override { return OpNum::SessionID; }
|
||||||
|
String getPath() const override { return {}; }
|
||||||
|
void writeImpl(WriteBuffer & out) const override;
|
||||||
|
void readImpl(ReadBuffer & in) override;
|
||||||
|
|
||||||
|
Coordination::ZooKeeperResponsePtr makeResponse() const override;
|
||||||
|
bool isReadRequest() const override { return false; }
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Fake internal coordination (keeper) response. Never received from client
|
||||||
|
/// and never send to client.
|
||||||
|
struct ZooKeeperSessionIDResponse final : ZooKeeperResponse
|
||||||
|
{
|
||||||
|
int64_t internal_id;
|
||||||
|
int64_t session_id;
|
||||||
|
/// Who requested this session
|
||||||
|
int32_t server_id;
|
||||||
|
|
||||||
|
void readImpl(ReadBuffer & in) override;
|
||||||
|
|
||||||
|
void writeImpl(WriteBuffer & out) const override;
|
||||||
|
|
||||||
|
Coordination::OpNum getOpNum() const override { return OpNum::SessionID; }
|
||||||
};
|
};
|
||||||
|
|
||||||
class ZooKeeperRequestFactory final : private boost::noncopyable
|
class ZooKeeperRequestFactory final : private boost::noncopyable
|
||||||
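Note on the pattern above: every added bytesSize() override composes the base class's estimate with the fixed-width fields the subclass serializes on top, so the figure stays correct however deep the hierarchy nests. A minimal self-contained sketch of the idiom (the two structs are illustrative stand-ins, not the real ClickHouse hierarchy):

    #include <cstddef>
    #include <cstdint>
    #include <string>

    struct RequestBase
    {
        int32_t xid = 0;
        virtual ~RequestBase() = default;
        virtual size_t bytesSize() const { return sizeof(xid); }
    };

    struct AuthLikeRequest : RequestBase
    {
        int32_t type = 0;
        std::string scheme;
        std::string data;

        /// Base estimate plus every field this request adds on the wire.
        size_t bytesSize() const override
        {
            return RequestBase::bytesSize() + sizeof(type) + scheme.size() + data.size();
        }
    };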
@@ -21,6 +21,7 @@ static const std::unordered_set<int32_t> VALID_OPERATIONS =
     static_cast<int32_t>(OpNum::Check),
     static_cast<int32_t>(OpNum::Multi),
     static_cast<int32_t>(OpNum::Auth),
+    static_cast<int32_t>(OpNum::SessionID),
 };
 
 std::string toString(OpNum op_num)
@@ -55,6 +56,8 @@ std::string toString(OpNum op_num)
             return "Heartbeat";
         case OpNum::Auth:
             return "Auth";
+        case OpNum::SessionID:
+            return "SessionID";
     }
     int32_t raw_op = static_cast<int32_t>(op_num);
     throw Exception("Operation " + std::to_string(raw_op) + " is unknown", Error::ZUNIMPLEMENTED);
@@ -30,6 +30,7 @@ enum class OpNum : int32_t
     Check = 13,
     Multi = 14,
     Auth = 100,
+    SessionID = 997, /// Special internal request
 };
 
 std::string toString(OpNum op_num);
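The value 997 sits deliberately outside every opcode a real ZooKeeper client can send (the client-visible values above top out at Multi = 14 and Auth = 100), which is why VALID_OPERATIONS and toString() must be taught about it explicitly. A compile-time restatement of that invariant (illustrative model, not the real header):

    #include <cstdint>

    enum class OpNumModel : int32_t { Check = 13, Multi = 14, Auth = 100, SessionID = 997 };

    /// An internal opcode must never collide with a client-visible one.
    static_assert(static_cast<int32_t>(OpNumModel::SessionID) > 100,
                  "SessionID must stay outside the client protocol range");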
@@ -1012,6 +1012,16 @@ void ZooKeeper::pushRequest(RequestInfo && info)
     ProfileEvents::increment(ProfileEvents::ZooKeeperTransactions);
 }
 
+void ZooKeeper::executeGenericRequest(
+    const ZooKeeperRequestPtr & request,
+    ResponseCallback callback)
+{
+    RequestInfo request_info;
+    request_info.request = request;
+    request_info.callback = callback;
+
+    pushRequest(std::move(request_info));
+}
 
 void ZooKeeper::create(
     const String & path,
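executeGenericRequest() is a thin funnel: any ZooKeeperRequestPtr is wrapped into a RequestInfo and pushed onto the same queue the typed helpers (create, remove, ...) already use. Assuming ResponseCallback is a std::function over the base Response, as its use here suggests, a caller could look roughly like this (hedged sketch, not code from this commit; the znode path is made up):

    #include <memory>
    #include <Common/ZooKeeper/ZooKeeperCommon.h>
    #include <Common/ZooKeeper/ZooKeeperImpl.h>

    /// Hypothetical usage: send one request through the generic path.
    void sendGenericGet(Coordination::ZooKeeper & zookeeper)
    {
        auto request = std::make_shared<Coordination::ZooKeeperGetRequest>();
        request->path = "/example/node";   /// hypothetical znode

        zookeeper.executeGenericRequest(request, [](const Coordination::Response & response)
        {
            if (response.error != Coordination::Error::ZOK)
            {
                /// handle failure; callers needing the payload downcast
                /// to the concrete response type
            }
        });
    }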
@@ -121,6 +121,9 @@ public:
     /// Useful to check owner of ephemeral node.
     int64_t getSessionID() const override { return session_id; }
 
+    void executeGenericRequest(
+        const ZooKeeperRequestPtr & request,
+        ResponseCallback callback);
 
     /// See the documentation about semantics of these methods in IKeeper class.
 
@@ -15,3 +15,4 @@
 #cmakedefine01 USE_GRPC
 #cmakedefine01 USE_STATS
 #cmakedefine01 CLICKHOUSE_SPLIT_BINARY
+#cmakedefine01 USE_DATASKETCHES
@@ -80,7 +80,7 @@ public:
     {}
 
 
-    off_t appendRecord(ChangelogRecord && record, bool sync)
+    off_t appendRecord(ChangelogRecord && record)
     {
         off_t result = plain_buf.count();
         writeIntBinary(computeRecordChecksum(record), plain_buf);
@@ -96,23 +96,21 @@ public:
 
         entries_written++;
 
-        if (sync)
-            plain_buf.sync();
-        else
-            plain_buf.next();
         return result;
     }
 
     void truncateToLength(off_t new_length)
     {
-        flush();
+        plain_buf.next();
         plain_buf.truncate(new_length);
         plain_buf.seek(new_length, SEEK_SET);
     }
 
-    void flush()
+    void flush(bool force_fsync)
     {
-        plain_buf.sync();
+        plain_buf.next();
+        if (force_fsync)
+            plain_buf.sync();
     }
 
     uint64_t getEntriesWritten() const
@@ -247,9 +245,14 @@ private:
     ReadBufferFromFile read_buf;
 };
 
-Changelog::Changelog(const std::string & changelogs_dir_, uint64_t rotate_interval_, Poco::Logger * log_)
+Changelog::Changelog(
+    const std::string & changelogs_dir_,
+    uint64_t rotate_interval_,
+    bool force_sync_,
+    Poco::Logger * log_)
     : changelogs_dir(changelogs_dir_)
     , rotate_interval(rotate_interval_)
+    , force_sync(force_sync_)
     , log(log_)
 {
     namespace fs = std::filesystem;
@@ -357,6 +360,9 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin
 
 void Changelog::rotate(uint64_t new_start_log_index)
 {
+    /// Flush previous log
+    flush();
+
     ChangelogFileDescription new_description;
     new_description.prefix = DEFAULT_PREFIX;
     new_description.from_log_index = new_start_log_index;
@@ -387,7 +393,7 @@ ChangelogRecord Changelog::buildRecord(uint64_t index, const LogEntryPtr & log_e
     return record;
 }
 
-void Changelog::appendEntry(uint64_t index, const LogEntryPtr & log_entry, bool force_sync)
+void Changelog::appendEntry(uint64_t index, const LogEntryPtr & log_entry)
 {
     if (!current_writer)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Changelog must be initialized before appending records");
@@ -398,14 +404,14 @@ void Changelog::appendEntry(uint64_t index, const LogEntryPtr & log_entry, bool
     if (current_writer->getEntriesWritten() == rotate_interval)
         rotate(index);
 
-    auto offset = current_writer->appendRecord(buildRecord(index, log_entry), force_sync);
+    auto offset = current_writer->appendRecord(buildRecord(index, log_entry));
     if (!index_to_start_pos.try_emplace(index, offset).second)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Record with index {} already exists", index);
 
     logs[index] = makeClone(log_entry);
 }
 
-void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry, bool force_sync)
+void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry)
 {
     if (index_to_start_pos.count(index) == 0)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot write at index {} because changelog doesn't contain it", index);
@@ -451,7 +457,7 @@ void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry, bool forc
 
     current_writer->setEntriesWritten(entries_written);
 
-    appendEntry(index, log_entry, force_sync);
+    appendEntry(index, log_entry);
 }
 
 void Changelog::compact(uint64_t up_to_log_index)
@@ -540,7 +546,7 @@ nuraft::ptr<nuraft::buffer> Changelog::serializeEntriesToBuffer(uint64_t index,
     return buf_out;
 }
 
-void Changelog::applyEntriesFromBuffer(uint64_t index, nuraft::buffer & buffer, bool force_sync)
+void Changelog::applyEntriesFromBuffer(uint64_t index, nuraft::buffer & buffer)
 {
     buffer.pos(0);
     int num_logs = buffer.get_int();
@@ -555,23 +561,23 @@ void Changelog::applyEntriesFromBuffer(uint64_t index, nuraft::buffer & buffer,
 
         LogEntryPtr log_entry = nuraft::log_entry::deserialize(*buf_local);
         if (i == 0 && logs.count(cur_index))
-            writeAt(cur_index, log_entry, force_sync);
+            writeAt(cur_index, log_entry);
         else
-            appendEntry(cur_index, log_entry, force_sync);
+            appendEntry(cur_index, log_entry);
     }
 }
 
 void Changelog::flush()
 {
-    current_writer->flush();
+    if (current_writer)
+        current_writer->flush(force_sync);
 }
 
 Changelog::~Changelog()
 {
     try
     {
-        if (current_writer)
-            current_writer->flush();
+        flush();
     }
     catch (...)
     {
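Taken together, these Changelog changes move the durability decision from per-record to per-batch: appendRecord() now only stages bytes in the write buffer, and a single flush(force_sync) pays the write-plus-optional-fsync cost once for however many entries were appended. A self-contained sketch of that batching shape (POSIX fsync; all names here are illustrative, not ClickHouse code):

    #include <cstdio>
    #include <string>
    #include <vector>
    #include <unistd.h>   // fsync, fileno (POSIX)

    class BatchedLogSketch
    {
    public:
        explicit BatchedLogSketch(std::FILE * out_) : out(out_) {}

        /// Mirrors the new appendRecord(): stage only, never fsync here.
        void append(const std::string & record) { pending.push_back(record); }

        /// Mirrors flush(force_fsync): one flush, at most one fsync per batch.
        void flush(bool force_fsync)
        {
            for (const auto & record : pending)
                std::fwrite(record.data(), 1, record.size(), out);
            pending.clear();
            std::fflush(out);            /// hand buffered bytes to the OS (plain_buf.next())
            if (force_fsync)
                ::fsync(::fileno(out));  /// durability barrier (plain_buf.sync())
        }

    private:
        std::FILE * out;
        std::vector<std::string> pending;
    };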
@@ -63,17 +63,17 @@ class Changelog
 {
 
 public:
-    Changelog(const std::string & changelogs_dir_, uint64_t rotate_interval_, Poco::Logger * log_);
+    Changelog(const std::string & changelogs_dir_, uint64_t rotate_interval_, bool force_sync_, Poco::Logger * log_);
 
     /// Read changelog from files on changelogs_dir_ skipping all entries before from_log_index
     /// Truncate broken entries, remove files after broken entries.
     void readChangelogAndInitWriter(uint64_t last_commited_log_index, uint64_t logs_to_keep);
 
-    /// Add entry to log with index. Call fsync if force_sync true.
-    void appendEntry(uint64_t index, const LogEntryPtr & log_entry, bool force_sync);
+    /// Add entry to log with index.
+    void appendEntry(uint64_t index, const LogEntryPtr & log_entry);
 
     /// Write entry at index and truncate all subsequent entries.
-    void writeAt(uint64_t index, const LogEntryPtr & log_entry, bool force_sync);
+    void writeAt(uint64_t index, const LogEntryPtr & log_entry);
 
     /// Remove log files with to_log_index <= up_to_log_index.
     void compact(uint64_t up_to_log_index);
@@ -101,9 +101,9 @@ public:
     BufferPtr serializeEntriesToBuffer(uint64_t index, int32_t count);
 
     /// Apply entries from buffer overriding existing entries
-    void applyEntriesFromBuffer(uint64_t index, nuraft::buffer & buffer, bool force_sync);
+    void applyEntriesFromBuffer(uint64_t index, nuraft::buffer & buffer);
 
-    /// Fsync log to disk
+    /// Fsync latest log to disk and flush buffer
     void flush();
 
     uint64_t size() const
@@ -124,6 +124,7 @@ private:
 private:
     const std::string changelogs_dir;
     const uint64_t rotate_interval;
+    const bool force_sync;
     Poco::Logger * log;
 
     std::map<uint64_t, ChangelogFileDescription> existing_changelogs;
@@ -22,18 +22,19 @@ struct Settings;
     M(Milliseconds, heart_beat_interval_ms, 500, "Heartbeat interval between quorum nodes", 0) \
     M(Milliseconds, election_timeout_lower_bound_ms, 1000, "Lower bound of election timer (avoid too often leader elections)", 0) \
     M(Milliseconds, election_timeout_upper_bound_ms, 2000, "Upper bound of election timer (avoid too often leader elections)", 0) \
-    M(UInt64, reserved_log_items, 10000, "How many log items to store (don't remove during compaction)", 0) \
-    M(UInt64, snapshot_distance, 10000, "How many log items we have to collect to write new snapshot", 0) \
+    M(UInt64, reserved_log_items, 100000, "How many log items to store (don't remove during compaction)", 0) \
+    M(UInt64, snapshot_distance, 100000, "How many log items we have to collect to write new snapshot", 0) \
     M(Bool, auto_forwarding, true, "Allow to forward write requests from followers to leader", 0) \
     M(Milliseconds, shutdown_timeout, 5000, "How much time we will wait for RAFT shutdown", 0) \
     M(Milliseconds, startup_timeout, 30000, "How much time we will wait for RAFT to start", 0) \
     M(LogsLevel, raft_logs_level, LogsLevel::information, "Log internal RAFT logs into main server log level. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none'", 0) \
-    M(UInt64, rotate_log_storage_interval, 10000, "How many records will be stored in one log storage file", 0) \
+    M(UInt64, rotate_log_storage_interval, 100000, "How many records will be stored in one log storage file", 0) \
     M(UInt64, snapshots_to_keep, 3, "How many compressed snapshots to keep on disk", 0) \
     M(UInt64, stale_log_gap, 10000, "When node became stale and should receive snapshots from leader", 0) \
     M(UInt64, fresh_log_gap, 200, "When node became fresh", 0) \
+    M(UInt64, max_requests_batch_size, 100, "Max size of batch in requests count before it will be sent to RAFT", 0) \
     M(Bool, quorum_reads, false, "Execute read requests as writes through whole RAFT consensus with similar speed", 0) \
-    M(Bool, force_sync, true, " Call fsync on each change in RAFT changelog", 0)
+    M(Bool, force_sync, true, "Call fsync on each change in RAFT changelog", 0)
 
 DECLARE_SETTINGS_TRAITS(CoordinationSettingsTraits, LIST_OF_COORDINATION_SETTINGS)
 
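For context, each M(...) row above is expanded by DECLARE_SETTINGS_TRAITS into one typed, documented member of the settings object, so the two rows most relevant to this commit boil down to something like the following (a simplified model, not the real macro output):

    #include <cstdint>

    /// Simplified model of what the expanded settings carry.
    struct CoordinationSettingsModel
    {
        /// Requests accumulated before one append_entries() call into RAFT.
        uint64_t max_requests_batch_size = 100;
        /// Whether each changelog flush ends with an fsync.
        bool force_sync = true;
    };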
@@ -5,9 +5,12 @@ namespace DB
 
 KeeperLogStore::KeeperLogStore(const std::string & changelogs_path, uint64_t rotate_interval_, bool force_sync_)
     : log(&Poco::Logger::get("KeeperLogStore"))
-    , changelog(changelogs_path, rotate_interval_, log)
-    , force_sync(force_sync_)
+    , changelog(changelogs_path, rotate_interval_, force_sync_, log)
 {
+    if (force_sync_)
+        LOG_INFO(log, "force_sync enabled");
+    else
+        LOG_INFO(log, "force_sync disabled");
 }
 
 uint64_t KeeperLogStore::start_index() const
@@ -38,7 +41,7 @@ uint64_t KeeperLogStore::append(nuraft::ptr<nuraft::log_entry> & entry)
 {
     std::lock_guard lock(changelog_lock);
     uint64_t idx = changelog.getNextEntryIndex();
-    changelog.appendEntry(idx, entry, force_sync);
+    changelog.appendEntry(idx, entry);
     return idx;
 }
 
@@ -46,7 +49,7 @@ uint64_t KeeperLogStore::append(nuraft::ptr<nuraft::log_entry> & entry)
 void KeeperLogStore::write_at(uint64_t index, nuraft::ptr<nuraft::log_entry> & entry)
 {
     std::lock_guard lock(changelog_lock);
-    changelog.writeAt(index, entry, force_sync);
+    changelog.writeAt(index, entry);
 }
 
 nuraft::ptr<std::vector<nuraft::ptr<nuraft::log_entry>>> KeeperLogStore::log_entries(uint64_t start, uint64_t end)
@@ -93,7 +96,7 @@ bool KeeperLogStore::flush()
 void KeeperLogStore::apply_pack(uint64_t index, nuraft::buffer & pack)
 {
     std::lock_guard lock(changelog_lock);
-    changelog.applyEntriesFromBuffer(index, pack, force_sync);
+    changelog.applyEntriesFromBuffer(index, pack);
 }
 
 uint64_t KeeperLogStore::size() const
@@ -102,4 +105,10 @@ uint64_t KeeperLogStore::size() const
     return changelog.size();
 }
 
+void KeeperLogStore::end_of_append_batch(uint64_t /*start_index*/, uint64_t /*count*/)
+{
+    std::lock_guard lock(changelog_lock);
+    changelog.flush();
+}
+
 }
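end_of_append_batch() is the nuraft log_store hook invoked once after the library has pushed a whole batch through append()/write_at(); flushing there, rather than in every append, is what makes the single-fsync-per-batch design above work. A toy model of the calling discipline (assumed driver loop, not nuraft source):

    #include <cstdint>
    #include <string>
    #include <vector>

    /// Minimal stand-in for the KeeperLogStore surface used here.
    struct LogStoreSketch
    {
        std::vector<std::string> entries;

        uint64_t append(std::string entry)
        {
            entries.push_back(std::move(entry));  /// buffered, no fsync
            return entries.size();
        }

        void end_of_append_batch(uint64_t /*start_index*/, uint64_t /*count*/)
        {
            /// one flush (and optional fsync) for the whole batch goes here
        }
    };

    /// A nuraft-like loop appends N entries, then signals batch end once.
    void replicateBatch(LogStoreSketch & store, const std::vector<std::string> & batch)
    {
        uint64_t start_index = store.entries.size() + 1;
        for (const auto & entry : batch)
            store.append(entry);
        store.end_of_append_batch(start_index, batch.size());
    }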
@@ -42,11 +42,12 @@ public:
 
     uint64_t size() const;
 
+    void end_of_append_batch(uint64_t start_index, uint64_t count) override;
+
 private:
     mutable std::mutex changelog_lock;
     Poco::Logger * log;
     Changelog changelog;
-    bool force_sync;
 };
 
 }
@@ -24,6 +24,7 @@ namespace ErrorCodes
     extern const int RAFT_ERROR;
     extern const int NO_ELEMENTS_IN_CONFIG;
     extern const int SUPPORT_IS_DISABLED;
+    extern const int LOGICAL_ERROR;
 }
 
 namespace
@@ -73,7 +74,6 @@ KeeperServer::KeeperServer(
         config.getString("keeper_server.snapshot_storage_path", config.getString("path", DBMS_DEFAULT_PATH) + "coordination/snapshots"),
         coordination_settings))
     , state_manager(nuraft::cs_new<KeeperStateManager>(server_id, "keeper_server", config, coordination_settings))
-    , responses_queue(responses_queue_)
     , log(&Poco::Logger::get("KeeperServer"))
 {
     if (coordination_settings->quorum_reads)
@@ -111,7 +111,7 @@ void KeeperServer::startup()
     params.auto_forwarding_ = coordination_settings->auto_forwarding;
     params.auto_forwarding_req_timeout_ = coordination_settings->operation_timeout_ms.totalMilliseconds() * 2;
 
-    params.return_method_ = nuraft::raft_params::blocking;
+    params.return_method_ = nuraft::raft_params::async_handler;
 
     nuraft::asio_service::options asio_opts{};
     if (state_manager->isSecure())
@@ -222,75 +222,26 @@ nuraft::ptr<nuraft::buffer> getZooKeeperLogEntry(int64_t session_id, const Coord
 
 }
 
-void KeeperServer::putRequest(const KeeperStorage::RequestForSession & request_for_session)
+void KeeperServer::putLocalReadRequest(const KeeperStorage::RequestForSession & request_for_session)
 {
-    auto [session_id, request] = request_for_session;
-    if (!coordination_settings->quorum_reads && isLeaderAlive() && request->isReadRequest())
-    {
-        state_machine->processReadRequest(request_for_session);
-    }
-    else
-    {
-        std::vector<nuraft::ptr<nuraft::buffer>> entries;
-        entries.push_back(getZooKeeperLogEntry(session_id, request));
-
-        std::lock_guard lock(append_entries_mutex);
-
-        auto result = raft_instance->append_entries(entries);
-        if (!result->get_accepted())
-        {
-            KeeperStorage::ResponsesForSessions responses;
-            auto response = request->makeResponse();
-            response->xid = request->xid;
-            response->zxid = 0;
-            response->error = Coordination::Error::ZOPERATIONTIMEOUT;
-            responses_queue.push(DB::KeeperStorage::ResponseForSession{session_id, response});
-        }
-
-        if (result->get_result_code() == nuraft::cmd_result_code::TIMEOUT)
-        {
-            KeeperStorage::ResponsesForSessions responses;
-            auto response = request->makeResponse();
-            response->xid = request->xid;
-            response->zxid = 0;
-            response->error = Coordination::Error::ZOPERATIONTIMEOUT;
-            responses_queue.push(DB::KeeperStorage::ResponseForSession{session_id, response});
-        }
-        else if (result->get_result_code() != nuraft::cmd_result_code::OK)
-            throw Exception(ErrorCodes::RAFT_ERROR, "Requests result failed with code {} and message: '{}'", result->get_result_code(), result->get_result_str());
-    }
+    if (!request_for_session.request->isReadRequest())
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot process non-read request locally");
+
+    state_machine->processReadRequest(request_for_session);
 }
 
-int64_t KeeperServer::getSessionID(int64_t session_timeout_ms)
+RaftAppendResult KeeperServer::putRequestBatch(const KeeperStorage::RequestsForSessions & requests_for_sessions)
 {
-    /// Just some sanity check. We don't want to make a lot of clients wait with lock.
-    if (active_session_id_requests > 10)
-        throw Exception(ErrorCodes::RAFT_ERROR, "Too many concurrent SessionID requests already in flight");
-
-    ++active_session_id_requests;
-    SCOPE_EXIT({ --active_session_id_requests; });
-
-    auto entry = nuraft::buffer::alloc(sizeof(int64_t));
-    /// Just special session request
-    nuraft::buffer_serializer bs(entry);
-    bs.put_i64(session_timeout_ms);
-
-    std::lock_guard lock(append_entries_mutex);
-
-    auto result = raft_instance->append_entries({entry});
-
-    if (!result->get_accepted())
-        throw Exception(ErrorCodes::RAFT_ERROR, "Cannot send session_id request to RAFT");
-
-    if (result->get_result_code() != nuraft::cmd_result_code::OK)
-        throw Exception(ErrorCodes::RAFT_ERROR, "session_id request failed to RAFT");
-
-    auto resp = result->get();
-    if (resp == nullptr)
-        throw Exception(ErrorCodes::RAFT_ERROR, "Received nullptr as session_id");
-
-    nuraft::buffer_serializer bs_resp(resp);
-    return bs_resp.get_i64();
+    std::vector<nuraft::ptr<nuraft::buffer>> entries;
+    for (const auto & [session_id, request] : requests_for_sessions)
+        entries.push_back(getZooKeeperLogEntry(session_id, request));
+
+    {
+        std::lock_guard lock(append_entries_mutex);
+        return raft_instance->append_entries(entries);
+    }
 }
 
 bool KeeperServer::isLeader() const
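Because return_method_ is now async_handler, append_entries() hands back a RaftAppendResult immediately and the error handling that putRequest() used to do inline becomes the caller's job. One plausible caller-side shape, modeled on the checks visible in the removed code above (a self-contained toy; the types are stand-ins, not nuraft's):

    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    /// Toy stand-ins for the nuraft result surface used by the removed code.
    enum class CmdResultCode { OK, TIMEOUT, FAILED };

    struct AppendResultModel
    {
        bool accepted = true;
        CmdResultCode code = CmdResultCode::OK;

        bool get_accepted() const { return accepted; }
        CmdResultCode get_result_code() const { return code; }
    };

    /// On a rejected or timed-out batch, every request must now be answered
    /// by the dispatcher (e.g. with ZOPERATIONTIMEOUT, as before); other
    /// failures remain fatal.
    void handleBatchResult(const AppendResultModel & result, size_t batch_size,
                           std::vector<size_t> & timed_out_requests)
    {
        if (!result.get_accepted() || result.get_result_code() == CmdResultCode::TIMEOUT)
        {
            for (size_t i = 0; i < batch_size; ++i)
                timed_out_requests.push_back(i);
        }
        else if (result.get_result_code() != CmdResultCode::OK)
            throw std::runtime_error("Requests batch failed");
    }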
@@ -12,10 +12,12 @@
 namespace DB
 {
 
+using RaftAppendResult = nuraft::ptr<nuraft::cmd_result<nuraft::ptr<nuraft::buffer>>>;
+
 class KeeperServer
 {
 private:
-    int server_id;
+    const int server_id;
 
     CoordinationSettingsPtr coordination_settings;
 
@@ -29,13 +31,10 @@ private:
 
     std::mutex append_entries_mutex;
 
-    ResponsesQueue & responses_queue;
-
     std::mutex initialized_mutex;
     std::atomic<bool> initialized_flag = false;
     std::condition_variable initialized_cv;
     std::atomic<bool> initial_batch_committed = false;
-    std::atomic<size_t> active_session_id_requests = 0;
 
     Poco::Logger * log;
 
@@ -60,9 +59,9 @@ public:
 
     void startup();
 
-    void putRequest(const KeeperStorage::RequestForSession & request);
+    void putLocalReadRequest(const KeeperStorage::RequestForSession & request);
 
-    int64_t getSessionID(int64_t session_timeout_ms);
+    RaftAppendResult putRequestBatch(const KeeperStorage::RequestsForSessions & requests);
 
     std::unordered_set<int64_t> getDeadSessions();
 
@@ -73,6 +72,8 @@ public:
     void waitInit();
 
     void shutdown();
+
+    int getServerID() const { return server_id; }
 };
 
 }
@@ -90,25 +90,29 @@ void KeeperStateMachine::init()
 
 nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, nuraft::buffer & data)
 {
-    if (data.size() == sizeof(int64_t))
+    auto request_for_session = parseRequest(data);
+    if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
     {
-        nuraft::buffer_serializer timeout_data(data);
-        int64_t session_timeout_ms = timeout_data.get_i64();
-        auto response = nuraft::buffer::alloc(sizeof(int64_t));
+        const Coordination::ZooKeeperSessionIDRequest & session_id_request = dynamic_cast<const Coordination::ZooKeeperSessionIDRequest &>(*request_for_session.request);
         int64_t session_id;
-        nuraft::buffer_serializer bs(response);
         {
             std::lock_guard lock(storage_lock);
-            session_id = storage->getSessionID(session_timeout_ms);
-            bs.put_i64(session_id);
+            session_id = storage->getSessionID(session_id_request.session_timeout_ms);
         }
-        LOG_DEBUG(log, "Session ID response {} with timeout {}", session_id, session_timeout_ms);
-        last_committed_idx = log_idx;
-        return response;
+        LOG_DEBUG(log, "Session ID response {} with timeout {}", session_id, session_id_request.session_timeout_ms);
+
+        std::shared_ptr<Coordination::ZooKeeperSessionIDResponse> response = std::make_shared<Coordination::ZooKeeperSessionIDResponse>();
+        response->internal_id = session_id_request.internal_id;
+        response->session_id = session_id;
+        response->server_id = session_id_request.server_id;
+
+        KeeperStorage::ResponseForSession response_for_session;
+        response_for_session.session_id = -1;
+        response_for_session.response = response;
+        responses_queue.push(response_for_session);
     }
     else
     {
-        auto request_for_session = parseRequest(data);
         KeeperStorage::ResponsesForSessions responses_for_sessions;
         {
             std::lock_guard lock(storage_lock);
@@ -116,10 +120,10 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
             for (auto & response_for_session : responses_for_sessions)
                 responses_queue.push(response_for_session);
         }
-
-        last_committed_idx = log_idx;
-        return nullptr;
     }
+
+    last_committed_idx = log_idx;
+    return nullptr;
 }
 
 bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s)
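Session creation is now an ordinary replicated log entry: every replica runs the same getSessionID() inside commit(), and the (server_id, internal_id) pair stamped into the request lets only the originating server match the committed response back to its locally waiting client. A toy model of that matching step (illustrative types, not ClickHouse code):

    #include <cstdint>
    #include <map>

    struct SessionIDResponseModel
    {
        int64_t internal_id;
        int64_t session_id;
        int32_t server_id;   /// who originally asked for this session
    };

    struct SessionWaiters
    {
        int32_t my_server_id;
        std::map<int64_t, int64_t> assigned;  /// internal_id -> session_id

        /// Runs on every replica when the entry commits; only the origin reacts.
        void onCommitted(const SessionIDResponseModel & response)
        {
            if (response.server_id != my_server_id)
                return;
            assigned[response.internal_id] = response.session_id;
        }
    };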
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user