Merge remote-tracking branch 'origin/master' into dev_map2

This commit is contained in:
hexiaoting 2020-11-26 16:43:27 +08:00
commit 5b3a7f5c98
682 changed files with 15994 additions and 4040 deletions

View File

@ -2,8 +2,7 @@
name: Documentation issue
about: Report something incorrect or missing in documentation
title: ''
labels: documentation
assignees: BayoNet
labels: comp-documentation
---

2
.gitignore vendored
View File

@ -124,3 +124,5 @@ website/package-lock.json
# Toolchains
/cmake/toolchain/*
*.iml

8
.gitmodules vendored
View File

@ -44,6 +44,7 @@
[submodule "contrib/protobuf"]
path = contrib/protobuf
url = https://github.com/ClickHouse-Extras/protobuf.git
branch = v3.13.0.1
[submodule "contrib/boost"]
path = contrib/boost
url = https://github.com/ClickHouse-Extras/boost.git
@ -107,6 +108,7 @@
[submodule "contrib/grpc"]
path = contrib/grpc
url = https://github.com/ClickHouse-Extras/grpc.git
branch = v1.33.2
[submodule "contrib/aws"]
path = contrib/aws
url = https://github.com/ClickHouse-Extras/aws-sdk-cpp.git
@ -196,7 +198,11 @@
[submodule "contrib/rocksdb"]
path = contrib/rocksdb
url = https://github.com/facebook/rocksdb
branch = v6.11.4
branch = v6.14.5
[submodule "contrib/xz"]
path = contrib/xz
url = https://github.com/xz-mirror/xz
[submodule "contrib/abseil-cpp"]
path = contrib/abseil-cpp
url = https://github.com/ClickHouse-Extras/abseil-cpp.git
branch = lts_2020_02_25

View File

@ -1,6 +1,7 @@
#pragma once
#include <cassert>
#include <stdexcept> // for std::logic_error
#include <string>
#include <vector>
#include <functional>

View File

@ -3,7 +3,6 @@
/// Macros for convenient usage of Poco logger.
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <Poco/Logger.h>
#include <Poco/Message.h>
#include <Common/CurrentThread.h>

View File

@ -0,0 +1,19 @@
#define _GNU_SOURCE
#include <sys/socket.h>
#include <errno.h>
#include <fcntl.h>
#include "syscall.h"
int accept4(int fd, struct sockaddr *restrict addr, socklen_t *restrict len, int flg)
{
if (!flg) return accept(fd, addr, len);
int ret = socketcall_cp(accept4, fd, addr, len, flg, 0, 0);
if (ret>=0 || (errno != ENOSYS && errno != EINVAL)) return ret;
ret = accept(fd, addr, len);
if (ret<0) return ret;
if (flg & SOCK_CLOEXEC)
__syscall(SYS_fcntl, ret, F_SETFD, FD_CLOEXEC);
if (flg & SOCK_NONBLOCK)
__syscall(SYS_fcntl, ret, F_SETFL, O_NONBLOCK);
return ret;
}

View File

@ -0,0 +1,37 @@
#include <sys/epoll.h>
#include <signal.h>
#include <errno.h>
#include "syscall.h"
int epoll_create(int size)
{
return epoll_create1(0);
}
int epoll_create1(int flags)
{
int r = __syscall(SYS_epoll_create1, flags);
#ifdef SYS_epoll_create
if (r==-ENOSYS && !flags) r = __syscall(SYS_epoll_create, 1);
#endif
return __syscall_ret(r);
}
int epoll_ctl(int fd, int op, int fd2, struct epoll_event *ev)
{
return syscall(SYS_epoll_ctl, fd, op, fd2, ev);
}
int epoll_pwait(int fd, struct epoll_event *ev, int cnt, int to, const sigset_t *sigs)
{
int r = __syscall(SYS_epoll_pwait, fd, ev, cnt, to, sigs, _NSIG/8);
#ifdef SYS_epoll_wait
if (r==-ENOSYS && !sigs) r = __syscall(SYS_epoll_wait, fd, ev, cnt, to);
#endif
return __syscall_ret(r);
}
int epoll_wait(int fd, struct epoll_event *ev, int cnt, int to)
{
return epoll_pwait(fd, ev, cnt, to, 0);
}

View File

@ -0,0 +1,23 @@
#include <sys/eventfd.h>
#include <unistd.h>
#include <errno.h>
#include "syscall.h"
int eventfd(unsigned int count, int flags)
{
int r = __syscall(SYS_eventfd2, count, flags);
#ifdef SYS_eventfd
if (r==-ENOSYS && !flags) r = __syscall(SYS_eventfd, count);
#endif
return __syscall_ret(r);
}
int eventfd_read(int fd, eventfd_t *value)
{
return (sizeof(*value) == read(fd, value, sizeof(*value))) ? 0 : -1;
}
int eventfd_write(int fd, eventfd_t value)
{
return (sizeof(value) == write(fd, &value, sizeof(value))) ? 0 : -1;
}

View File

@ -0,0 +1,45 @@
#include <sys/auxv.h>
#include <unistd.h> // __environ
#include <errno.h>
// We don't have libc struct available here. Compute aux vector manually.
static unsigned long * __auxv = NULL;
static unsigned long __auxv_secure = 0;
static size_t __find_auxv(unsigned long type)
{
size_t i;
for (i = 0; __auxv[i]; i += 2)
{
if (__auxv[i] == type)
return i + 1;
}
return (size_t) -1;
}
__attribute__((constructor)) static void __auxv_init()
{
size_t i;
for (i = 0; __environ[i]; i++);
__auxv = (unsigned long *) (__environ + i + 1);
size_t secure_idx = __find_auxv(AT_SECURE);
if (secure_idx != ((size_t) -1))
__auxv_secure = __auxv[secure_idx];
}
unsigned long getauxval(unsigned long type)
{
if (type == AT_SECURE)
return __auxv_secure;
if (__auxv)
{
size_t index = __find_auxv(type);
if (index != ((size_t) -1))
return __auxv[index];
}
errno = ENOENT;
return 0;
}

View File

@ -0,0 +1,8 @@
#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/auxv.h>
char * secure_getenv(const char * name)
{
return getauxval(AT_SECURE) ? NULL : getenv(name);
}

View File

@ -13,3 +13,11 @@ long __syscall(syscall_arg_t, ...);
__attribute__((visibility("hidden")))
void *__vdsosym(const char *, const char *);
#define syscall(...) __syscall_ret(__syscall(__VA_ARGS__))
#define socketcall(...) __syscall_ret(__socketcall(__VA_ARGS__))
#define __socketcall(nm,a,b,c,d,e,f) __syscall(SYS_##nm, a, b, c, d, e, f)
#define socketcall_cp socketcall

View File

@ -40,24 +40,10 @@ static int checkver(Verdef *def, int vsym, const char *vername, char *strings)
#define OK_TYPES (1<<STT_NOTYPE | 1<<STT_OBJECT | 1<<STT_FUNC | 1<<STT_COMMON)
#define OK_BINDS (1<<STB_GLOBAL | 1<<STB_WEAK | 1<<STB_GNU_UNIQUE)
extern char** environ;
static Ehdr *eh = NULL;
void *__vdsosym(const char *vername, const char *name);
// We don't have libc struct available here. Compute aux vector manually.
__attribute__((constructor)) static void auxv_init()
{
size_t i, *auxv;
for (i=0; environ[i]; i++);
auxv = (void *)(environ+i+1);
for (i=0; auxv[i] != AT_SYSINFO_EHDR; i+=2)
if (!auxv[i]) return;
if (!auxv[i+1]) return;
eh = (void *)auxv[i+1];
}
void *__vdsosym(const char *vername, const char *name)
{
size_t i;
Ehdr * eh = (void *) getauxval(AT_SYSINFO_EHDR);
if (!eh) return 0;
Phdr *ph = (void *)((char *)eh + eh->e_phoff);
size_t *dynv=0, base=-1;

View File

@ -6,11 +6,9 @@ Defines the following variables:
The include directories of the gRPC framework, including the include directories of the C++ wrapper.
``gRPC_LIBRARIES``
The libraries of the gRPC framework.
``gRPC_UNSECURE_LIBRARIES``
The libraries of the gRPC framework without SSL.
``_gRPC_CPP_PLUGIN``
``gRPC_CPP_PLUGIN``
The plugin for generating gRPC client and server C++ stubs from `.proto` files
``_gRPC_PYTHON_PLUGIN``
``gRPC_PYTHON_PLUGIN``
The plugin for generating gRPC client and server Python stubs from `.proto` files
The following :prop_tgt:`IMPORTED` targets are also defined:
@ -19,6 +17,13 @@ The following :prop_tgt:`IMPORTED` targets are also defined:
``grpc_cpp_plugin``
``grpc_python_plugin``
Set the following variables to adjust the behaviour of this script:
``gRPC_USE_UNSECURE_LIBRARIES``
if set gRPC_LIBRARIES will be filled with the unsecure version of the libraries (i.e. without SSL)
instead of the secure ones.
``gRPC_DEBUG``
if set, debug messages will be printed.
Add custom commands to process ``.proto`` files to C++::
protobuf_generate_grpc_cpp(<SRCS> <HDRS>
[DESCRIPTORS <DESC>] [EXPORT_MACRO <MACRO>] [<ARGN>...])
@ -242,6 +247,7 @@ find_library(gRPC_LIBRARY NAMES grpc)
find_library(gRPC_CPP_LIBRARY NAMES grpc++)
find_library(gRPC_UNSECURE_LIBRARY NAMES grpc_unsecure)
find_library(gRPC_CPP_UNSECURE_LIBRARY NAMES grpc++_unsecure)
find_library(gRPC_CARES_LIBRARY NAMES cares)
set(gRPC_LIBRARIES)
if(gRPC_USE_UNSECURE_LIBRARIES)
@ -259,6 +265,7 @@ else()
set(gRPC_LIBRARIES ${gRPC_LIBRARIES} ${gRPC_CPP_LIBRARY})
endif()
endif()
set(gRPC_LIBRARIES ${gRPC_LIBRARIES} ${gRPC_CARES_LIBRARY})
# Restore the original find library ordering.
if(gRPC_USE_STATIC_LIBS)
@ -278,11 +285,11 @@ else()
endif()
# Get full path to plugin.
find_program(_gRPC_CPP_PLUGIN
find_program(gRPC_CPP_PLUGIN
NAMES grpc_cpp_plugin
DOC "The plugin for generating gRPC client and server C++ stubs from `.proto` files")
find_program(_gRPC_PYTHON_PLUGIN
find_program(gRPC_PYTHON_PLUGIN
NAMES grpc_python_plugin
DOC "The plugin for generating gRPC client and server Python stubs from `.proto` files")
@ -317,14 +324,14 @@ endif()
#include(FindPackageHandleStandardArgs.cmake)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(gRPC
REQUIRED_VARS gRPC_LIBRARY gRPC_CPP_LIBRARY gRPC_UNSECURE_LIBRARY gRPC_CPP_UNSECURE_LIBRARY
gRPC_INCLUDE_DIR gRPC_CPP_INCLUDE_DIR _gRPC_CPP_PLUGIN _gRPC_PYTHON_PLUGIN)
REQUIRED_VARS gRPC_LIBRARY gRPC_CPP_LIBRARY gRPC_UNSECURE_LIBRARY gRPC_CPP_UNSECURE_LIBRARY gRPC_CARES_LIBRARY
gRPC_INCLUDE_DIR gRPC_CPP_INCLUDE_DIR gRPC_CPP_PLUGIN gRPC_PYTHON_PLUGIN)
if(gRPC_FOUND)
if(gRPC_DEBUG)
message(STATUS "gRPC: INCLUDE_DIRS=${gRPC_INCLUDE_DIRS}")
message(STATUS "gRPC: LIBRARIES=${gRPC_LIBRARIES}")
message(STATUS "gRPC: CPP_PLUGIN=${_gRPC_CPP_PLUGIN}")
message(STATUS "gRPC: PYTHON_PLUGIN=${_gRPC_PYTHON_PLUGIN}")
message(STATUS "gRPC: CPP_PLUGIN=${gRPC_CPP_PLUGIN}")
message(STATUS "gRPC: PYTHON_PLUGIN=${gRPC_PYTHON_PLUGIN}")
endif()
endif()

View File

@ -1,9 +1,9 @@
# This strings autochanged from release_lib.sh:
SET(VERSION_REVISION 54443)
SET(VERSION_REVISION 54444)
SET(VERSION_MAJOR 20)
SET(VERSION_MINOR 12)
SET(VERSION_MINOR 13)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH c53725fb1f846fda074347607ab582fbb9c6f7a1)
SET(VERSION_DESCRIBE v20.12.1.1-prestable)
SET(VERSION_STRING 20.12.1.1)
SET(VERSION_GITHASH e581f9ccfc5c64867b0f488cce72412fd2966471)
SET(VERSION_DESCRIBE v20.13.1.1-prestable)
SET(VERSION_STRING 20.13.1.1)
# end of autochange

View File

@ -37,8 +37,8 @@ if(NOT USE_INTERNAL_GRPC_LIBRARY)
if(NOT gRPC_INCLUDE_DIRS OR NOT gRPC_LIBRARIES)
message(${RECONFIGURE_MESSAGE_LEVEL} "Can't find system gRPC library")
set(EXTERNAL_GRPC_LIBRARY_FOUND 0)
elseif(NOT _gRPC_CPP_PLUGIN)
message(${RECONFIGURE_MESSAGE_LEVEL} "Can't find system grcp_cpp_plugin")
elseif(NOT gRPC_CPP_PLUGIN)
message(${RECONFIGURE_MESSAGE_LEVEL} "Can't find system grpc_cpp_plugin")
set(EXTERNAL_GRPC_LIBRARY_FOUND 0)
else()
set(EXTERNAL_GRPC_LIBRARY_FOUND 1)
@ -53,8 +53,8 @@ if(NOT EXTERNAL_GRPC_LIBRARY_FOUND AND NOT MISSING_INTERNAL_GRPC_LIBRARY)
else()
set(gRPC_LIBRARIES grpc grpc++)
endif()
set(_gRPC_CPP_PLUGIN $<TARGET_FILE:grpc_cpp_plugin>)
set(_gRPC_PROTOC_EXECUTABLE $<TARGET_FILE:protobuf::protoc>)
set(gRPC_CPP_PLUGIN $<TARGET_FILE:grpc_cpp_plugin>)
set(gRPC_PYTHON_PLUGIN $<TARGET_FILE:grpc_python_plugin>)
include("${ClickHouse_SOURCE_DIR}/contrib/grpc-cmake/protobuf_generate_grpc.cmake")
@ -62,4 +62,4 @@ if(NOT EXTERNAL_GRPC_LIBRARY_FOUND AND NOT MISSING_INTERNAL_GRPC_LIBRARY)
set(USE_GRPC 1)
endif()
message(STATUS "Using gRPC=${USE_GRPC}: ${gRPC_INCLUDE_DIRS} : ${gRPC_LIBRARIES} : ${_gRPC_CPP_PLUGIN}")
message(STATUS "Using gRPC=${USE_GRPC}: ${gRPC_INCLUDE_DIRS} : ${gRPC_LIBRARIES} : ${gRPC_CPP_PLUGIN}")

1
contrib/abseil-cpp vendored Submodule

@ -0,0 +1 @@
Subproject commit 4f3b686f86c3ebaba7e4e926e62a79cb1c659a54

2
contrib/cctz vendored

@ -1 +1 @@
Subproject commit 7a2db4ece6e0f1b246173cbdb62711ae258ee841
Subproject commit 260ba195ef6c489968bae8c88c62a67cdac5ff9d

2
contrib/grpc vendored

@ -1 +1 @@
Subproject commit a6570b863cf76c9699580ba51c7827d5bffaac43
Subproject commit 7436366ceb341ba5c00ea29f1645e02a2b70bf93

View File

@ -1,6 +1,7 @@
set(_gRPC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc")
set(_gRPC_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/grpc")
# Use re2 from ClickHouse contrib, not from gRPC third_party.
if(NOT RE2_INCLUDE_DIR)
message(FATAL_ERROR " grpc: The location of the \"re2\" library is unknown")
endif()
@ -8,6 +9,7 @@ set(gRPC_RE2_PROVIDER "clickhouse" CACHE STRING "" FORCE)
set(_gRPC_RE2_INCLUDE_DIR "${RE2_INCLUDE_DIR}")
set(_gRPC_RE2_LIBRARIES "${RE2_LIBRARY}")
# Use zlib from ClickHouse contrib, not from gRPC third_party.
if(NOT ZLIB_INCLUDE_DIRS)
message(FATAL_ERROR " grpc: The location of the \"zlib\" library is unknown")
endif()
@ -15,6 +17,7 @@ set(gRPC_ZLIB_PROVIDER "clickhouse" CACHE STRING "" FORCE)
set(_gRPC_ZLIB_INCLUDE_DIR "${ZLIB_INCLUDE_DIRS}")
set(_gRPC_ZLIB_LIBRARIES "${ZLIB_LIBRARIES}")
# Use protobuf from ClickHouse contrib, not from gRPC third_party.
if(NOT Protobuf_INCLUDE_DIR OR NOT Protobuf_LIBRARY)
message(FATAL_ERROR " grpc: The location of the \"protobuf\" library is unknown")
elseif (NOT Protobuf_PROTOC_EXECUTABLE)
@ -29,21 +32,33 @@ set(_gRPC_PROTOBUF_PROTOC "protoc")
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE "${Protobuf_PROTOC_EXECUTABLE}")
set(_gRPC_PROTOBUF_PROTOC_LIBRARIES "${Protobuf_PROTOC_LIBRARY}")
# Use OpenSSL from ClickHouse contrib, not from gRPC third_party.
set(gRPC_SSL_PROVIDER "clickhouse" CACHE STRING "" FORCE)
set(_gRPC_SSL_INCLUDE_DIR ${OPENSSL_INCLUDE_DIR})
set(_gRPC_SSL_LIBRARIES ${OPENSSL_LIBRARIES})
# Use abseil-cpp from ClickHouse contrib, not from gRPC third_party.
set(gRPC_ABSL_PROVIDER "clickhouse" CACHE STRING "" FORCE)
set(ABSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
if(NOT EXISTS "${ABSL_ROOT_DIR}/CMakeLists.txt")
message(FATAL_ERROR " grpc: submodule third_party/abseil-cpp is missing. To fix try run: \n git submodule update --init --recursive")
endif()
add_subdirectory("${ABSL_ROOT_DIR}" "${ClickHouse_BINARY_DIR}/contrib/abseil-cpp")
# Choose to build static or shared library for c-ares.
if (MAKE_STATIC_LIBRARIES)
set(CARES_STATIC ON CACHE BOOL "" FORCE)
set(CARES_SHARED OFF CACHE BOOL "" FORCE)
else ()
set(CARES_STATIC OFF CACHE BOOL "" FORCE)
set(CARES_SHARED ON CACHE BOOL "" FORCE)
endif ()
# We don't want to build C# extensions.
set(gRPC_BUILD_CSHARP_EXT OFF)
# We don't want to build abseil tests, so we temporarily switch BUILD_TESTING off.
set(_gRPC_ORIG_BUILD_TESTING ${BUILD_TESTING})
set(BUILD_TESTING OFF)
add_subdirectory("${_gRPC_SOURCE_DIR}" "${_gRPC_BINARY_DIR}")
set(BUILD_TESTING ${_gRPC_ORIG_BUILD_TESTING})
# The contrib/grpc/CMakeLists.txt redefined the PROTOBUF_GENERATE_GRPC_CPP() function for its own purposes,
# so we need to redefine it back.
include("${ClickHouse_SOURCE_DIR}/contrib/grpc-cmake/protobuf_generate_grpc.cmake")

View File

@ -22,7 +22,16 @@ set_source_files_properties(${LIBUNWIND_C_SOURCES} PROPERTIES COMPILE_FLAGS "-st
set(LIBUNWIND_ASM_SOURCES
${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersRestore.S
${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersSave.S)
set_source_files_properties(${LIBUNWIND_ASM_SOURCES} PROPERTIES LANGUAGE C)
# CMake doesn't pass the correct architecture for Apple prior to CMake 3.19 [1]
# Workaround these two issues by compiling as C.
#
# [1]: https://gitlab.kitware.com/cmake/cmake/-/issues/20771
if (APPLE AND CMAKE_VERSION VERSION_LESS 3.19)
set_source_files_properties(${LIBUNWIND_ASM_SOURCES} PROPERTIES LANGUAGE C)
else()
enable_language(ASM)
endif()
set(LIBUNWIND_SOURCES
${LIBUNWIND_CXX_SOURCES}

2
contrib/protobuf vendored

@ -1 +1 @@
Subproject commit 445d1ae73a450b1e94622e7040989aa2048402e3
Subproject commit 73b12814204ad9068ba352914d0dc244648b48ee

2
contrib/rocksdb vendored

@ -1 +1 @@
Subproject commit 963314ffd681596ef2738a95249fe4c1163ef87a
Subproject commit 35d8e36ef1b8e3e0759ca81215f855226a0a54bd

View File

@ -347,8 +347,9 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_reader.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
${ROCKSDB_SOURCE_DIR}/db/builder.cc
${ROCKSDB_SOURCE_DIR}/db/c.cc
@ -394,6 +395,8 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc
${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc
${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc
${ROCKSDB_SOURCE_DIR}/db/output_validator.cc
${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc
${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc
${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc
${ROCKSDB_SOURCE_DIR}/db/repair.cc
@ -451,12 +454,12 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc
${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc
${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc
${ROCKSDB_SOURCE_DIR}/monitoring/stats_dump_scheduler.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc
${ROCKSDB_SOURCE_DIR}/options/cf_options.cc
${ROCKSDB_SOURCE_DIR}/options/configurable.cc
${ROCKSDB_SOURCE_DIR}/options/db_options.cc
${ROCKSDB_SOURCE_DIR}/options/options.cc
${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
@ -507,6 +510,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc
${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc
${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc
${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
@ -515,6 +519,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc
${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc
${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc

4
debian/changelog vendored
View File

@ -1,5 +1,5 @@
clickhouse (20.12.1.1) unstable; urgency=low
clickhouse (20.13.1.1) unstable; urgency=low
* Modified source code
-- clickhouse-release <clickhouse-release@yandex-team.ru> Thu, 05 Nov 2020 21:52:47 +0300
-- clickhouse-release <clickhouse-release@yandex-team.ru> Mon, 23 Nov 2020 10:29:24 +0300

View File

@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=20.12.1.*
ARG version=20.13.1.*
RUN apt-get update \
&& apt-get install --yes --no-install-recommends \

View File

@ -56,6 +56,7 @@ RUN apt-get update \
libprotoc-dev \
libgrpc++-dev \
protobuf-compiler-grpc \
libc-ares-dev \
rapidjson-dev \
libsnappy-dev \
libparquet-dev \

View File

@ -1,7 +1,7 @@
FROM ubuntu:20.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=20.12.1.*
ARG version=20.13.1.*
ARG gosu_ver=1.10
RUN apt-get update \

View File

@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=20.12.1.*
ARG version=20.13.1.*
RUN apt-get update && \
apt-get install -y apt-transport-https dirmngr && \

View File

@ -7,8 +7,10 @@ ENV SOURCE_DIR=/build
ENV OUTPUT_DIR=/output
ENV IGNORE='.*contrib.*'
CMD mkdir -p /build/obj-x86_64-linux-gnu && cd /build/obj-x86_64-linux-gnu && CC=clang-10 CXX=clang++-10 cmake .. && cd /; \
RUN apt-get update && apt-get install cmake --yes --no-install-recommends
CMD mkdir -p /build/obj-x86_64-linux-gnu && cd /build/obj-x86_64-linux-gnu && CC=clang-11 CXX=clang++-11 cmake .. && cd /; \
dpkg -i /package_folder/clickhouse-common-static_*.deb; \
llvm-profdata-10 merge -sparse ${COVERAGE_DIR}/* -o clickhouse.profdata && \
llvm-cov-10 export /usr/bin/clickhouse -instr-profile=clickhouse.profdata -j=16 -format=lcov -skip-functions -ignore-filename-regex $IGNORE > output.lcov && \
llvm-profdata-11 merge -sparse ${COVERAGE_DIR}/* -o clickhouse.profdata && \
llvm-cov-11 export /usr/bin/clickhouse -instr-profile=clickhouse.profdata -j=16 -format=lcov -skip-functions -ignore-filename-regex $IGNORE > output.lcov && \
genhtml output.lcov --ignore-errors source --output-directory ${OUTPUT_DIR}

View File

@ -15,6 +15,9 @@ stage=${stage:-}
# empty parameter.
read -ra FASTTEST_CMAKE_FLAGS <<< "${FASTTEST_CMAKE_FLAGS:-}"
# Run only matching tests.
FASTTEST_FOCUS=${FASTTEST_FOCUS:-""}
FASTTEST_WORKSPACE=$(readlink -f "${FASTTEST_WORKSPACE:-.}")
FASTTEST_SOURCE=$(readlink -f "${FASTTEST_SOURCE:-$FASTTEST_WORKSPACE/ch}")
FASTTEST_BUILD=$(readlink -f "${FASTTEST_BUILD:-${BUILD:-$FASTTEST_WORKSPACE/build}}")
@ -287,9 +290,11 @@ TESTS_TO_SKIP=(
01322_ttest_scipy
01545_system_errors
# Checks system.errors
01563_distributed_query_finish
)
time clickhouse-test -j 8 --order=random --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"
time clickhouse-test -j 8 --order=random --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"
# substr is to remove semicolon after test name
readarray -t FAILED_TESTS < <(awk '/FAIL|TIMEOUT|ERROR/ { print substr($3, 1, length($3)-1) }' "$FASTTEST_OUTPUT/test_log.txt" | tee "$FASTTEST_OUTPUT/failed-parallel-tests.txt")

View File

@ -30,7 +30,7 @@ RUN apt-get update \
tzdata \
vim \
wget \
&& pip3 --no-cache-dir install clickhouse_driver scipy \
&& pip3 --no-cache-dir install 'clickhouse-driver>=0.1.5' scipy \
&& apt-get purge --yes python3-dev g++ \
&& apt-get autoremove --yes \
&& apt-get clean \

View File

@ -14,10 +14,12 @@ import string
import sys
import time
import traceback
import logging
import xml.etree.ElementTree as et
from threading import Thread
from scipy import stats
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(module)s: %(message)s', level='WARNING')
total_start_seconds = time.perf_counter()
stage_start_seconds = total_start_seconds
@ -46,6 +48,8 @@ parser.add_argument('--profile-seconds', type=int, default=0, help='For how many
parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.')
parser.add_argument('--keep-created-tables', action='store_true', help="Don't drop the created tables after the test.")
parser.add_argument('--use-existing-tables', action='store_true', help="Don't create or drop the tables, use the existing ones instead.")
args = parser.parse_args()
reportStageEnd('start')
@ -146,20 +150,21 @@ for i, s in enumerate(servers):
reportStageEnd('connect')
# Run drop queries, ignoring errors. Do this before all other activity, because
# clickhouse_driver disconnects on error (this is not configurable), and the new
# connection loses the changes in settings.
drop_query_templates = [q.text for q in root.findall('drop_query')]
drop_queries = substitute_parameters(drop_query_templates)
for conn_index, c in enumerate(all_connections):
for q in drop_queries:
try:
c.execute(q)
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
except:
pass
if not args.use_existing_tables:
# Run drop queries, ignoring errors. Do this before all other activity,
# because clickhouse_driver disconnects on error (this is not configurable),
# and the new connection loses the changes in settings.
drop_query_templates = [q.text for q in root.findall('drop_query')]
drop_queries = substitute_parameters(drop_query_templates)
for conn_index, c in enumerate(all_connections):
for q in drop_queries:
try:
c.execute(q)
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
except:
pass
reportStageEnd('drop-1')
reportStageEnd('drop-1')
# Apply settings.
# If there are errors, report them and continue -- maybe a new test uses a setting
@ -171,12 +176,9 @@ reportStageEnd('drop-1')
settings = root.findall('settings/*')
for conn_index, c in enumerate(all_connections):
for s in settings:
try:
q = f"set {s.tag} = '{s.text}'"
c.execute(q)
print(f'set\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
except:
print(traceback.format_exc(), file=sys.stderr)
# requires clickhouse-driver >= 1.1.5 to accept arbitrary new settings
# (https://github.com/mymarilyn/clickhouse-driver/pull/142)
c.settings[s.tag] = s.text
reportStageEnd('settings')
@ -194,37 +196,40 @@ for t in tables:
reportStageEnd('preconditions')
# Run create and fill queries. We will run them simultaneously for both servers,
# to save time.
# The weird search is to keep the relative order of elements, which matters, and
# etree doesn't support the appropriate xpath query.
create_query_templates = [q.text for q in root.findall('./*') if q.tag in ('create_query', 'fill_query')]
create_queries = substitute_parameters(create_query_templates)
if not args.use_existing_tables:
# Run create and fill queries. We will run them simultaneously for both
# servers, to save time. The weird XML search + filter is because we want to
# keep the relative order of elements, and etree doesn't support the
# appropriate xpath query.
create_query_templates = [q.text for q in root.findall('./*')
if q.tag in ('create_query', 'fill_query')]
create_queries = substitute_parameters(create_query_templates)
# Disallow temporary tables, because the clickhouse_driver reconnects on errors,
# and temporary tables are destroyed. We want to be able to continue after some
# errors.
for q in create_queries:
if re.search('create temporary table', q, flags=re.IGNORECASE):
print(f"Temporary tables are not allowed in performance tests: '{q}'",
file = sys.stderr)
sys.exit(1)
# Disallow temporary tables, because the clickhouse_driver reconnects on
# errors, and temporary tables are destroyed. We want to be able to continue
# after some errors.
for q in create_queries:
if re.search('create temporary table', q, flags=re.IGNORECASE):
print(f"Temporary tables are not allowed in performance tests: '{q}'",
file = sys.stderr)
sys.exit(1)
def do_create(connection, index, queries):
for q in queries:
connection.execute(q)
print(f'create\t{index}\t{connection.last_query.elapsed}\t{tsv_escape(q)}')
def do_create(connection, index, queries):
for q in queries:
connection.execute(q)
print(f'create\t{index}\t{connection.last_query.elapsed}\t{tsv_escape(q)}')
threads = [Thread(target = do_create, args = (connection, index, create_queries))
for index, connection in enumerate(all_connections)]
threads = [
Thread(target = do_create, args = (connection, index, create_queries))
for index, connection in enumerate(all_connections)]
for t in threads:
t.start()
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
t.join()
reportStageEnd('create')
reportStageEnd('create')
# By default, test all queries.
queries_to_run = range(0, len(test_queries))
@ -403,10 +408,11 @@ print(f'profile-total\t{profile_total_seconds}')
reportStageEnd('run')
# Run drop queries
drop_queries = substitute_parameters(drop_query_templates)
for conn_index, c in enumerate(all_connections):
for q in drop_queries:
c.execute(q)
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
if not args.keep_created_tables and not args.use_existing_tables:
drop_queries = substitute_parameters(drop_query_templates)
for conn_index, c in enumerate(all_connections):
for q in drop_queries:
c.execute(q)
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
reportStageEnd('drop-2')

View File

@ -10,6 +10,11 @@ RUN apt-get update --yes \
gpg-agent \
debsig-verify \
strace \
protobuf-compiler \
protobuf-compiler-grpc \
libprotoc-dev \
libgrpc++-dev \
libc-ares-dev \
--yes --no-install-recommends
#RUN wget -nv -O - http://files.viva64.com/etc/pubkey.txt | sudo apt-key add -
@ -33,7 +38,8 @@ RUN set -x \
&& dpkg -i "${PKG_VERSION}.deb"
CMD echo "Running PVS version $PKG_VERSION" && cd /repo_folder && pvs-studio-analyzer credentials $LICENCE_NAME $LICENCE_KEY -o ./licence.lic \
&& cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF && ninja re2_st \
&& cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF \
&& ninja re2_st clickhouse_grpc_protos \
&& pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j 4 -l ./licence.lic; \
plog-converter -a GA:1,2 -t fullhtml -o /test_output/pvs-studio-html-report pvs-studio.log; \
plog-converter -a GA:1,2 -t tasklist -o /test_output/pvs-studio-task-report.txt pvs-studio.log

View File

@ -1,12 +1,12 @@
# docker build -t yandex/clickhouse-stateful-test-with-coverage .
FROM yandex/clickhouse-stateless-test
FROM yandex/clickhouse-stateless-test-with-coverage
RUN echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" >> /etc/apt/sources.list
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \
python3-requests
python3-requests procps psmisc
COPY s3downloader /s3downloader
COPY run.sh /run.sh

View File

@ -1,40 +1,44 @@
#!/bin/bash
kill_clickhouse () {
kill "$(pgrep -u clickhouse)" 2>/dev/null
echo "clickhouse pids $(pgrep -u clickhouse)" | ts '%Y-%m-%d %H:%M:%S'
pkill -f "clickhouse-server" 2>/dev/null
for _ in {1..10}
for _ in {1..120}
do
if ! kill -0 "$(pgrep -u clickhouse)"; then
echo "No clickhouse process"
break
else
echo "Process $(pgrep -u clickhouse) still alive"
sleep 10
fi
if ! pkill -0 -f "clickhouse-server" ; then break ; fi
echo "ClickHouse still alive" | ts '%Y-%m-%d %H:%M:%S'
sleep 1
done
if pkill -0 -f "clickhouse-server"
then
pstree -apgT
jobs
echo "Failed to kill the ClickHouse server" | ts '%Y-%m-%d %H:%M:%S'
return 1
fi
}
start_clickhouse () {
LLVM_PROFILE_FILE='server_%h_%p_%m.profraw' sudo -Eu clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml &
}
wait_llvm_profdata () {
while kill -0 "$(pgrep llvm-profdata-10)"
counter=0
until clickhouse-client --query "SELECT 1"
do
echo "Waiting for profdata $(pgrep llvm-profdata-10) still alive"
sleep 3
if [ "$counter" -gt 120 ]
then
echo "Cannot start clickhouse-server"
cat /var/log/clickhouse-server/stdout.log
tail -n1000 /var/log/clickhouse-server/stderr.log
tail -n1000 /var/log/clickhouse-server/clickhouse-server.log
break
fi
sleep 0.5
counter=$((counter + 1))
done
}
merge_client_files_in_background () {
client_files=$(ls /client_*profraw 2>/dev/null)
if [ -n "$client_files" ]
then
llvm-profdata-10 merge -sparse "$client_files" -o "merged_client_$(date +%s).profraw"
rm "$client_files"
fi
}
chmod 777 /
@ -51,26 +55,7 @@ chmod 777 -R /var/log/clickhouse-server/
# install test configs
/usr/share/clickhouse-test/config/install.sh
function start()
{
counter=0
until clickhouse-client --query "SELECT 1"
do
if [ "$counter" -gt 120 ]
then
echo "Cannot start clickhouse-server"
cat /var/log/clickhouse-server/stdout.log
tail -n1000 /var/log/clickhouse-server/stderr.log
tail -n1000 /var/log/clickhouse-server/clickhouse-server.log
break
fi
timeout 120 service clickhouse-server start
sleep 0.5
counter=$((counter + 1))
done
}
start
start_clickhouse
# shellcheck disable=SC2086 # No quotes because I want to split it into words.
if ! /s3downloader --dataset-names $DATASETS; then
@ -81,25 +66,20 @@ fi
chmod 777 -R /var/lib/clickhouse
while /bin/true; do
merge_client_files_in_background
sleep 2
done &
LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-client --query "SHOW DATABASES"
LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-client --query "CREATE DATABASE test"
LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-client --query "SHOW DATABASES"
LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-client --query "CREATE DATABASE test"
kill_clickhouse
start_clickhouse
sleep 10
LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-client --query "SHOW TABLES FROM datasets"
LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-client --query "SHOW TABLES FROM test"
LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-client --query "SHOW TABLES FROM test"
LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-client --query "SHOW TABLES FROM datasets"
LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-client --query "SHOW TABLES FROM test"
LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-client --query "SHOW TABLES FROM test"
if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
SKIP_LIST_OPT="--use-skip-list"
@ -109,15 +89,10 @@ fi
# more ideologically correct.
read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
LLVM_PROFILE_FILE='client_%h_%p_%m.profraw' clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
LLVM_PROFILE_FILE='client_coverage.profraw' clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
kill_clickhouse
wait_llvm_profdata
sleep 3
wait_llvm_profdata # 100% merged all parts
cp /*.profraw /profraw ||:

View File

@ -1,4 +1,4 @@
# docker build -t yandex/clickhouse-stateless-with-coverage-test .
# docker build -t yandex/clickhouse-stateless-test-with-coverage .
# TODO: that can be based on yandex/clickhouse-stateless-test (llvm version and CMD differs)
FROM yandex/clickhouse-test-base
@ -28,7 +28,9 @@ RUN apt-get update -y \
lsof \
unixodbc \
wget \
qemu-user-static
qemu-user-static \
procps \
psmisc
RUN mkdir -p /tmp/clickhouse-odbc-tmp \
&& wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \

View File

@ -2,27 +2,41 @@
kill_clickhouse () {
echo "clickhouse pids $(pgrep -u clickhouse)" | ts '%Y-%m-%d %H:%M:%S'
kill "$(pgrep -u clickhouse)" 2>/dev/null
pkill -f "clickhouse-server" 2>/dev/null
for _ in {1..10}
for _ in {1..120}
do
if ! kill -0 "$(pgrep -u clickhouse)"; then
echo "No clickhouse process" | ts '%Y-%m-%d %H:%M:%S'
break
else
echo "Process $(pgrep -u clickhouse) still alive" | ts '%Y-%m-%d %H:%M:%S'
sleep 10
fi
if ! pkill -0 -f "clickhouse-server" ; then break ; fi
echo "ClickHouse still alive" | ts '%Y-%m-%d %H:%M:%S'
sleep 1
done
echo "Will try to send second kill signal for sure"
kill "$(pgrep -u clickhouse)" 2>/dev/null
sleep 5
echo "clickhouse pids $(pgrep -u clickhouse)" | ts '%Y-%m-%d %H:%M:%S'
if pkill -0 -f "clickhouse-server"
then
pstree -apgT
jobs
echo "Failed to kill the ClickHouse server" | ts '%Y-%m-%d %H:%M:%S'
return 1
fi
}
start_clickhouse () {
LLVM_PROFILE_FILE='server_%h_%p_%m.profraw' sudo -Eu clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml &
counter=0
until clickhouse-client --query "SELECT 1"
do
if [ "$counter" -gt 120 ]
then
echo "Cannot start clickhouse-server"
cat /var/log/clickhouse-server/stdout.log
tail -n1000 /var/log/clickhouse-server/stderr.log
tail -n1000 /var/log/clickhouse-server/clickhouse-server.log
break
fi
sleep 0.5
counter=$((counter + 1))
done
}
chmod 777 /
@ -44,9 +58,6 @@ chmod 777 -R /var/log/clickhouse-server/
start_clickhouse
sleep 10
if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
SKIP_LIST_OPT="--use-skip-list"
fi

View File

@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN pip3 install urllib3 testflows==1.6.62 docker-compose docker dicttoxml kazoo tzlocal
RUN pip3 install urllib3 testflows==1.6.65 docker-compose docker dicttoxml kazoo tzlocal
ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce

View File

@ -152,7 +152,7 @@ You can specify default arguments for `Replicated` table engine in the server co
```xml
<default_replica_path>/clickhouse/tables/{shard}/{database}/{table}</default_replica_path>
<default_replica_name>{replica}</default_replica_path>
<default_replica_name>{replica}</default_replica_name>
```
In this case, you can omit arguments when creating tables:
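A minimal sketch of such a statement, assuming the defaults above are configured (the table name and column are hypothetical):

``` sql
CREATE TABLE table_name (x UInt32) ENGINE = ReplicatedMergeTree ORDER BY x;
```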

View File

@ -11,7 +11,7 @@ By going through this tutorial, youll learn how to set up a simple ClickHouse
## Single Node Setup {#single-node-setup}
To postpone the complexities of a distributed environment, well start with deploying ClickHouse on a single server or virtual machine. ClickHouse is usually installed from [deb](../getting-started/install.md#install-from-deb-packages) or [rpm](../getting-started/install.md#from-rpm-packages) packages, but there are [alternatives](../getting-started/install.md#from-docker-image) for the operating systems that do no support them.
To postpone the complexities of a distributed environment, well start with deploying ClickHouse on a single server or virtual machine. ClickHouse is usually installed from [deb](../getting-started/install.md#install-from-deb-packages) or [rpm](../getting-started/install.md#from-rpm-packages) packages, but there are [alternatives](../getting-started/install.md#from-docker-image) for the operating systems that do not support them.
For example, you have chosen `deb` packages and executed:

View File

@ -44,11 +44,10 @@ stages, such as query planning or distributed queries.
To be useful, the tracing information has to be exported to a monitoring system
that supports OpenTelemetry, such as Jaeger or Prometheus. ClickHouse avoids
a dependency on a particular monitoring system, instead only
providing the tracing data conforming to the standard. A natural way to do so
in an SQL RDBMS is a system table. OpenTelemetry trace span information
a dependency on a particular monitoring system, instead only providing the
tracing data through a system table. OpenTelemetry trace span information
[required by the standard](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/overview.md#span)
is stored in the system table called `system.opentelemetry_span_log`.
is stored in the `system.opentelemetry_span_log` table.
The table must be enabled in the server configuration, see the `opentelemetry_span_log`
element in the default config file `config.xml`. It is enabled by default.
@ -67,3 +66,31 @@ The table has the following columns:
The tags or attributes are saved as two parallel arrays, containing the keys
and values. Use `ARRAY JOIN` to work with them.
## Integration with monitoring systems
At the moment, there is no ready tool that can export the tracing data from
ClickHouse to a monitoring system.
For testing, it is possible to set up the export using a materialized view with the URL engine over the `system.opentelemetry_span_log` table, which would push the arriving log data to an HTTP endpoint of a trace collector. For example, to push the minimal span data to a Zipkin instance running at `http://localhost:9411`, in Zipkin v2 JSON format:
```sql
CREATE MATERIALIZED VIEW default.zipkin_spans
ENGINE = URL('http://127.0.0.1:9411/api/v2/spans', 'JSONEachRow')
SETTINGS output_format_json_named_tuples_as_objects = 1,
output_format_json_array_of_rows = 1 AS
SELECT
lower(hex(reinterpretAsFixedString(trace_id))) AS traceId,
lower(hex(parent_span_id)) AS parentId,
lower(hex(span_id)) AS id,
operation_name AS name,
start_time_us AS timestamp,
finish_time_us - start_time_us AS duration,
cast(tuple('clickhouse'), 'Tuple(serviceName text)') AS localEndpoint,
cast(tuple(
attribute.values[indexOf(attribute.names, 'db.statement')]),
'Tuple("db.statement" text)') AS tags
FROM system.opentelemetry_span_log
```
In case of any errors, the part of the log data for which the error has occurred will be silently lost. Check the server log for error messages if the data does not arrive.

View File

@ -2317,4 +2317,10 @@ Possible values:
Default value: `1`.
## output_format_tsv_null_representation {#output_format_tsv_null_representation}
Allows configurable `NULL` representation for the [TSV](../../interfaces/formats.md#tabseparated) output format. The setting only controls the output format; `\N` is the only supported `NULL` representation for the TSV input format.
Default value: `\N`.
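As a rough illustration (the replacement value is hypothetical, and the sketch assumes the setting can be passed through the query-level `SETTINGS` clause):

``` sql
SELECT NULL AS x
SETTINGS output_format_tsv_null_representation = 'NULL'
FORMAT TSV
```

This would print `NULL` for the row instead of the default `\N`.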
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->

View File

@ -0,0 +1,70 @@
# system.replicated_fetches {#system_tables-replicated_fetches}
Contains information about currently running background fetches.
Columns:
- `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database.
- `table` ([String](../../sql-reference/data-types/string.md)) — Name of the table.
- `elapsed` ([Float64](../../sql-reference/data-types/float.md)) — The time elapsed (in seconds) since the currently running background fetch started.
- `progress` ([Float64](../../sql-reference/data-types/float.md)) — The percentage of completed work from 0 to 1.
- `result_part_name` ([String](../../sql-reference/data-types/string.md)) — The name of the part that will be formed as the result of the currently running background fetch.
- `result_part_path` ([String](../../sql-reference/data-types/string.md)) — Absolute path to the part that will be formed as the result of the currently running background fetch.
- `partition_id` ([String](../../sql-reference/data-types/string.md)) — ID of the partition.
- `total_size_bytes_compressed` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The total size (in bytes) of the compressed data in the result part.
- `bytes_read_compressed` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of compressed bytes read from the result part.
- `source_replica_path` ([String](../../sql-reference/data-types/string.md)) — Absolute path to the source replica.
- `source_replica_hostname` ([String](../../sql-reference/data-types/string.md)) — Hostname of the source replica.
- `source_replica_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — Port number of the source replica.
- `interserver_scheme` ([String](../../sql-reference/data-types/string.md)) — Name of the interserver scheme.
- `URI` ([String](../../sql-reference/data-types/string.md)) — Uniform resource identifier.
- `to_detached` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag indicates whether the currently running background fetch is being performed using the `TO DETACHED` expression.
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread identifier.
**Example**
``` sql
SELECT * FROM system.replicated_fetches LIMIT 1 FORMAT Vertical;
```
``` text
Row 1:
──────
database: default
table: t
elapsed: 7.243039876
progress: 0.41832135995612835
result_part_name: all_0_0_0
result_part_path: /var/lib/clickhouse/store/700/70080a04-b2de-4adf-9fa5-9ea210e81766/all_0_0_0/
partition_id: all
total_size_bytes_compressed: 1052783726
bytes_read_compressed: 440401920
source_replica_path: /clickhouse/test/t/replicas/1
source_replica_hostname: node1
source_replica_port: 9009
interserver_scheme: http
URI: http://node1:9009/?endpoint=DataPartsExchange%3A%2Fclickhouse%2Ftest%2Ft%2Freplicas%2F1&part=all_0_0_0&client_protocol_version=4&compress=false
to_detached: 0
thread_id: 54
```
**See Also**
- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system/#query-language-system-replicated)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/replicated_fetches) <!--hide-->

View File

@ -1,42 +1,42 @@
# ClickHouse obfuscator
Simple tool for table data obfuscation.
It reads input table and produces output table, that retain some properties of input, but contains different data.
It allows to publish almost real production data for usage in benchmarks.
It is designed to retain the following properties of data:
- cardinalities of values (number of distinct values) for every column and for every tuple of columns;
- conditional cardinalities: number of distinct values of one column under condition on value of another column;
- probability distributions of absolute value of integers; sign of signed integers; exponent and sign for floats;
- probability distributions of length of strings;
- probability of zero values of numbers; empty strings and arrays, NULLs;
- data compression ratio when compressed with LZ77 and entropy family of codecs;
- continuity (magnitude of difference) of time values across table; continuity of floating point values.
- date component of DateTime values;
- UTF-8 validity of string values;
- string values continue to look somewhat natural.
Most of the properties above are viable for performance testing:
reading data, filtering, aggregation and sorting will work at almost the same speed
as on original data due to saved cardinalities, magnitudes, compression ratios, etc.
It works in deterministic fashion: you define a seed value and transform is totally determined by input data and by seed.
Some transforms are one to one and could be reversed, so you need to have large enough seed and keep it in secret.
It use some cryptographic primitives to transform data, but from the cryptographic point of view,
It doesn't do anything properly and you should never consider the result as secure, unless you have other reasons for it.
It may retain some data you don't want to publish.
It always leave numbers 0, 1, -1 as is. Also it leaves dates, lengths of arrays and null flags exactly as in source data.
For example, you have a column IsMobile in your table with values 0 and 1. In transformed data, it will have the same value.
So, the user will be able to count exact ratio of mobile traffic.
Another example, suppose you have some private data in your table, like user email and you don't want to publish any single email address.
If your table is large enough and contain multiple different emails and there is no email that have very high frequency than all others,
It will perfectly anonymize all data. But if you have small amount of different values in a column, it can possibly reproduce some of them.
And you should take care and look at exact algorithm, how this tool works, and probably fine tune some of it command line parameters.
This tool works fine only with reasonable amount of data (at least 1000s of rows).
# ClickHouse obfuscator
A simple tool for table data obfuscation.
It reads an input table and produces an output table, that retains some properties of input, but contains different data.
It allows publishing almost real production data for usage in benchmarks.
It is designed to retain the following properties of data:
- cardinalities of values (number of distinct values) for every column and every tuple of columns;
- conditional cardinalities: number of distinct values of one column under the condition on the value of another column;
- probability distributions of the absolute value of integers; the sign of signed integers; exponent and sign for floats;
- probability distributions of the length of strings;
- probability of zero values of numbers; empty strings and arrays, `NULL`s;
- data compression ratio when compressed with LZ77 and entropy family of codecs;
- continuity (magnitude of difference) of time values across the table; continuity of floating-point values;
- date component of `DateTime` values;
- UTF-8 validity of string values;
- string values look natural.
Most of the properties above are viable for performance testing:
reading data, filtering, aggregation, and sorting will work at almost the same speed
as on original data due to saved cardinalities, magnitudes, compression ratios, etc.
It works in a deterministic fashion: you define a seed value and the transformation is determined by input data and by seed.
Some transformations are one to one and could be reversed, so you need to have a large seed and keep it in secret.
It uses some cryptographic primitives to transform data but from the cryptographic point of view, it doesn't do it properly, that is why you should not consider the result as secure unless you have another reason. The result may retain some data you don't want to publish.
It always leaves 0, 1, -1 numbers, dates, lengths of arrays, and null flags exactly as in source data.
For example, you have a column `IsMobile` in your table with values 0 and 1. In transformed data, it will have the same value.
So, the user will be able to count the exact ratio of mobile traffic.
Let's give another example. Suppose you have some private data in your table, like user email addresses, and you don't want to publish any single email address.
If your table is large enough and contains many different emails, and no email has a much higher frequency than all others, it will anonymize all the data. But if you have a small number of different values in a column, it can reproduce some of them.
In that case, you should study the exact algorithm this tool uses and fine-tune its command-line parameters.
This tool works well only with a reasonable amount of data (at least thousands of rows).

View File

@ -44,8 +44,6 @@ SELECT sum(y) FROM t_null_big
└────────┘
```
The `sum` function interprets `NULL` as `0`. In particular, this means that if the function receives input of a selection where all the values are `NULL`, then the result will be `0`, not `NULL`.
Now you can use the `groupArray` function to create an array from the `y` column:
``` sql

View File

@ -0,0 +1,37 @@
---
toc_priority: 150
---
## initializeAggregation {#initializeaggregation}
Initializes aggregation for your input rows. It is intended for the functions with the suffix `State`.
Use it for tests or to process columns of types `AggregateFunction` and `AggregatingMergeTree`.
**Syntax**
``` sql
initializeAggregation (aggregate_function, column_1, column_2);
```
**Parameters**
- `aggregate_function` — Name of the aggregation function whose state should be created. [String](../../../sql-reference/data-types/string.md#string).
- `column_n` — The column to pass to the function as its argument. [String](../../../sql-reference/data-types/string.md#string).
**Returned value(s)**
Returns the result of the aggregation for your input rows. The return type will be the same as the return type of the function that `initializeAggregation` takes as its first argument.
For example for functions with the suffix `State` the return type will be `AggregateFunction`.
**Example**
Query:
```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM system.numbers LIMIT 10000);
```
Result:
``` text
┌─uniqMerge(state)─┐
│                3 │
└──────────────────┘
```

View File

@ -535,18 +535,7 @@ dateDiff('unit', startdate, enddate, [timezone])
- `unit` — Time unit, in which the returned value is expressed. [String](../../sql-reference/syntax.md#syntax-string-literal).
Supported values:
| unit |
| ---- |
|second |
|minute |
|hour |
|day |
|week |
|month |
|quarter |
|year |
Supported values: second, minute, hour, day, week, month, quarter, year.
- `startdate` — The first time value to compare. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
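For instance, a usage sketch (the values are chosen purely for illustration):

``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```

This would return 25, the number of hours between the two values.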

View File

@ -0,0 +1,381 @@
---
toc_priority: 67
toc_title: Encryption
---
# Encryption functions {#encryption-functions}
These functions implement encryption and decryption of data with AES (Advanced Encryption Standard) algorithm.
Key length depends on encryption mode. It is 16, 24, and 32 bytes long for `-128-`, `-192-`, and `-256-` modes respectively.
Initialization vector length is always 16 bytes (bytes in excess of 16 are ignored).
Note that these functions work slowly.
## encrypt {#encrypt}
This function encrypts data using these modes:
- aes-128-ecb, aes-192-ecb, aes-256-ecb
- aes-128-cbc, aes-192-cbc, aes-256-cbc
- aes-128-cfb1, aes-192-cfb1, aes-256-cfb1
- aes-128-cfb8, aes-192-cfb8, aes-256-cfb8
- aes-128-cfb128, aes-192-cfb128, aes-256-cfb128
- aes-128-ofb, aes-192-ofb, aes-256-ofb
- aes-128-gcm, aes-192-gcm, aes-256-gcm
**Syntax**
``` sql
encrypt('mode', 'plaintext', 'key' [, iv, aad])
```
**Parameters**
- `mode` — Encryption mode. [String](../../sql-reference/data-types/string.md#string).
- `plaintext` — Text that needs to be encrypted. [String](../../sql-reference/data-types/string.md#string).
- `key` — Encryption key. [String](../../sql-reference/data-types/string.md#string).
- `iv` — Initialization vector. Required for `-gcm` modes, optional for others. [String](../../sql-reference/data-types/string.md#string).
- `aad` — Additional authenticated data. It isn't encrypted, but it affects decryption. Works only in `-gcm` modes; for other modes an exception is thrown. [String](../../sql-reference/data-types/string.md#string).
**Returned value**
- Ciphered String. [String](../../sql-reference/data-types/string.md#string).
**Examples**
Create this table:
Query:
``` sql
CREATE TABLE encryption_test
(
input String,
key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'),
iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'),
key32 String DEFAULT substring(key, 1, 32),
key24 String DEFAULT substring(key, 1, 24),
key16 String DEFAULT substring(key, 1, 16)
) Engine = Memory;
```
Insert this data:
Query:
``` sql
INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?');
```
Example without `iv`:
Query:
``` sql
SELECT 'aes-128-ecb' AS mode, hex(encrypt(mode, input, key16)) FROM encryption_test;
```
Result:
``` text
┌─mode────────┬─hex(encrypt('aes-128-ecb', input, key16))────────────────────────┐
│ aes-128-ecb │ 4603E6862B0D94BBEC68E0B0DF51D60F │
│ aes-128-ecb │ 3004851B86D3F3950672DE7085D27C03 │
│ aes-128-ecb │ E807F8C8D40A11F65076361AFC7D8B68D8658C5FAA6457985CAA380F16B3F7E4 │
└─────────────┴──────────────────────────────────────────────────────────────────┘
```
Example with `iv`:
Query:
``` sql
SELECT 'aes-256-ctr' AS mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;
```
Result:
``` text
┌─mode────────┬─hex(encrypt('aes-256-ctr', input, key32, iv))─┐
│ aes-256-ctr │ │
│ aes-256-ctr │ 7FB039F7 │
│ aes-256-ctr │ 5CBD20F7ABD3AC41FCAA1A5C0E119E2B325949 │
└─────────────┴───────────────────────────────────────────────┘
```
Example with `-gcm`:
Query:
``` sql
SELECT 'aes-256-gcm' AS mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;
```
Result:
``` text
┌─mode────────┬─hex(encrypt('aes-256-gcm', input, key32, iv))──────────────────────────┐
│ aes-256-gcm │ E99DBEBC01F021758352D7FBD9039EFA │
│ aes-256-gcm │ 8742CE3A7B0595B281C712600D274CA881F47414 │
│ aes-256-gcm │ A44FD73ACEB1A64BDE2D03808A2576EDBB60764CC6982DB9AF2C33C893D91B00C60DC5 │
└─────────────┴────────────────────────────────────────────────────────────────────────┘
```
Example with `-gcm` mode and with `aad`:
Query:
``` sql
SELECT 'aes-192-gcm' AS mode, hex(encrypt(mode, input, key24, iv, 'AAD')) FROM encryption_test;
```
Result:
``` text
┌─mode────────┬─hex(encrypt('aes-192-gcm', input, key24, iv, 'AAD'))───────────────────┐
│ aes-192-gcm │ 04C13E4B1D62481ED22B3644595CB5DB │
│ aes-192-gcm │ 9A6CF0FD2B329B04EAD18301818F016DF8F77447 │
│ aes-192-gcm │ B961E9FD9B940EBAD7ADDA75C9F198A40797A5EA1722D542890CC976E21113BBB8A7AA │
└─────────────┴────────────────────────────────────────────────────────────────────────┘
```
## aes_encrypt_mysql {#aes_encrypt_mysql}
Compatible with MySQL encryption; the result can be decrypted with the [AES_DECRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-decrypt) function.
Supported encryption modes:
- aes-128-ecb, aes-192-ecb, aes-256-ecb
- aes-128-cbc, aes-192-cbc, aes-256-cbc
- aes-128-cfb1, aes-192-cfb1, aes-256-cfb1
- aes-128-cfb8, aes-192-cfb8, aes-256-cfb8
- aes-128-cfb128, aes-192-cfb128, aes-256-cfb128
- aes-128-ofb, aes-192-ofb, aes-256-ofb
**Syntax**
```sql
aes_encrypt_mysql('mode', 'plaintext', 'key' [, iv])
```
**Parameters**
- `mode` — Encryption mode. [String](../../sql-reference/data-types/string.md#string).
- `plaintext` — Text that needs to be encrypted. [String](../../sql-reference/data-types/string.md#string).
- `key` — Encryption key. [String](../../sql-reference/data-types/string.md#string).
- `iv` — Initialization vector. Optional. [String](../../sql-reference/data-types/string.md#string).
**Returned value**
- Ciphered String. [String](../../sql-reference/data-types/string.md#string).
**Examples**
Create this table:
Query:
``` sql
CREATE TABLE encryption_test
(
input String,
key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'),
iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'),
key32 String DEFAULT substring(key, 1, 32),
key24 String DEFAULT substring(key, 1, 24),
key16 String DEFAULT substring(key, 1, 16)
) Engine = Memory;
```
Insert this data:
Query:
``` sql
INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?');
```
Example without `iv`:
Query:
``` sql
SELECT 'aes-128-cbc' AS mode, hex(aes_encrypt_mysql(mode, input, key32)) FROM encryption_test;
```
Result:
``` text
┌─mode────────┬─hex(aes_encrypt_mysql('aes-128-cbc', input, key32))──────────────┐
│ aes-128-cbc │ FEA8CFDE6EE2C6E7A2CC6ADDC9F62C83 │
│ aes-128-cbc │ 78B16CD4BE107660156124C5FEE6454A │
│ aes-128-cbc │ 67C0B119D96F18E2823968D42871B3D179221B1E7EE642D628341C2B29BA2E18 │
└─────────────┴──────────────────────────────────────────────────────────────────┘
```
Example with `iv`:
Query:
``` sql
SELECT 'aes-256-cfb128' AS mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
```
Result:
``` text
┌─mode───────────┬─hex(aes_encrypt_mysql('aes-256-cfb128', input, key32, iv))─┐
│ aes-256-cfb128 │ │
│ aes-256-cfb128 │ 7FB039F7 │
│ aes-256-cfb128 │ 5CBD20F7ABD3AC41FCAA1A5C0E119E2BB5174F │
└────────────────┴────────────────────────────────────────────────────────────┘
```
## decrypt {#decrypt}
This function decrypts data using these modes:
- aes-128-ecb, aes-192-ecb, aes-256-ecb
- aes-128-cbc, aes-192-cbc, aes-256-cbc
- aes-128-cfb1, aes-192-cfb1, aes-256-cfb1
- aes-128-cfb8, aes-192-cfb8, aes-256-cfb8
- aes-128-cfb128, aes-192-cfb128, aes-256-cfb128
- aes-128-ofb, aes-192-ofb, aes-256-ofb
- aes-128-gcm, aes-192-gcm, aes-256-gcm
**Syntax**
```sql
decrypt('mode', 'ciphertext', 'key' [, iv, aad])
```
**Parameters**
- `mode` — Decryption mode. [String](../../sql-reference/data-types/string.md#string).
- `ciphertext` — Encrypted text that needs to be decrypted. [String](../../sql-reference/data-types/string.md#string).
- `key` — Decryption key. [String](../../sql-reference/data-types/string.md#string).
- `iv` — Initialization vector. Required for `-gcm` modes, optional for others. [String](../../sql-reference/data-types/string.md#string).
- `aad` — Additional authenticated data. Decryption fails if this value is incorrect. Works only in `-gcm` modes; other modes throw an exception. [String](../../sql-reference/data-types/string.md#string).
**Returned value**
- Decrypted String. [String](../../sql-reference/data-types/string.md#string).
**Examples**
Create this table:
Query:
``` sql
CREATE TABLE encryption_test
(
input String,
key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'),
iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'),
key32 String DEFAULT substring(key, 1, 32),
key24 String DEFAULT substring(key, 1, 24),
key16 String DEFAULT substring(key, 1, 16)
) Engine = Memory;
```
Insert this data:
Query:
``` sql
INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?');
```
Query:
``` sql
SELECT 'aes-128-ecb' AS mode, decrypt(mode, encrypt(mode, input, key16), key16) FROM encryption_test;
```
Result:
```text
┌─mode────────┬─decrypt('aes-128-ecb', encrypt('aes-128-ecb', input, key16), key16)─┐
│ aes-128-ecb │ │
│ aes-128-ecb │ text │
│ aes-128-ecb │ What Is ClickHouse? │
└─────────────┴─────────────────────────────────────────────────────────────────────┘
```
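The `aad` value must match between `encrypt` and `decrypt`. As a quick sketch (reusing the columns defined above; the result set is not reproduced here), a `-gcm` round trip with `aad` looks like this:

``` sql
SELECT 'aes-192-gcm' AS mode, decrypt(mode, encrypt(mode, input, key24, iv, 'AAD'), key24, iv, 'AAD') FROM encryption_test;
```

If the same `iv` and `aad` are supplied on both sides, each row is expected to come back as the original `input` value.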
## aes_decrypt_mysql {#aes_decrypt_mysql}
Compatible with MySQL encryption; decrypts data encrypted with the [AES_ENCRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-encrypt) function.
Supported decryption modes:
- aes-128-ecb, aes-192-ecb, aes-256-ecb
- aes-128-cbc, aes-192-cbc, aes-256-cbc
- aes-128-cfb1, aes-192-cfb1, aes-256-cfb1
- aes-128-cfb8, aes-192-cfb8, aes-256-cfb8
- aes-128-cfb128, aes-192-cfb128, aes-256-cfb128
- aes-128-ofb, aes-192-ofb, aes-256-ofb
**Syntax**
```sql
aes_decrypt_mysql('mode', 'ciphertext', 'key' [, iv])
```
**Parameters**
- `mode` — Decryption mode. [String](../../sql-reference/data-types/string.md#string).
- `ciphertext` — Encrypted text that needs to be decrypted. [String](../../sql-reference/data-types/string.md#string).
- `key` — Decryption key. [String](../../sql-reference/data-types/string.md#string).
- `iv` — Initialization vector. Optional. [String](../../sql-reference/data-types/string.md#string).
**Returned value**
- Decrypted String. [String](../../sql-reference/data-types/string.md#string).
**Examples**
Create this table:
Query:
``` sql
CREATE TABLE encryption_test
(
input String,
key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'),
iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'),
key32 String DEFAULT substring(key, 1, 32),
key24 String DEFAULT substring(key, 1, 24),
key16 String DEFAULT substring(key, 1, 16)
) Engine = Memory;
```
Insert this data:
Query:
``` sql
INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?');
```
Query:
``` sql
SELECT 'aes-128-cbc' AS mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key), key) FROM encryption_test;
```
Result:
``` text
┌─mode────────┬─aes_decrypt_mysql('aes-128-cbc', aes_encrypt_mysql('aes-128-cbc', input, key), key)─┐
│ aes-128-cbc │ │
│ aes-128-cbc │ text │
│ aes-128-cbc │ What Is ClickHouse? │
└─────────────┴─────────────────────────────────────────────────────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/sql-reference/functions/encryption_functions/) <!--hide-->

View File

@ -306,3 +306,67 @@ execute_native_thread_routine
start_thread
clone
```
## tid {#tid}
Returns the ID of the thread that processes the current [Block](https://clickhouse.tech/docs/en/development/architecture/#block).
**Syntax**
``` sql
tid()
```
**Returned value**
- Current thread ID. [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges).
**Example**
Query:
``` sql
SELECT tid();
```
Result:
``` text
┌─tid()─┐
│ 3878 │
└───────┘
```
## logTrace {#logtrace}
Emits a trace log message to the server log for each [Block](https://clickhouse.tech/docs/en/development/architecture/#block).
**Syntax**
``` sql
logTrace('message')
```
**Parameters**
- `message` — Message that is emitted to server log. [String](../../sql-reference/data-types/string.md#string).
**Returned value**
- Always returns 0.
**Example**
Query:
``` sql
SELECT logTrace('logTrace message');
```
Result:
``` text
┌─logTrace('logTrace message')─┐
│ 0 │
└──────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/introspection/) <!--hide-->

View File

@ -115,7 +115,21 @@ Returns the “first significant subdomain”. This is a non-standard concept sp
Returns the part of the domain that includes top-level subdomains up to the “first significant subdomain” (see the explanation above).
For example, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`.
For example:
- `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`.
- `cutToFirstSignificantSubdomain('www.tr') = 'tr'`.
- `cutToFirstSignificantSubdomain('tr') = ''`.
### cutToFirstSignificantSubdomainWithWWW {#cuttofirstsignificantsubdomainwithwww}
Returns the part of the domain that includes top-level subdomains up to the “first significant subdomain”, without stripping `www`.
For example:
- `cutToFirstSignificantSubdomainWithWWW('https://news.yandex.com.tr/') = 'yandex.com.tr'`.
- `cutToFirstSignificantSubdomainWithWWW('www.tr') = 'www.tr'`.
- `cutToFirstSignificantSubdomainWithWWW('tr') = ''`.
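A minimal query sketch (taking the `'www.tr'` case from the list above):

``` sql
SELECT cutToFirstSignificantSubdomainWithWWW('www.tr');
```

Per the examples above, this is expected to return `'www.tr'`, keeping the leading `www`.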
### port(URL\[, default_port = 0\]) {#port}

View File

@ -20,7 +20,7 @@ CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME([MIN val1] MAX val2)
LIFETIME({MIN min_val MAX max_val | max_val})
```
External dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type, all other properties may have default values.
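A hypothetical sketch of a dictionary definition using the updated `LIFETIME` grammar (the dictionary name, source table, host and credentials below are placeholders, not part of the original documentation):

``` sql
CREATE DICTIONARY example_dict
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' PASSWORD '' DB 'default' TABLE 'example_source'))
LAYOUT(FLAT())
-- A MIN/MAX range; a single value such as LIFETIME(360) is also accepted per the grammar above.
LIFETIME(MIN 300 MAX 360);
```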

View File

@ -27,9 +27,9 @@ It is applicable when selecting data from tables that use the [MergeTree](../../
### Drawbacks {#drawbacks}
Queries that use `FINAL` are executed not as fast as similar queries that don't, because:
Queries that use `FINAL` are executed slightly slower than similar queries that don't, because:
- Query is executed in a single thread and data is merged during query execution.
- Data is merged during query execution.
- Queries with `FINAL` read primary key columns in addition to the columns specified in the query.
**In most cases, avoid using `FINAL`.** The common approach is to use different queries that assume the background processes of the `MergeTree` engine haven't happened yet and deal with it by applying aggregation (for example, to discard duplicates), as in the sketch below. {## TODO: examples ##}
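As an illustration of that approach (the table and column names are hypothetical), duplicates produced by a `ReplacingMergeTree`-style table can be collapsed with aggregation instead of `FINAL`:

``` sql
-- Instead of: SELECT * FROM events FINAL
-- keep the latest version of each row explicitly:
SELECT id, argMax(value, version) AS value
FROM events
GROUP BY id;
```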

View File

@ -6,7 +6,7 @@ toc_title: GROUP BY
`GROUP BY` clause switches the `SELECT` query into an aggregation mode, which works as follows:
- `GROUP BY` clause contains a list of expressions (or a single expression, which is considered to be the list of length one). This list acts as a “grouping key”, while each individual expression will be referred to as a “key expressions”.
- `GROUP BY` clause contains a list of expressions (or a single expression, which is considered to be the list of length one). This list acts as a “grouping key”, while each individual expression will be referred to as a “key expression”.
- All the expressions in the [SELECT](../../../sql-reference/statements/select/index.md), [HAVING](../../../sql-reference/statements/select/having.md), and [ORDER BY](../../../sql-reference/statements/select/order-by.md) clauses **must** be calculated based on key expressions **or** on [aggregate functions](../../../sql-reference/aggregate-functions/index.md) over non-key expressions (including plain columns). In other words, each column selected from the table must be used either in a key expression or inside an aggregate function, but not both.
- Result of aggregating `SELECT` query will contain as many rows as there were unique values of “grouping key” in the source table. Usually this significantly reduces the row count, often by orders of magnitude, but not necessarily: row count stays the same if all “grouping key” values were distinct.
@ -45,6 +45,154 @@ You can see that `GROUP BY` for `y = NULL` summed up `x`, as if `NULL` is this v
If you pass several keys to `GROUP BY`, the result will give you all the combinations of the selection, as if `NULL` were a specific value.
## WITH ROLLUP Modifier {#with-rollup-modifier}
`WITH ROLLUP` modifier is used to calculate subtotals for the key expressions, based on their order in the `GROUP BY` list. The subtotal rows are added after the result table.
The subtotals are calculated in reverse order: first for the last key expression in the list, then for the previous one, and so on, up to the first key expression.
In the subtotal rows, the values of the already “grouped” key expressions are set to `0` or an empty string.
!!! note "Note"
Mind that [HAVING](../../../sql-reference/statements/select/having.md) clause can affect the subtotals results.
**Example**
Consider the table t:
```text
┌─year─┬─month─┬─day─┐
│ 2019 │ 1 │ 5 │
│ 2019 │ 1 │ 15 │
│ 2020 │ 1 │ 5 │
│ 2020 │ 1 │ 15 │
│ 2020 │ 10 │ 5 │
│ 2020 │ 10 │ 15 │
└──────┴───────┴─────┘
```
Query:
```sql
SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH ROLLUP;
```
As `GROUP BY` section has three key expressions, the result contains four tables with subtotals "rolled up" from right to left:
- `GROUP BY year, month, day`;
- `GROUP BY year, month` (and `day` column is filled with zeros);
- `GROUP BY year` (now `month, day` columns are both filled with zeros);
- and totals (and all three key expression columns are zeros).
```text
┌─year─┬─month─┬─day─┬─count()─┐
│ 2020 │ 10 │ 15 │ 1 │
│ 2020 │ 1 │ 5 │ 1 │
│ 2019 │ 1 │ 5 │ 1 │
│ 2020 │ 1 │ 15 │ 1 │
│ 2019 │ 1 │ 15 │ 1 │
│ 2020 │ 10 │ 5 │ 1 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 2019 │ 1 │ 0 │ 2 │
│ 2020 │ 1 │ 0 │ 2 │
│ 2020 │ 10 │ 0 │ 2 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 2019 │ 0 │ 0 │ 2 │
│ 2020 │ 0 │ 0 │ 4 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 0 │ 0 │ 0 │ 6 │
└──────┴───────┴─────┴─────────┘
```
## WITH CUBE Modifier {#with-cube-modifier}
`WITH CUBE` modifier is used to calculate subtotals for every combination of the key expressions in the `GROUP BY` list. The subtotal rows are added after the result table.
In the subtotal rows, the values of all “grouped” key expressions are set to `0` or an empty string.
!!! note "Note"
Mind that [HAVING](../../../sql-reference/statements/select/having.md) clause can affect the subtotals results.
**Example**
Consider the table t:
```text
┌─year─┬─month─┬─day─┐
│ 2019 │ 1 │ 5 │
│ 2019 │ 1 │ 15 │
│ 2020 │ 1 │ 5 │
│ 2020 │ 1 │ 15 │
│ 2020 │ 10 │ 5 │
│ 2020 │ 10 │ 15 │
└──────┴───────┴─────┘
```
Query:
```sql
SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH CUBE;
```
As `GROUP BY` section has three key expressions, the result contains eight tables with subtotals for all key expression combinations:
- `GROUP BY year, month, day`
- `GROUP BY year, month`
- `GROUP BY year, day`
- `GROUP BY year`
- `GROUP BY month, day`
- `GROUP BY month`
- `GROUP BY day`
- and totals.
Columns excluded from `GROUP BY` are filled with zeros.
```text
┌─year─┬─month─┬─day─┬─count()─┐
│ 2020 │ 10 │ 15 │ 1 │
│ 2020 │ 1 │ 5 │ 1 │
│ 2019 │ 1 │ 5 │ 1 │
│ 2020 │ 1 │ 15 │ 1 │
│ 2019 │ 1 │ 15 │ 1 │
│ 2020 │ 10 │ 5 │ 1 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 2019 │ 1 │ 0 │ 2 │
│ 2020 │ 1 │ 0 │ 2 │
│ 2020 │ 10 │ 0 │ 2 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 2020 │ 0 │ 5 │ 2 │
│ 2019 │ 0 │ 5 │ 1 │
│ 2020 │ 0 │ 15 │ 2 │
│ 2019 │ 0 │ 15 │ 1 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 2019 │ 0 │ 0 │ 2 │
│ 2020 │ 0 │ 0 │ 4 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 0 │ 1 │ 5 │ 2 │
│ 0 │ 10 │ 15 │ 1 │
│ 0 │ 10 │ 5 │ 1 │
│ 0 │ 1 │ 15 │ 2 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 0 │ 1 │ 0 │ 4 │
│ 0 │ 10 │ 0 │ 2 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 0 │ 0 │ 5 │ 3 │
│ 0 │ 0 │ 15 │ 3 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 0 │ 0 │ 0 │ 6 │
└──────┴───────┴─────┴─────────┘
```
## WITH TOTALS Modifier {#with-totals-modifier}
If the `WITH TOTALS` modifier is specified, another row will be calculated. This row will have key columns containing default values (zeros or empty strings), and columns of aggregate functions with the values calculated across all the rows (the “total” values).
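A short sketch using the same table `t` from the examples above:

``` sql
SELECT year, month, count(*) FROM t GROUP BY year, month WITH TOTALS;
```

In addition to the per-group rows, a totals row is calculated with `year = 0`, `month = 0`, and `count()` equal to 6, i.e. the aggregate over all six rows of `t`.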
@ -88,8 +236,6 @@ SELECT
FROM hits
```
However, in contrast to standard SQL, if the table doesn't have any rows (either there aren't any at all, or there aren't any after using WHERE to filter), an empty result is returned, and not the result from one of the rows containing the initial values of aggregate functions.
As opposed to MySQL (and conforming to standard SQL), you can't get a value of some column that is not in a key or aggregate function (except constant expressions). To work around this, you can use the `any` aggregate function (get the first encountered value) or `min`/`max`.
Example:
@ -105,10 +251,6 @@ GROUP BY domain
For every different key value encountered, `GROUP BY` calculates a set of aggregate function values.
`GROUP BY` is not supported for array columns.
A constant can't be specified as an argument for an aggregate function. Example: `sum(1)`. Instead of this, you can get rid of the constant. Example: `count()`.
## Implementation Details {#implementation-details}
Aggregation is one of the most important features of a column-oriented DBMS, and thus its implementation is one of the most heavily optimized parts of ClickHouse. By default, aggregation is done in memory using a hash-table. It has 40+ specializations that are chosen automatically depending on “grouping key” data types.

View File

@ -20,12 +20,12 @@ SELECT [DISTINCT] expr_list
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>)
[PREWHERE expr]
[WHERE expr]
[GROUP BY expr_list] [WITH TOTALS]
[GROUP BY expr_list] [WITH ROLLUP|WITH CUBE] [WITH TOTALS]
[HAVING expr]
[ORDER BY expr_list] [WITH FILL] [FROM expr] [TO expr] [STEP expr]
[LIMIT [offset_value, ]n BY columns]
[LIMIT [n, ]m] [WITH TIES]
[UNION ALL ...]
[UNION ...]
[INTO OUTFILE filename]
[FORMAT format]
```
@ -46,7 +46,7 @@ Specifics of each optional clause are covered in separate sections, which are li
- [SELECT clause](#select-clause)
- [DISTINCT clause](../../../sql-reference/statements/select/distinct.md)
- [LIMIT clause](../../../sql-reference/statements/select/limit.md)
- [UNION ALL clause](../../../sql-reference/statements/select/union-all.md)
- [UNION clause](../../../sql-reference/statements/select/union-all.md)
- [INTO OUTFILE clause](../../../sql-reference/statements/select/into-outfile.md)
- [FORMAT clause](../../../sql-reference/statements/select/format.md)
@ -159,4 +159,111 @@ If the query omits the `DISTINCT`, `GROUP BY` and `ORDER BY` clauses and the `IN
For more information, see the section “Settings”. It is possible to use external sorting (saving temporary tables to a disk) and external aggregation.
{## [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/) ##}
## SELECT modifiers {#select-modifiers}
You can use the following modifiers in `SELECT` queries.
### APPLY {#apply-modifier}
Allows you to invoke a function for each row returned by an outer table expression of a query.
**Syntax:**
``` sql
SELECT <expr> APPLY( <func> ) FROM [db.]table_name
```
**Example:**
``` sql
CREATE TABLE columns_transformers (i Int64, j Int16, k Int64) ENGINE = MergeTree ORDER by (i);
INSERT INTO columns_transformers VALUES (100, 10, 324), (120, 8, 23);
SELECT * APPLY(sum) FROM columns_transformers;
```
```
┌─sum(i)─┬─sum(j)─┬─sum(k)─┐
│ 220 │ 18 │ 347 │
└────────┴────────┴────────┘
```
### EXCEPT {#except-modifier}
Specifies the names of one or more columns to exclude from the result. All matching column names are omitted from the output.
**Syntax:**
``` sql
SELECT <expr> EXCEPT ( col_name1 [, col_name2, col_name3, ...] ) FROM [db.]table_name
```
**Example:**
``` sql
SELECT * EXCEPT (i) from columns_transformers;
```
```
┌──j─┬───k─┐
│ 10 │ 324 │
│ 8 │ 23 │
└────┴─────┘
```
### REPLACE {#replace-modifier}
Specifies one or more [expression aliases](../../../sql-reference/syntax.md#syntax-expression_aliases). Each alias must match a column name from the `SELECT *` statement. In the output column list, the column that matches the alias is replaced by the expression in that `REPLACE`.
This modifier does not change the names or order of columns. However, it can change the value and the value type.
**Syntax:**
``` sql
SELECT <expr> REPLACE( <expr> AS col_name) from [db.]table_name
```
**Example:**
``` sql
SELECT * REPLACE(i + 1 AS i) from columns_transformers;
```
```
┌───i─┬──j─┬───k─┐
│ 101 │ 10 │ 324 │
│ 121 │ 8 │ 23 │
└─────┴────┴─────┘
```
### Modifier Combinations {#modifier-combinations}
You can use each modifier separately or combine them.
**Examples:**
Using the same modifier multiple times.
``` sql
SELECT COLUMNS('[jk]') APPLY(toString) APPLY(length) APPLY(max) from columns_transformers;
```
```
┌─max(length(toString(j)))─┬─max(length(toString(k)))─┐
│ 2 │ 3 │
└──────────────────────────┴──────────────────────────┘
```
Using multiple modifiers in a single query.
``` sql
SELECT * REPLACE(i + 1 AS i) EXCEPT (j) APPLY(sum) from columns_transformers;
```
```
┌─sum(plus(i, 1))─┬─sum(k)─┐
│ 222 │ 347 │
└─────────────────┴────────┘
```
[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/)
<!--hide-->

View File

@ -1,5 +1,5 @@
---
toc_title: UNION ALL
toc_title: UNION
---
# UNION ALL Clause {#union-all-clause}
@ -25,10 +25,13 @@ Type casting is performed for unions. For example, if two queries being combined
Queries that are parts of `UNION ALL` can't be enclosed in round brackets. [ORDER BY](../../../sql-reference/statements/select/order-by.md) and [LIMIT](../../../sql-reference/statements/select/limit.md) are applied to separate queries, not to the final result. If you need to apply a conversion to the final result, you can put all the queries with `UNION ALL` in a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
## Limitations {#limitations}
# UNION DISTINCT Clause {#union-distinct-clause}
The difference between `UNION ALL` and `UNION DISTINCT` is that `UNION DISTINCT` applies a distinct transform to the union result; it is equivalent to `SELECT DISTINCT` from a subquery containing `UNION ALL`.
# UNION Clause {#union-clause}
By default, `UNION` behaves the same as `UNION DISTINCT`, but you can specify the union mode with the `union_default_mode` setting; allowed values are 'ALL', 'DISTINCT' or an empty string. However, if you use `UNION` with `union_default_mode` set to an empty string, an exception is thrown.
Only `UNION ALL` is supported. The regular `UNION` (`UNION DISTINCT`) is not supported. If you need `UNION DISTINCT`, you can write `SELECT DISTINCT` from a subquery containing `UNION ALL`.
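A minimal sketch of the difference, using constant subqueries purely for illustration:

``` sql
SELECT 1 AS x UNION ALL SELECT 1;       -- two identical rows
SELECT 1 AS x UNION DISTINCT SELECT 1;  -- duplicates removed, one row
```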
## Implementation Details {#implementation-details}
Queries that are parts of `UNION ALL` can be run simultaneously, and their results can be mixed together.
Queries that are parts of `UNION/UNION ALL/UNION DISTINCT` can be run simultaneously, and their results can be mixed together.

View File

@ -291,7 +291,7 @@ CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME([MIN val1] MAX val2)
LIFETIME({MIN min_val MAX max_val | max_val})
```
Crear [diccionario externo](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) con dado [estructura](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [fuente](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [diseño](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) y [vida](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

View File

@ -291,7 +291,7 @@ CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME([MIN val1] MAX val2)
LIFETIME({MIN min_val MAX max_val | max_val})
```
ایجاد [فرهنگ لغت خارجی](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) با توجه به [ساختار](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [متن](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [طرحبندی](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) و [طول عمر](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

View File

@ -291,7 +291,7 @@ CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME([MIN val1] MAX val2)
LIFETIME({MIN min_val MAX max_val | max_val})
```
Crée [externe dictionnaire](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) avec le [structure](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [disposition](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) et [vie](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

View File

@ -291,7 +291,7 @@ CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME([MIN val1] MAX val2)
LIFETIME({MIN min_val MAX max_val | max_val})
```
作成 [外部辞書](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) 与えられたと [構造](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [ソース](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [レイアウト](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) と [生涯](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

View File

@ -2187,4 +2187,10 @@ SELECT CAST(toNullable(toInt32(0)) AS Int32) as x, toTypeName(x);
Значение по умолчанию: `1`.
## output_format_tsv_null_representation {#output_format_tsv_null_representation}
Позволяет настраивать представление `NULL` для формата выходных данных [TSV](../../interfaces/formats.md#tabseparated). Настройка управляет форматом выходных данных, `\N` является единственным поддерживаемым представлением для формата входных данных TSV.
Значение по умолчанию: `\N`.
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/) <!--hide-->

View File

@ -0,0 +1,70 @@
# system.replicated_fetches {#system_tables-replicated_fetches}
Содержит информацию о выполняемых в данный момент фоновых операциях скачивания кусков данных с других реплик.
Столбцы:
- `database` ([String](../../sql-reference/data-types/string.md)) — имя базы данных.
- `table` ([String](../../sql-reference/data-types/string.md)) — имя таблицы.
- `elapsed` ([Float64](../../sql-reference/data-types/float.md)) — время, прошедшее от момента начала скачивания куска, в секундах.
- `progress` ([Float64](../../sql-reference/data-types/float.md)) — доля выполненной работы от 0 до 1.
- `result_part_name` ([String](../../sql-reference/data-types/string.md)) — имя скачиваемого куска.
- `result_part_path` ([String](../../sql-reference/data-types/string.md)) — абсолютный путь к скачиваемому куску.
- `partition_id` ([String](../../sql-reference/data-types/string.md)) — идентификатор партиции.
- `total_size_bytes_compressed` ([UInt64](../../sql-reference/data-types/int-uint.md)) — общий размер сжатой информации в скачиваемом куске в байтах.
- `bytes_read_compressed` ([UInt64](../../sql-reference/data-types/int-uint.md)) — размер сжатой информации, считанной из скачиваемого куска, в байтах.
- `source_replica_path` ([String](../../sql-reference/data-types/string.md)) — абсолютный путь к исходной реплике.
- `source_replica_hostname` ([String](../../sql-reference/data-types/string.md)) — имя хоста исходной реплики.
- `source_replica_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — номер порта исходной реплики.
- `interserver_scheme` ([String](../../sql-reference/data-types/string.md)) — имя межсерверной схемы.
- `URI` ([String](../../sql-reference/data-types/string.md)) — универсальный идентификатор ресурса.
- `to_detached` ([UInt8](../../sql-reference/data-types/int-uint.md)) — флаг, указывающий на использование выражения `TO DETACHED` в текущих фоновых операциях.
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — идентификатор потока.
**Пример**
``` sql
SELECT * FROM system.replicated_fetches LIMIT 1 FORMAT Vertical;
```
``` text
Row 1:
──────
database: default
table: t
elapsed: 7.243039876
progress: 0.41832135995612835
result_part_name: all_0_0_0
result_part_path: /var/lib/clickhouse/store/700/70080a04-b2de-4adf-9fa5-9ea210e81766/all_0_0_0/
partition_id: all
total_size_bytes_compressed: 1052783726
bytes_read_compressed: 440401920
source_replica_path: /clickhouse/test/t/replicas/1
source_replica_hostname: node1
source_replica_port: 9009
interserver_scheme: http
URI: http://node1:9009/?endpoint=DataPartsExchange%3A%2Fclickhouse%2Ftest%2Ft%2Freplicas%2F1&part=all_0_0_0&client_protocol_version=4&compress=false
to_detached: 0
thread_id: 54
```
**Смотрите также**
- [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system/#query-language-system-replicated)
[Оригинальная статья](https://clickhouse.tech/docs/en/operations/system_tables/replicated_fetches) <!--hide-->

View File

@ -0,0 +1,43 @@
# Обфускатор ClickHouse
Простой инструмент для обфускации табличных данных.
Он считывает данные входной таблицы и создает выходную таблицу, которая сохраняет некоторые свойства входных данных, но при этом содержит другие данные.
Это позволяет публиковать практически реальные данные и использовать их в тестах на производительность.
Обфускатор предназначен для сохранения следующих свойств данных:
- кардинальность (количество уникальных данных) для каждого столбца и каждого кортежа столбцов;
- условная кардинальность: количество уникальных данных одного столбца в соответствии со значением другого столбца;
- вероятностные распределения абсолютного значения целых чисел; знак числа типа Int; показатель степени и знак для чисел с плавающей запятой;
- вероятностное распределение длины строк;
- вероятность нулевых значений чисел; пустые строки и массивы, `NULL`;
- степень сжатия данных алгоритмом LZ77 и семейством энтропийных кодеков;
- непрерывность (величина разницы) значений времени в таблице; непрерывность значений с плавающей запятой;
- дату из значений `DateTime`;
- кодировка UTF-8 значений строки;
- строковые значения выглядят естественным образом.
Большинство перечисленных выше свойств пригодны для тестирования производительности. Чтение данных, фильтрация, агрегирование и сортировка будут работать почти с той же скоростью, что и исходные данные, благодаря сохраненной кардинальности, величине, степени сжатия и т. д.
Он работает детерминированно. Вы задаёте значение инициализатора, а преобразование полностью определяется входными данными и инициализатором.
Некоторые преобразования выполняются один к одному, и их можно отменить. Поэтому нужно использовать большое значение инициализатора и хранить его в секрете.
Обфускатор использует некоторые криптографические примитивы для преобразования данных, но, с криптографической точки зрения, результат будет небезопасным. В нем могут сохраниться данные, которые не следует публиковать.
Он всегда оставляет без изменений числа 0, 1, -1, даты, длины массивов и нулевые флаги.
Например, если у вас есть столбец `IsMobile` в таблице со значениями 0 и 1, то в преобразованных данных он будет иметь то же значение.
Таким образом, пользователь сможет посчитать точное соотношение мобильного трафика.
Давайте рассмотрим случай, когда у вас есть какие-то личные данные в таблице (например, электронная почта пользователя), и вы не хотите их публиковать.
Если ваша таблица достаточно большая и содержит несколько разных электронных почтовых адресов, и ни один из них не встречается часто, то обфускатор полностью анонимизирует все данные. Но, если у вас есть небольшое количество разных значений в столбце, он может скопировать некоторые из них.
В этом случае вам следует посмотреть на алгоритм работы инструмента и настроить параметры командной строки.
Обфускатор полезен в работе со средним объемом данных (не менее 1000 строк).

View File

@ -44,8 +44,6 @@ SELECT sum(y) FROM t_null_big
└────────┘
```
Функция `sum` работает с `NULL` как с `0`. В частности, это означает, что если на вход в функцию подать выборку, где все значения `NULL`, то результат будет `0`, а не `NULL`.
Теперь с помощью функции `groupArray` сформируем массив из столбца `y`:
``` sql

View File

@ -0,0 +1,40 @@
---
toc_priority: 150
---
## initializeAggregation {#initializeaggregation}
Инициализирует агрегацию для введённых строк. Предназначена для функций с суффиксом `State`.
Поможет вам проводить тесты или работать со столбцами типов `AggregateFunction` и `AggregatingMergeTree`.
**Синтаксис**
``` sql
initializeAggregation (aggregate_function, column_1, column_2);
```
**Параметры**
- `aggregate_function` — название функции агрегации, состояние которой нужно создать. [String](../../../sql-reference/data-types/string.md#string).
- `column_n` — столбец, который передается в функцию агрегации как аргумент. [String](../../../sql-reference/data-types/string.md#string).
**Возвращаемое значение**
Возвращает результат агрегации введенной информации. Тип возвращаемого значения такой же, как и для функции, которая становится первым аргументом для `initializeAggregation`.
Пример:
Возвращаемый тип функций с суффиксом `State`: `AggregateFunction`.
**Пример**
Запрос:
```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM system.numbers LIMIT 10000);
```
Результат:
``` text
┌─uniqMerge(state)─┐
│                3 │
└──────────────────┘
```

View File

@ -0,0 +1,382 @@
---
toc_priority: 67
toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438 \u0434\u043b\u044f \u0448\u0438\u0444\u0440\u043e\u0432\u0430\u043d\u0438\u044f"
---
# Функции шифрования {#encryption-functions}
Данные функции реализуют шифрование и расшифровку данных с помощью алгоритма AES (Advanced Encryption Standard).
Длина ключа зависит от режима шифрования: для режимов `-128-`, `-192-` и `-256-` она составляет 16, 24 и 32 байта соответственно.
Длина инициализирующего вектора всегда 16 байт (лишние байты игнорируются).
Обратите внимание, что эти функции работают медленно.
## encrypt {#encrypt}
Функция поддерживает шифрование данных следующими режимами:
- aes-128-ecb, aes-192-ecb, aes-256-ecb
- aes-128-cbc, aes-192-cbc, aes-256-cbc
- aes-128-cfb1, aes-192-cfb1, aes-256-cfb1
- aes-128-cfb8, aes-192-cfb8, aes-256-cfb8
- aes-128-cfb128, aes-192-cfb128, aes-256-cfb128
- aes-128-ofb, aes-192-ofb, aes-256-ofb
- aes-128-gcm, aes-192-gcm, aes-256-gcm
**Синтаксис**
``` sql
encrypt('mode', 'plaintext', 'key' [, iv, aad])
```
**Параметры**
- `mode` — режим шифрования. [String](../../sql-reference/data-types/string.md#string).
- `plaintext` — текст, который будет зашифрован. [String](../../sql-reference/data-types/string.md#string).
- `key` — ключ шифрования. [String](../../sql-reference/data-types/string.md#string).
- `iv` — инициализирующий вектор. Обязателен для `-gcm` режимов, для остальных режимов необязателен. [String](../../sql-reference/data-types/string.md#string).
- `aad` — дополнительные аутентифицированные данные. Не шифруются, но влияют на расшифровку. Параметр работает только с `-gcm` режимами. Для остальных вызовет исключение. [String](../../sql-reference/data-types/string.md#string).
**Возвращаемое значение**
- Зашифрованная строка. [String](../../sql-reference/data-types/string.md#string).
**Примеры**
Создадим такую таблицу:
Запрос:
``` sql
CREATE TABLE encryption_test
(
input String,
key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'),
iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'),
key32 String DEFAULT substring(key, 1, 32),
key24 String DEFAULT substring(key, 1, 24),
key16 String DEFAULT substring(key, 1, 16)
) Engine = Memory;
```
Вставим эти данные:
Запрос:
``` sql
INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?');
```
Пример без `iv`:
Запрос:
``` sql
SELECT 'aes-128-ecb' AS mode, hex(encrypt(mode, input, key16)) FROM encryption_test;
```
Результат:
``` text
┌─mode────────┬─hex(encrypt('aes-128-ecb', input, key16))────────────────────────┐
│ aes-128-ecb │ 4603E6862B0D94BBEC68E0B0DF51D60F │
│ aes-128-ecb │ 3004851B86D3F3950672DE7085D27C03 │
│ aes-128-ecb │ E807F8C8D40A11F65076361AFC7D8B68D8658C5FAA6457985CAA380F16B3F7E4 │
└─────────────┴──────────────────────────────────────────────────────────────────┘
```
Пример с `iv`:
Запрос:
``` sql
SELECT 'aes-256-ctr' AS mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;
```
Результат:
``` text
┌─mode────────┬─hex(encrypt('aes-256-ctr', input, key32, iv))─┐
│ aes-256-ctr │ │
│ aes-256-ctr │ 7FB039F7 │
│ aes-256-ctr │ 5CBD20F7ABD3AC41FCAA1A5C0E119E2B325949 │
└─────────────┴───────────────────────────────────────────────┘
```
Пример в режиме `-gcm`:
Запрос:
``` sql
SELECT 'aes-256-gcm' AS mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test;
```
Результат:
``` text
┌─mode────────┬─hex(encrypt('aes-256-gcm', input, key32, iv))──────────────────────────┐
│ aes-256-gcm │ E99DBEBC01F021758352D7FBD9039EFA │
│ aes-256-gcm │ 8742CE3A7B0595B281C712600D274CA881F47414 │
│ aes-256-gcm │ A44FD73ACEB1A64BDE2D03808A2576EDBB60764CC6982DB9AF2C33C893D91B00C60DC5 │
└─────────────┴────────────────────────────────────────────────────────────────────────┘
```
Пример в режиме `-gcm` и с `aad`:
Запрос:
``` sql
SELECT 'aes-192-gcm' AS mode, hex(encrypt(mode, input, key24, iv, 'AAD')) FROM encryption_test;
```
Результат:
``` text
┌─mode────────┬─hex(encrypt('aes-192-gcm', input, key24, iv, 'AAD'))───────────────────┐
│ aes-192-gcm │ 04C13E4B1D62481ED22B3644595CB5DB │
│ aes-192-gcm │ 9A6CF0FD2B329B04EAD18301818F016DF8F77447 │
│ aes-192-gcm │ B961E9FD9B940EBAD7ADDA75C9F198A40797A5EA1722D542890CC976E21113BBB8A7AA │
└─────────────┴────────────────────────────────────────────────────────────────────────┘
```
## aes_encrypt_mysql {#aes_encrypt_mysql}
Совместима с шифрованием MySQL, результат может быть расшифрован функцией [AES_DECRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-decrypt).
Функция поддерживает шифрование данных следующими режимами:
- aes-128-ecb, aes-192-ecb, aes-256-ecb
- aes-128-cbc, aes-192-cbc, aes-256-cbc
- aes-128-cfb1, aes-192-cfb1, aes-256-cfb1
- aes-128-cfb8, aes-192-cfb8, aes-256-cfb8
- aes-128-cfb128, aes-192-cfb128, aes-256-cfb128
- aes-128-ofb, aes-192-ofb, aes-256-ofb
**Синтаксис**
```sql
aes_encrypt_mysql('mode', 'plaintext', 'key' [, iv])
```
**Параметры**
- `mode` — режим шифрования. [String](../../sql-reference/data-types/string.md#string).
- `plaintext` — текст, который будет зашифрован. [String](../../sql-reference/data-types/string.md#string).
- `key` — ключ шифрования. [String](../../sql-reference/data-types/string.md#string).
- `iv` — инициализирующий вектор. Необязателен. [String](../../sql-reference/data-types/string.md#string).
**Возвращаемое значение**
- Зашифрованная строка. [String](../../sql-reference/data-types/string.md#string).
**Примеры**
Создадим такую таблицу:
Запрос:
``` sql
CREATE TABLE encryption_test
(
input String,
key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'),
iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'),
key32 String DEFAULT substring(key, 1, 32),
key24 String DEFAULT substring(key, 1, 24),
key16 String DEFAULT substring(key, 1, 16)
) Engine = Memory;
```
Вставим эти данные:
Запрос:
``` sql
INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?');
```
Пример без `iv`:
Запрос:
``` sql
SELECT 'aes-128-cbc' AS mode, hex(aes_encrypt_mysql(mode, input, key32)) FROM encryption_test;
```
Результат:
``` text
┌─mode────────┬─hex(aes_encrypt_mysql('aes-128-cbc', input, key32))──────────────┐
│ aes-128-cbc │ FEA8CFDE6EE2C6E7A2CC6ADDC9F62C83 │
│ aes-128-cbc │ 78B16CD4BE107660156124C5FEE6454A │
│ aes-128-cbc │ 67C0B119D96F18E2823968D42871B3D179221B1E7EE642D628341C2B29BA2E18 │
└─────────────┴──────────────────────────────────────────────────────────────────┘
```
Пример с `iv`:
Запрос:
``` sql
SELECT 'aes-256-cfb128' AS mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test;
```
Результат:
``` text
┌─mode───────────┬─hex(aes_encrypt_mysql('aes-256-cfb128', input, key32, iv))─┐
│ aes-256-cfb128 │ │
│ aes-256-cfb128 │ 7FB039F7 │
│ aes-256-cfb128 │ 5CBD20F7ABD3AC41FCAA1A5C0E119E2BB5174F │
└────────────────┴────────────────────────────────────────────────────────────┘
```
## decrypt {#decrypt}
Функция поддерживает расшифровку данных следующими режимами:
- aes-128-ecb, aes-192-ecb, aes-256-ecb
- aes-128-cbc, aes-192-cbc, aes-256-cbc
- aes-128-cfb1, aes-192-cfb1, aes-256-cfb1
- aes-128-cfb8, aes-192-cfb8, aes-256-cfb8
- aes-128-cfb128, aes-192-cfb128, aes-256-cfb128
- aes-128-ofb, aes-192-ofb, aes-256-ofb
- aes-128-gcm, aes-192-gcm, aes-256-gcm
**Синтаксис**
```sql
decrypt('mode', 'ciphertext', 'key' [, iv, aad])
```
**Параметры**
- `mode` — режим шифрования. [String](../../sql-reference/data-types/string.md#string).
- `ciphertext` — зашифрованный текст, который будет расшифрован. [String](../../sql-reference/data-types/string.md#string).
- `key` — ключ шифрования. [String](../../sql-reference/data-types/string.md#string).
- `iv` — инициализирующий вектор. Обязателен для `-gcm` режимов, для остальных режимов опциональный. [String](../../sql-reference/data-types/string.md#string).
- `aad` — дополнительные аутентифицированные данные. Текст не будет расшифрован, если это значение неверно. Работает только с `-gcm` режимами. Для остальных вызовет исключение. [String](../../sql-reference/data-types/string.md#string).
**Возвращаемое значение**
- Расшифрованная строка. [String](../../sql-reference/data-types/string.md#string).
**Примеры**
Создадим такую таблицу:
Запрос:
``` sql
CREATE TABLE encryption_test
(
input String,
key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'),
iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'),
key32 String DEFAULT substring(key, 1, 32),
key24 String DEFAULT substring(key, 1, 24),
key16 String DEFAULT substring(key, 1, 16)
) Engine = Memory;
```
Вставим эти данные:
Запрос:
``` sql
INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?');
```
Запрос:
``` sql
SELECT 'aes-128-ecb' AS mode, decrypt(mode, encrypt(mode, input, key16), key16) FROM encryption_test;
```
Результат:
```text
┌─mode────────┬─decrypt('aes-128-ecb', encrypt('aes-128-ecb', input, key16), key16)─┐
│ aes-128-ecb │ │
│ aes-128-ecb │ text │
│ aes-128-ecb │ What Is ClickHouse? │
└─────────────┴─────────────────────────────────────────────────────────────────────┘
```
## aes_decrypt_mysql {#aes_decrypt_mysql}
Совместима с шифрованием MySQL и может расшифровать данные, зашифрованные функцией [AES_ENCRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-encrypt).
Функция поддерживает расшифровку данных следующими режимами:
- aes-128-ecb, aes-192-ecb, aes-256-ecb
- aes-128-cbc, aes-192-cbc, aes-256-cbc
- aes-128-cfb1, aes-192-cfb1, aes-256-cfb1
- aes-128-cfb8, aes-192-cfb8, aes-256-cfb8
- aes-128-cfb128, aes-192-cfb128, aes-256-cfb128
- aes-128-ofb, aes-192-ofb, aes-256-ofb
**Синтаксис**
```sql
aes_decrypt_mysql('mode', 'ciphertext', 'key' [, iv])
```
**Параметры**
- `mode` — режим шифрования. [String](../../sql-reference/data-types/string.md#string).
- `ciphertext` — зашифрованный текст, который будет расшифрован. [String](../../sql-reference/data-types/string.md#string).
- `key` — ключ шифрования. [String](../../sql-reference/data-types/string.md#string).
- `iv` — инициализирующий вектор. Необязателен. [String](../../sql-reference/data-types/string.md#string).
**Возвращаемое значение**
- Расшифрованная строка. [String](../../sql-reference/data-types/string.md#string).
**Примеры**
Создадим такую таблицу:
Запрос:
``` sql
CREATE TABLE encryption_test
(
input String,
key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'),
iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'),
key32 String DEFAULT substring(key, 1, 32),
key24 String DEFAULT substring(key, 1, 24),
key16 String DEFAULT substring(key, 1, 16)
) Engine = Memory;
```
Вставим эти данные:
Запрос:
``` sql
INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?');
```
Запрос:
``` sql
SELECT 'aes-128-cbc' AS mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key), key) FROM encryption_test;
```
Результат:
``` text
┌─mode────────┬─aes_decrypt_mysql('aes-128-cbc', aes_encrypt_mysql('aes-128-cbc', input, key), key)─┐
│ aes-128-cbc │ │
│ aes-128-cbc │ text │
│ aes-128-cbc │ What Is ClickHouse? │
└─────────────┴─────────────────────────────────────────────────────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/ru/sql-reference/functions/encryption_functions/) <!--hide-->

View File

@ -306,3 +306,68 @@ execute_native_thread_routine
start_thread
clone
```
## tid {#tid}
Возвращает id потока, в котором обрабатывается текущий [Block](https://clickhouse.tech/docs/ru/development/architecture/#block).
**Синтаксис**
``` sql
tid()
```
**Возвращаемое значение**
- Идентификатор текущего потока. [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges).
**Пример**
Запрос:
``` sql
SELECT tid();
```
Результат:
``` text
┌─tid()─┐
│ 3878 │
└───────┘
```
## logTrace {#logtrace}
Выводит сообщение в лог сервера для каждого [Block](https://clickhouse.tech/docs/ru/development/architecture/#block).
**Синтаксис**
``` sql
logTrace('message')
```
**Параметры**
- `message` — сообщение, которое отправляется в серверный лог. [String](../../sql-reference/data-types/string.md#string).
**Возвращаемое значение**
- Всегда возвращает 0.
**Пример**
Запрос:
``` sql
SELECT logTrace('logTrace message');
```
Результат:
``` text
┌─logTrace('logTrace message')─┐
│ 0 │
└──────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/introspection/) <!--hide-->

View File

@ -16,7 +16,7 @@ CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME([MIN val1] MAX val2)
LIFETIME({MIN min_val MAX max_val | max_val})
```
Создаёт [внешний словарь](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) с заданной [структурой](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [источником](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [способом размещения в памяти](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) и [периодом обновления](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
@ -27,5 +27,5 @@ LIFETIME([MIN val1] MAX val2)
Смотрите [Внешние словари](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/dictionary)
[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/dictionary)
<!--hide-->

View File

@ -27,9 +27,9 @@ toc_title: FROM
### Недостатки {#drawbacks}
Запросы, которые используют `FINAL` выполняются не так быстро, как аналогичные запросы без него, потому что:
Запросы, которые используют `FINAL`, выполняются немного медленнее, чем аналогичные запросы без него, потому что:
- Запрос выполняется в одном потоке, и данные мёржатся во время выполнения запроса.
- Данные мёржатся во время выполнения запроса.
- Запросы с модификатором `FINAL` читают столбцы первичного ключа в дополнение к столбцам, используемым в запросе.
**В большинстве случаев избегайте использования `FINAL`.** Общий подход заключается в использовании агрегирующих запросов, которые предполагают, что фоновые процессы движков семейства `MergeTree` ещё не случились (например, сами отбрасывают дубликаты). {## TODO: examples ##}

View File

@ -43,6 +43,153 @@ toc_title: GROUP BY
Если в `GROUP BY` передать несколько ключей, то в результате мы получим все комбинации выборки, как если бы `NULL` был конкретным значением.
## Модификатор WITH ROLLUP {#with-rollup-modifier}
Модификатор `WITH ROLLUP` применяется для подсчета подытогов для ключевых выражений. При этом учитывается порядок следования ключевых выражений в списке `GROUP BY`. Подытоги подсчитываются в обратном порядке: сначала для последнего ключевого выражения в списке, потом для предпоследнего и так далее вплоть до самого первого ключевого выражения.
Строки с подытогами добавляются в конец результирующей таблицы. В колонках, по которым строки уже сгруппированы, указывается значение `0` или пустая строка.
!!! note "Примечание"
Если в запросе есть секция [HAVING](../../../sql-reference/statements/select/having.md), она может повлиять на результаты расчета подытогов.
**Пример**
Рассмотрим таблицу t:
```text
┌─year─┬─month─┬─day─┐
│ 2019 │ 1 │ 5 │
│ 2019 │ 1 │ 15 │
│ 2020 │ 1 │ 5 │
│ 2020 │ 1 │ 15 │
│ 2020 │ 10 │ 5 │
│ 2020 │ 10 │ 15 │
└──────┴───────┴─────┘
```
Запрос:
```sql
SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH ROLLUP;
```
Поскольку секция `GROUP BY` содержит три ключевых выражения, результат состоит из четырех таблиц с подытогами, которые как бы "сворачиваются" справа налево:
- `GROUP BY year, month, day`;
- `GROUP BY year, month` (а колонка `day` заполнена нулями);
- `GROUP BY year` (теперь обе колонки `month, day` заполнены нулями);
- и общий итог (все три колонки с ключевыми выражениями заполнены нулями).
```text
┌─year─┬─month─┬─day─┬─count()─┐
│ 2020 │ 10 │ 15 │ 1 │
│ 2020 │ 1 │ 5 │ 1 │
│ 2019 │ 1 │ 5 │ 1 │
│ 2020 │ 1 │ 15 │ 1 │
│ 2019 │ 1 │ 15 │ 1 │
│ 2020 │ 10 │ 5 │ 1 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 2019 │ 1 │ 0 │ 2 │
│ 2020 │ 1 │ 0 │ 2 │
│ 2020 │ 10 │ 0 │ 2 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 2019 │ 0 │ 0 │ 2 │
│ 2020 │ 0 │ 0 │ 4 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 0 │ 0 │ 0 │ 6 │
└──────┴───────┴─────┴─────────┘
```
## Модификатор WITH CUBE {#with-cube-modifier}
Модификатор `WITH CUBE` применяется для расчета подытогов по всем комбинациям группировки ключевых выражений в списке `GROUP BY`.
Строки с подытогами добавляются в конец результирующей таблицы. В колонках, по которым выполняется группировка, указывается значение `0` или пустая строка.
!!! note "Примечание"
Если в запросе есть секция [HAVING](../../../sql-reference/statements/select/having.md), она может повлиять на результаты расчета подытогов.
**Пример**
Рассмотрим таблицу t:
```text
┌─year─┬─month─┬─day─┐
│ 2019 │ 1 │ 5 │
│ 2019 │ 1 │ 15 │
│ 2020 │ 1 │ 5 │
│ 2020 │ 1 │ 15 │
│ 2020 │ 10 │ 5 │
│ 2020 │ 10 │ 15 │
└──────┴───────┴─────┘
```
Запрос:
```sql
SELECT year, month, day, count(*) FROM t GROUP BY year, month, day WITH CUBE;
```
Поскольку секция `GROUP BY` содержит три ключевых выражения, результат состоит из восьми таблиц с подытогами — по таблице для каждой комбинации ключевых выражений:
- `GROUP BY year, month, day`
- `GROUP BY year, month`
- `GROUP BY year, day`
- `GROUP BY year`
- `GROUP BY month, day`
- `GROUP BY month`
- `GROUP BY day`
- и общий итог.
Колонки, которые не участвуют в `GROUP BY`, заполнены нулями.
```text
┌─year─┬─month─┬─day─┬─count()─┐
│ 2020 │ 10 │ 15 │ 1 │
│ 2020 │ 1 │ 5 │ 1 │
│ 2019 │ 1 │ 5 │ 1 │
│ 2020 │ 1 │ 15 │ 1 │
│ 2019 │ 1 │ 15 │ 1 │
│ 2020 │ 10 │ 5 │ 1 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 2019 │ 1 │ 0 │ 2 │
│ 2020 │ 1 │ 0 │ 2 │
│ 2020 │ 10 │ 0 │ 2 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 2020 │ 0 │ 5 │ 2 │
│ 2019 │ 0 │ 5 │ 1 │
│ 2020 │ 0 │ 15 │ 2 │
│ 2019 │ 0 │ 15 │ 1 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 2019 │ 0 │ 0 │ 2 │
│ 2020 │ 0 │ 0 │ 4 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 0 │ 1 │ 5 │ 2 │
│ 0 │ 10 │ 15 │ 1 │
│ 0 │ 10 │ 5 │ 1 │
│ 0 │ 1 │ 15 │ 2 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 0 │ 1 │ 0 │ 4 │
│ 0 │ 10 │ 0 │ 2 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 0 │ 0 │ 5 │ 3 │
│ 0 │ 0 │ 15 │ 3 │
└──────┴───────┴─────┴─────────┘
┌─year─┬─month─┬─day─┬─count()─┐
│ 0 │ 0 │ 0 │ 6 │
└──────┴───────┴─────┴─────────┘
```
## Модификатор WITH TOTALS {#with-totals-modifier}
Если указан модификатор `WITH TOTALS`, то будет посчитана ещё одна строчка, в которой в столбцах-ключах будут содержаться значения по умолчанию (нули, пустые строки), а в столбцах агрегатных функций - значения, посчитанные по всем строкам («тотальные» значения).
@ -86,8 +233,6 @@ SELECT
FROM hits
```
Но, в отличие от стандартного SQL, если в таблице нет строк (вообще нет или после фильтрации с помощью WHERE), в качестве результата возвращается пустой результат, а не результат из одной строки, содержащий «начальные» значения агрегатных функций.
В отличие от MySQL (и в соответствии со стандартом SQL), вы не можете получить какое-нибудь значение некоторого столбца, не входящего в ключ или агрегатную функцию (за исключением константных выражений). Для обхода этого вы можете воспользоваться агрегатной функцией any (получить первое попавшееся значение) или min/max.
Пример:
@ -103,10 +248,6 @@ GROUP BY domain
GROUP BY вычисляет для каждого встретившегося различного значения ключей, набор значений агрегатных функций.
Не поддерживается GROUP BY по столбцам-массивам.
Не поддерживается указание констант в качестве аргументов агрегатных функций. Пример: `sum(1)`. Вместо этого, вы можете избавиться от констант. Пример: `count()`.
## Детали реализации {#implementation-details}
Агрегация является одной из наиболее важных возможностей столбцовых СУБД, и поэтому её реализация является одной из наиболее сильно оптимизированных частей ClickHouse. По умолчанию агрегирование выполняется в памяти с помощью хэш-таблицы. Она имеет более 40 специализаций, которые выбираются автоматически в зависимости от типов данных ключа группировки.

View File

@ -18,7 +18,7 @@ SELECT [DISTINCT] expr_list
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>)
[PREWHERE expr]
[WHERE expr]
[GROUP BY expr_list] [WITH TOTALS]
[GROUP BY expr_list] [WITH ROLLUP|WITH CUBE] [WITH TOTALS]
[HAVING expr]
[ORDER BY expr_list] [WITH FILL] [FROM expr] [TO expr] [STEP expr]
[LIMIT [offset_value, ]n BY columns]

View File

@ -291,7 +291,7 @@ CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME([MIN val1] MAX val2)
LIFETIME({MIN min_val MAX max_val | max_val})
```
Oluşturuyor [dış sözlük](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) verilen ile [yapılı](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [kaynaklı](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [düzen](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) ve [ömür](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

View File

@ -21,15 +21,15 @@ toc_title: "\u266A\u64CD\u573A\u266A"
ClickHouse体验还有如下
[ClickHouse管理服务](https://cloud.yandex.com/services/managed-clickhouse)
实例托管 [Yandex云](https://cloud.yandex.com/).
更多信息 [云提供商](../commercial/cloud.md).
实例托管 [Yandex云](https://cloud.yandex.com/)
更多信息 [云提供商](../commercial/cloud.md)
ClickHouse体验平台界面实际上是通过ClickHouse [HTTP API](../interfaces/http.md)接口实现的.
体验平台后端只是一个ClickHouse集群没有任何额外的服务器端应用程序。
体验平台也同样提供了ClickHouse HTTPS服务端口。
您可以使用任何HTTP客户端向体验平台进行查询例如 [curl](https://curl.haxx.se) 或 [wget](https://www.gnu.org/software/wget/),或使用以下方式建立连接 [JDBC](../interfaces/jdbc.md) 或 [ODBC](../interfaces/odbc.md) 司机
有关支持ClickHouse的软件产品的更多信息请访问 [这里](../interfaces/index.md).
您可以使用任何HTTP客户端向体验平台进行查询例如 [curl](https://curl.haxx.se) 或 [wget](https://www.gnu.org/software/wget/),或使用以下方式建立连接 [JDBC](../interfaces/jdbc.md) 或 [ODBC](../interfaces/odbc.md) 驱动。
有关支持ClickHouse的软件产品的更多信息请访问 [这里](../interfaces/index.md)
| 参数 | 值 |
|:---------|:--------------------------------------|

View File

@ -4,53 +4,50 @@ ClickHouse是一个用于联机分析(OLAP)的列式数据库管理系统(DBMS)
在传统的行式数据库系统中,数据按如下顺序存储:
| row | watchID | JavaEnable | title | GoodEvent | EventTime |
|-----|-------------|------------|------------|-----------|---------------------|
| #0 | 89354350662 | 1 | 投资者关系 | 1 | 2016-05-18 05:19:20 |
| #1 | 90329509958 | 0 | 联系我们 | 1 | 2016-05-18 08:10:20 |
| #2 | 89953706054 | 1 | 任务 | 1 | 2016-05-18 07:38:00 |
| #N | … | … | … | … | … |
| Row | WatchID | JavaEnable | Title | GoodEvent | EventTime |
|-----|-------------|------------|--------------------|-----------|---------------------|
| #0 | 89354350662 | 1 | Investor Relations | 1 | 2016-05-18 05:19:20 |
| #1 | 90329509958 | 0 | Contact us | 1 | 2016-05-18 08:10:20 |
| #2 | 89953706054 | 1 | Mission | 1 | 2016-05-18 07:38:00 |
| #N | … | … | … | … | … |
处于同一行中的数据总是被物理的存储在一起。
常见的行式数据库系统有: MySQL、Postgres和MS SQL Server。
{: .灰色 }
常见的行式数据库系统有:`MySQL`、`Postgres`和`MS SQL Server`。
在列式数据库系统中,数据按如下的顺序存储:
| row: | #0 | #1 | #2 | #N |
| Row: | #0 | #1 | #2 | #N |
|-------------|---------------------|---------------------|---------------------|-----|
| watchID: | 89354350662 | 90329509958 | 89953706054 | … |
| WatchID: | 89354350662 | 90329509958 | 89953706054 | … |
| JavaEnable: | 1 | 0 | 1 | … |
| title: | 投资者关系 | 联系我们 | 任务 | … |
| Title: | Investor Relations | Contact us | Mission | … |
| GoodEvent: | 1 | 1 | 1 | … |
| EventTime: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | … |
| EventTime: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | … |
该示例中只展示了数据在列式数据库中数据的排列方式。
对于存储而言,列式数据库总是将同一列的数据存储在一起,不同列的数据也总是分开存储。
这些示例只显示了数据的排列顺序。来自不同列的值被单独存储,来自同一列的数据被存储在一起。
常见的列式数据库有: Vertica、 Paraccel (Actian MatrixAmazon Redshift)、 Sybase IQ、 Exasol、 Infobright、 InfiniDB、 MonetDB (VectorWise Actian Vector)、 LucidDB、 SAP HANA、 Google Dremel、 Google PowerDrill、 Druid、 kdb+。
{: .灰色 }
不同的数据存储方式适用不同的业务场景,数据访问的场景包括:进行了何种查询、多久查询一次以及各类查询的比例; 每种查询读取多少数据————行、列和字节;读取数据和写入数据之间的关系;使用的数据集大小以及如何使用本地的数据集;是否使用事务,以及它们是如何进行隔离的;数据的复制机制与数据的完整性要求;每种类型的查询要求的延迟与吞吐量等等。
不同的数据存储方式适用不同的业务场景,数据访问的场景包括:进行了何种查询、多久查询一次以及各类查询的比例;每种类型的查询(行、列和字节)读取多少数据;读取数据和更新之间的关系;使用的数据集大小以及如何使用本地的数据集;是否使用事务,以及它们是如何进行隔离的;数据的复制机制与数据的完整性要求;每种类型的查询要求的延迟与吞吐量等等。
系统负载越高,依据使用场景进行定制化就越重要,并且定制将会变的越精细。没有一个系统能够同时适用所有明显不同的业务场景。如果系统适用于广泛的场景,在负载高的情况下,要兼顾所有的场景,那么将不得不做出选择。是要平衡还是要效率?
系统负载越高,依据使用场景进行定制化就越重要,并且定制将会变的越精细。没有一个系统能够同时适用所有不同的业务场景。如果系统适用于广泛的场景,在负载高的情况下,要兼顾所有的场景,那么将不得不做出选择。是要平衡还是要效率?
## OLAP场景的关键特征 {#olapchang-jing-de-guan-jian-te-zheng}
- 大多数是读请求
- 数据总是以相当大的批(\> 1000 rows)进行写入
- 不修改已添加的数据
- 每次查询都从数据库中读取大量的行,但是同时又仅需要少量的列
- 大多数是读请求
- 数据以相当大的批次(\> 1000行)更新,而不是单行更新;或者根本没有更新。
- 已添加到数据库的数据不能修改。
- 对于读取,从数据库中提取相当多的行,但只提取列的一小部分。
- 宽表,即每个表包含着大量的列
- 较少的查询(通常每台服务器每秒数百个查询或更少)
- 查询相对较少(通常每台服务器每秒查询数百次或更少)
- 对于简单查询允许延迟大约50毫秒
- 列中的数据相对较小: 数字和短字符串(例如每个URL 60个字节)
- 处理单个查询时需要高吞吐量(每个服务器每秒高达数十亿行)
- 列中的数据相对较小:数字和短字符串(例如每个URL 60个字节)
- 处理单个查询时需要高吞吐量(每台服务器每秒可达数十亿行)
- 事务不是必须的
- 对数据一致性要求低
- 每一个查询除了一个大表外都很小
- 查询结果明显小于源数据,换句话说,数据被过滤或聚合后能够被盛放在单台服务器的内存
- 每个查询有一个大表。除了他意以外,其他的都很小。
- 查询结果明显小于源数据。换句话说数据经过过滤或聚合因此结果适合于单个服务器的RAM
很容易可以看出OLAP场景与其他通常业务场景(例如,OLTP或K/V)有很大的不同, 因此想要使用OLTP或Key-Value数据库去高效的处理分析查询场景并不是非常完美的适用方案。例如使用OLAP数据库去处理分析请求通常要优于使用MongoDB或Redis去处理分析请求。

View File

@ -1,6 +1,6 @@
---
toc_priority: 8
toc_title: "\u91C7\u7528\u8005"
toc_priority: 5
toc_title: "ClickHouse用户"
---
# ClickHouse用户 {#clickhouse-adopters}

View File

@ -1,3 +1,8 @@
---
toc_priority: 2
toc_title: ClickHouse的特性
---
# ClickHouse的特性 {#clickhouse-de-te-xing}
## 真正的列式数据库管理系统 {#zhen-zheng-de-lie-shi-shu-ju-ku-guan-li-xi-tong}
@ -12,9 +17,13 @@
在一些列式数据库管理系统中(例如InfiniDB CE 和 MonetDB) 并没有使用数据压缩。但是, 若想达到比较优异的性能,数据压缩确实起到了至关重要的作用。
除了在磁盘空间和CPU消耗之间进行不同权衡的高效通用压缩编解码器之外ClickHouse还提供针对特定类型数据的[专用编解码器](../sql-reference/statements/create/table.md#create-query-specialized-codecs)这使得ClickHouse能够与更小的数据库(如时间序列数据库)竞争并超越它们。
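As a hedged sketch of the specialized and general-purpose column codecs mentioned above (assuming a local server; the specific codec choices are illustrative):

```bash
# DoubleDelta and Gorilla are specialized codecs for monotonic timestamps and float gauges;
# LZ4 and ZSTD are general-purpose codecs that can be chained after them.
clickhouse-client --query "
    CREATE TABLE codec_demo
    (
        ts  DateTime CODEC(DoubleDelta, LZ4),
        val Float64  CODEC(Gorilla, ZSTD(3))
    )
    ENGINE = MergeTree
    ORDER BY ts"
```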
## 数据的磁盘存储 {#shu-ju-de-ci-pan-cun-chu}
许多的列式数据库(如 SAP HANA, Google PowerDrill)只能在内存中工作这种方式会造成比实际更多的设备预算。ClickHouse被设计用于工作在传统磁盘上的系统它提供每GB更低的存储成本但如果有可以使用SSD和内存它也会合理的利用这些资源。
许多的列式数据库(如 SAP HANA, Google PowerDrill)只能在内存中工作,这种方式会造成比实际更多的设备预算。
ClickHouse被设计用于工作在传统磁盘上的系统它提供每GB更低的存储成本但如果可以使用SSD和内存它也会合理的利用这些资源。
## 多核心并行处理 {#duo-he-xin-bing-xing-chu-li}
@ -27,9 +36,11 @@ ClickHouse会使用服务器上一切可用的资源从而以最自然的方
## 支持SQL {#zhi-chi-sql}
ClickHouse支持基于SQL的声明式查询语言该语言大部分情况下是与SQL标准兼容的。
支持的查询包括 GROUP BYORDER BYINJOIN以及非相关子查询。
不支持窗口函数和相关子查询。
ClickHouse支持一种[基于SQL的声明式查询语言](../sql-reference/index.md),它在许多情况下与[ANSI SQL标准](../sql-reference/ansi.md)相同。
支持的查询[GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), [FROM](../sql-reference/statements/select/from.md), [JOIN](../sql-reference/statements/select/join.md), [IN](../sql-reference/operators/in.md)以及非相关子查询。
相关(依赖性)子查询和窗口函数暂不受支持,但将来会被实现。
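A hedged sketch exercising several of the supported constructs listed above (GROUP BY, ORDER BY, IN with a non-correlated subquery), assuming a local server:

```bash
clickhouse-client --query "
    SELECT n % 3 AS key, count() AS cnt
    FROM (SELECT number AS n FROM numbers(100)) AS t
    WHERE n IN (SELECT number FROM numbers(50))
    GROUP BY key
    ORDER BY key"
```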
## 向量引擎 {#xiang-liang-yin-qing}
@ -55,12 +66,20 @@ ClickHouse提供各种各样在允许牺牲数据精度的情况下对查询进
2. 基于数据的部分样本进行近似查询。这时,仅会从磁盘检索少部分比例的数据。
3. 不使用全部的聚合条件,通过随机选择有限个数据聚合条件进行聚合。这在数据聚合条件满足某些分布条件下,在提供相当准确的聚合结果的同时降低了计算资源的使用。
## Adaptive Join Algorithm {#adaptive-join-algorithm}
ClickHouse支持自定义[JOIN](../sql-reference/statements/select/join.md)多个表,它更倾向于散列连接算法,如果有多个大表,则使用合并-连接算法
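A hedged join sketch; the `join_algorithm` setting is assumed to be available in the server version at hand:

```bash
# 'auto' lets the server start with a hash join and fall back to a merge join if memory runs short.
clickhouse-client --query "
    SELECT l.k, r.v
    FROM (SELECT number AS k FROM numbers(5)) AS l
    INNER JOIN (SELECT number AS k, number * 10 AS v FROM numbers(5)) AS r ON l.k = r.k
    SETTINGS join_algorithm = 'auto'"
```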
## 支持数据复制和数据完整性 {#zhi-chi-shu-ju-fu-zhi-he-shu-ju-wan-zheng-xing}
ClickHouse使用异步的多主复制技术。当数据被写入任何一个可用副本后系统会在后台将数据分发给其他副本以保证系统在不同副本上保持相同的数据。在大多数情况下ClickHouse能在故障后自动恢复在一些少数的复杂情况下需要手动恢复。
更多信息,参见 [数据复制](../engines/table-engines/mergetree-family/replication.md)。
## 角色的访问控制 {#role-based-access-control}
ClickHouse使用SQL查询实现用户帐户管理并允许[角色的访问控制](../operations/access-rights.md)类似于ANSI SQL标准和流行的关系数据库管理系统。
# 限制 {#clickhouseke-xian-zhi}
1. 没有完整的事务支持。

View File

@ -1,3 +1,8 @@
---
toc_priority: 4
toc_title: ClickHouse历史
---
# ClickHouse历史 {#clickhouseli-shi}
ClickHouse最初是为 [YandexMetrica](https://metrica.yandex.com/) [世界第二大Web分析平台](http://w3techs.com/technologies/overview/traffic_analysis/all) 而开发的。多年来一直作为该系统的核心组件被该系统持续使用着。目前为止该系统在ClickHouse中有超过13万亿条记录并且每天超过200多亿个事件被处理。它允许直接从原始数据中动态查询并生成报告。本文简要介绍了ClickHouse在其早期发展阶段的目标。

View File

@ -1,3 +1,8 @@
---
toc_priority: 3
toc_title: ClickHouse性能
---
# 性能 {#performance}
根据Yandex的内部测试结果ClickHouse表现出了比同类可比较产品更优的性能。你可以在 [这里](https://clickhouse.tech/benchmark/dbms/) 查看具体的测试结果。

View File

@ -33,10 +33,10 @@ ClickHouse 收集的指标项:
- 服务用于计算的资源占用的各种指标。
- 关于查询处理的常见统计信息。
可以在 [系统指标](system-tables/metrics.md#system_tables-metrics) [系统事件](system-tables/events.md#system_tables-events) 以及[系统异步指标](system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项。
可以在[系统指标](system-tables/metrics.md#system_tables-metrics)[系统事件](system-tables/events.md#system_tables-events)以及[系统异步指标](system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics)等系统表查看所有的指标项。
可以配置ClickHouse 往 [石墨](https://github.com/graphite-project)导入指标。 参考 [石墨部分](server-configuration-parameters/settings.md#server_configuration_parameters-graphite) 配置文件。在配置指标导出之前需要参考Graphite[官方教程](https://graphite.readthedocs.io/en/latest/install.html)搭建服务。
可以配置ClickHouse向[Graphite](https://github.com/graphite-project)推送监控信息并导入指标。参考[Graphite监控](server-configuration-parameters/settings.md#server_configuration_parameters-graphite)配置文件。在配置指标导出之前,需要参考[Graphite官方教程](https://graphite.readthedocs.io/en/latest/install.html)搭建Graphite服务。
此外您可以通过HTTP API监视服务器可用性。 将HTTP GET请求发送到 `/ping` 如果服务器可用,它将以 `200 OK` 响应。
此外您可以通过HTTP API监视服务器可用性。将HTTP GET请求发送到`/ping`。如果服务器可用,它将以 `200 OK` 响应。
要监视服务器集群的配置,应设置[max_replica_delay_for_distributed_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries)参数并使用HTTP资源`/replicas_status`。 如果副本可用,并且不延迟在其他副本之后,则对`/replicas_status`的请求将返回200 OK。 如果副本滞后,请求将返回 `503 HTTP_SERVICE_UNAVAILABLE`,包括有关待办事项大小的信息。
要监视服务器集群的配置,应设置[max_replica_delay_for_distributed_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries)参数并使用HTTP资源`/replicas_status`。 如果副本可用,并且不延迟在其他副本之后,则对`/replicas_status`的请求将返回`200 OK`。 如果副本滞后,请求将返回`503 HTTP_SERVICE_UNAVAILABLE`,包括有关待办事项大小的信息。
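A hedged sketch of both HTTP probes mentioned above, assuming a local server on the default HTTP port 8123:

```bash
# Liveness: prints "Ok." and returns HTTP 200 while the server is up.
curl -sS 'http://localhost:8123/ping'

# Replica lag: 200 when the replica is in sync, 503 with a queue-size message when it lags.
curl -sS -w 'HTTP %{http_code}\n' 'http://localhost:8123/replicas_status'
```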

View File

@ -3,18 +3,18 @@ toc_priority: 60
toc_title: clickhouse-local
---
# ツ环板-ョツ嘉ッツ偲 {#clickhouse-local}
# ClickHouse Local {#clickhouse-local}
`clickhouse-local` 程序使您能够对本地文件执行快速处理而无需部署和配置ClickHouse服务器。
`clickhouse-local`模式可以使您能够对本地文件执行快速处理而无需部署和配置ClickHouse服务器。
接受表示表的数据并使用以下方式查询它们 [ツ环板ECTョツ嘉ッツ偲](../../operations/utilities/clickhouse-local.md).
[ClickHouse SQL语法](../../operations/utilities/clickhouse-local.md)支持对表格数据的查询.
`clickhouse-local` 使用与ClickHouse server相同的核心因此它支持大多数功能以及相同的格式和表引擎。
`clickhouse-local`使用与ClickHouse Server相同的核心因此它支持大多数功能以及相同的格式和表引擎。
默认情况下 `clickhouse-local` 不能访问同一主机上的数据,但它支持使用以下方式加载服务器配置 `--config-file` 争论
默认情况下`clickhouse-local`不能访问同一主机上的数据,但它支持使用`--config-file`方式加载服务器配置
!!! warning "警告"
不建议将生产服务器配置加载到 `clickhouse-local` 因为数据可以在人为错误的情况下被损坏。
不建议将生产服务器配置加载到`clickhouse-local`因为数据可以在人为错误的情况下被损坏。
## 用途 {#usage}
@ -26,21 +26,21 @@ clickhouse-local --structure "table_structure" --input-format "format_of_incomin
参数:
- `-S`, `--structure`table structure for input data.
- `-if`, `--input-format`input format, `TSV` 默认情况下
- `-f`, `--file`path to data, `stdin` 默认情况下
- `-q` `--query`queries to execute with `;` 如delimeter
- `-N`, `--table`table name where to put output data, `table` 默认情况下
- `-of`, `--format`, `--output-format`output format, `TSV` 默认情况下
- `--stacktrace`whether to dump debug output in case of exception.
- `--verbose`more details on query execution.
- `-s`disables `stderr` 记录
- `--config-file`path to configuration file in same format as for ClickHouse server, by default the configuration empty.
- `--help`arguments references for `clickhouse-local`.
- `-S`, `--structure`输入数据的表结构。
- `-if`, `--input-format`输入格式化类型, 默认是`TSV`
- `-f`, `--file`数据路径, 默认是`stdin`
- `-q` `--query`要查询的SQL语句使用`;`做分隔符
- `-N`, `--table`数据输出的表名,默认是`table`
- `-of`, `--format`, `--output-format`输出格式化类型, 默认是`TSV`
- `--stacktrace`是否在出现异常时输出栈信息。
- `--verbose`debug显示查询的详细信息。
- `-s`禁用`stderr`输出信息
- `--config-file`与ClickHouse服务器格式相同配置文件的路径默认情况下配置为空。
- `--help``clickhouse-local`使用帮助信息。
还有每个ClickHouse配置变量的参数这些变量更常用而不是 `--config-file`.
对于每个ClickHouse配置的参数也可以单独使用可以不使用`--config-file`指定。
## 例 {#examples}
## 例 {#examples}
``` bash
echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table"
@ -49,7 +49,7 @@ Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
3 4
```
前面的例子是一样的:
另一个示例,类似上一个使用示例:
``` bash
$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
@ -58,7 +58,22 @@ Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec.
3 4
```
现在让我们为每个Unix用户输出内存用户:
你可以使用`stdin`或`--file`参数, 打开任意数量的文件来使用多个文件[`file` table function](../../sql-reference/table-functions/file.md):
```bash
$ echo 1 | tee 1.tsv
1
$ echo 2 | tee 2.tsv
2
$ clickhouse-local --query "
select * from file('1.tsv', TSV, 'a int') t1
cross join file('2.tsv', TSV, 'b int') t2"
1 2
```
现在让我们查询每个Unix用户使用内存:
``` bash
$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"

View File

@ -6,7 +6,7 @@
我们使用RoaringBitmap实际存储位图对象当基数小于或等于32时它使用Set保存。当基数大于32时它使用RoaringBitmap保存。这也是为什么低基数集的存储更快的原因。
有关RoaringBitmap的更多信息请参阅[呻吟声](https://github.com/RoaringBitmap/CRoaring)。
有关RoaringBitmap的更多信息请参阅[RoaringBitmap](https://github.com/RoaringBitmap/CRoaring)。
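A hedged sketch of building a bitmap and reading it back as an array, assuming a local server:

```bash
# bitmapBuild turns an array of unsigned integers into a bitmap state; bitmapToArray inverts it.
clickhouse-client --query "SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res"
```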
## bitmapBuild {#bitmapbuild}

View File

@ -259,5 +259,5 @@ CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME([MIN val1] MAX val2)
LIFETIME({MIN min_val MAX max_val | max_val})
```

View File

@ -112,6 +112,8 @@ add_subdirectory (obfuscator)
add_subdirectory (install)
add_subdirectory (git-import)
#add_subdirectory (grpc-client)
if (ENABLE_CLICKHOUSE_ODBC_BRIDGE)
add_subdirectory (odbc-bridge)
endif ()

View File

@ -2515,7 +2515,7 @@ public:
{
std::string traceparent = options["opentelemetry-traceparent"].as<std::string>();
std::string error;
if (!context.getClientInfo().parseTraceparentHeader(
if (!context.getClientInfo().client_trace_context.parseTraceparentHeader(
traceparent, error))
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
@ -2526,7 +2526,7 @@ public:
if (options.count("opentelemetry-tracestate"))
{
context.getClientInfo().opentelemetry_tracestate =
context.getClientInfo().client_trace_context.tracestate =
options["opentelemetry-tracestate"].as<std::string>();
}

View File

@ -62,6 +62,9 @@ decltype(auto) ClusterCopier::retry(T && func, UInt64 max_tries)
{
std::exception_ptr exception;
if (max_tries == 0)
throw Exception("Cannot perform zero retries", ErrorCodes::LOGICAL_ERROR);
for (UInt64 try_number = 1; try_number <= max_tries; ++try_number)
{
try
@ -605,7 +608,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
settings_push.replication_alter_partitions_sync = 2;
query_alter_ast_string += " ALTER TABLE " + getQuotedTable(original_table) +
" ATTACH PARTITION " + partition_name +
((partition_name == "'all'") ? " ATTACH PARTITION ID " : " ATTACH PARTITION ") + partition_name +
" FROM " + getQuotedTable(helping_table);
LOG_DEBUG(log, "Executing ALTER query: {}", query_alter_ast_string);
@ -636,7 +639,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
if (!task_table.isReplicatedTable())
{
query_deduplicate_ast_string += " OPTIMIZE TABLE " + getQuotedTable(original_table) +
" PARTITION " + partition_name + " DEDUPLICATE;";
((partition_name == "'all'") ? " PARTITION ID " : " PARTITION ") + partition_name + " DEDUPLICATE;";
LOG_DEBUG(log, "Executing OPTIMIZE DEDUPLICATE query: {}", query_alter_ast_string);
@ -807,7 +810,7 @@ bool ClusterCopier::tryDropPartitionPiece(
DatabaseAndTableName helping_table = DatabaseAndTableName(original_table.first, original_table.second + "_piece_" + toString(current_piece_number));
String query = "ALTER TABLE " + getQuotedTable(helping_table);
query += " DROP PARTITION " + task_partition.name + "";
query += ((task_partition.name == "'all'") ? " DROP PARTITION ID " : " DROP PARTITION ") + task_partition.name + "";
/// TODO: use this statement after servers will be updated up to 1.1.54310
// query += " DROP PARTITION ID '" + task_partition.name + "'";
@ -1567,7 +1570,7 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT
DatabaseAndTableName original_table = task_table.table_push;
DatabaseAndTableName helping_table = DatabaseAndTableName(original_table.first, original_table.second + "_piece_" + toString(current_piece_number));
String query = "ALTER TABLE " + getQuotedTable(helping_table) + " DROP PARTITION " + partition_name;
String query = "ALTER TABLE " + getQuotedTable(helping_table) + ((partition_name == "'all'") ? " DROP PARTITION ID " : " DROP PARTITION ") + partition_name;
const ClusterPtr & cluster_push = task_table.cluster_push;
Settings settings_push = task_cluster->settings_push;
@ -1670,14 +1673,24 @@ void ClusterCopier::createShardInternalTables(const ConnectionTimeouts & timeout
std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & timeouts, TaskShard & task_shard)
{
std::set<String> res;
createShardInternalTables(timeouts, task_shard, false);
TaskTable & task_table = task_shard.task_table;
const String & partition_name = queryToString(task_table.engine_push_partition_key_ast);
if (partition_name == "'all'")
{
res.emplace("'all'");
return res;
}
String query;
{
WriteBufferFromOwnString wb;
wb << "SELECT DISTINCT " << queryToString(task_table.engine_push_partition_key_ast) << " AS partition FROM"
wb << "SELECT DISTINCT " << partition_name << " AS partition FROM"
<< " " << getQuotedTable(task_shard.table_read_shard) << " ORDER BY partition DESC";
query = wb.str();
}
@ -1692,7 +1705,6 @@ std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti
local_context.setSettings(task_cluster->settings_pull);
Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_ast, local_context)->execute().getInputStream());
std::set<String> res;
if (block)
{
ColumnWithTypeAndName & column = block.getByPosition(0);
@ -1803,7 +1815,7 @@ UInt64 ClusterCopier::executeQueryOnCluster(
if (execution_mode == ClusterExecutionMode::ON_EACH_NODE)
max_successful_executions_per_shard = 0;
std::atomic<size_t> origin_replicas_number;
std::atomic<size_t> origin_replicas_number = 0;
/// We need to execute query on one replica at least
auto do_for_shard = [&] (UInt64 shard_index, Settings shard_settings)

View File

@ -0,0 +1,7 @@
include_directories(${CMAKE_CURRENT_BINARY_DIR})
get_filename_component(rpc_proto "${CMAKE_CURRENT_SOURCE_DIR}/../server/grpc_protos/GrpcConnection.proto" ABSOLUTE)
protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS ${rpc_proto})
PROTOBUF_GENERATE_GRPC_CPP(GRPC_SRCS GRPC_HDRS ${rpc_proto})
add_executable(grpc-client grpc_client.cpp ${PROTO_SRCS} ${PROTO_HDRS} ${GRPC_SRCS} ${GRPC_HDRS})
target_link_libraries(grpc-client PUBLIC grpc++ PUBLIC libprotobuf PUBLIC daemon)

View File

@ -0,0 +1,173 @@
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include <thread>
#include <stdlib.h>
#include <grpc++/channel.h>
#include <grpc++/client_context.h>
#include <grpc++/create_channel.h>
#include <grpc++/security/credentials.h>
#include "GrpcConnection.grpc.pb.h"
class GRPCClient
{
public:
explicit GRPCClient(std::shared_ptr<grpc::Channel> channel)
: stub_(GRPCConnection::GRPC::NewStub(channel))
{}
std::string Query(const GRPCConnection::User& userInfo,
const std::string& query,
std::vector<std::string> insert_data = {})
{
GRPCConnection::QueryRequest request;
grpc::Status status;
GRPCConnection::QueryResponse reply;
grpc::ClientContext context;
auto deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(10000);
context.set_deadline(deadline);
auto user = std::make_unique<GRPCConnection::User>(userInfo);
auto querySettigs = std::make_unique<GRPCConnection::QuerySettings>();
int id = rand();
request.set_allocated_user_info(user.release());
// interactive_delay in miliseconds
request.set_interactive_delay(1000);
querySettigs->set_query(query);
querySettigs->set_format("Values");
querySettigs->set_query_id(std::to_string(id));
querySettigs->set_data_stream((insert_data.size() != 0));
(*querySettigs->mutable_settings())["max_query_size"] ="100";
request.set_allocated_query_info(querySettigs.release());
void* got_tag = (void*)1;
bool ok = false;
std::unique_ptr<grpc::ClientReaderWriter<GRPCConnection::QueryRequest, GRPCConnection::QueryResponse> > reader(stub_->Query(&context));
reader->Write(request);
auto write = [&reply, &reader, &insert_data]()
{
GRPCConnection::QueryRequest request_insert;
for (const auto& data : insert_data)
{
request_insert.set_insert_data(data);
if (reply.exception_occured().empty())
{
reader->Write(request_insert);
}
else
{
break;
}
}
request_insert.set_insert_data("");
if (reply.exception_occured().empty())
{
reader->Write(request_insert);
}
// reader->WritesDone();
};
std::thread write_thread(write);
write_thread.detach();
while (reader->Read(&reply))
{
if (!reply.output().empty())
{
std::cout << "Query Part:\n " << id<< reply.output()<<'\n';
}
else if (reply.progress().read_rows()
|| reply.progress().read_bytes()
|| reply.progress().total_rows_to_read()
|| reply.progress().written_rows()
|| reply.progress().written_bytes())
{
std::cout << "Progress " << id<< ":{\n" << "read_rows: " << reply.progress().read_rows() << '\n'
<< "read_bytes: " << reply.progress().read_bytes() << '\n'
<< "total_rows_to_read: " << reply.progress().total_rows_to_read() << '\n'
<< "written_rows: " << reply.progress().written_rows() << '\n'
<< "written_bytes: " << reply.progress().written_bytes() << '\n';
}
else if (!reply.totals().empty())
{
std::cout << "Totals:\n " << id << " " << reply.totals() <<'\n';
}
else if (!reply.extremes().empty())
{
std::cout << "Extremes:\n " << id << " " << reply.extremes() <<'\n';
}
}
if (status.ok() && reply.exception_occured().empty())
{
return "";
}
else if (status.ok() && !reply.exception_occured().empty())
{
return reply.exception_occured();
}
else
{
return "RPC failed";
}
}
private:
std::unique_ptr<GRPCConnection::GRPC::Stub> stub_;
};
int main(int argc, char** argv)
{
GRPCConnection::User userInfo1;
userInfo1.set_user("default");
userInfo1.set_password("");
userInfo1.set_quota("default");
std::cout << "Try: " << argv[1] << std::endl;
grpc::ChannelArguments ch_args;
ch_args.SetMaxReceiveMessageSize(-1);
GRPCClient client(
grpc::CreateCustomChannel(argv[1], grpc::InsecureChannelCredentials(), ch_args));
{
std::cout << client.Query(userInfo1, "CREATE TABLE t (a UInt8) ENGINE = Memory") << std::endl;
std::cout << client.Query(userInfo1, "CREATE TABLE t (a UInt8) ENGINE = Memory") << std::endl;
std::cout << client.Query(userInfo1, "INSERT INTO t VALUES", {"(1),(2),(3)", "(4),(6),(5)"}) << std::endl;
std::cout << client.Query(userInfo1, "INSERT INTO t_not_defined VALUES", {"(1),(2),(3)", "(4),(6),(5)"}) << std::endl;
std::cout << client.Query(userInfo1, "SELECT a FROM t ORDER BY a") << std::endl;
std::cout << client.Query(userInfo1, "DROP TABLE t") << std::endl;
}
{
std::cout << client.Query(userInfo1, "SELECT count() FROM numbers(1)") << std::endl;
std::cout << client.Query(userInfo1, "SELECT 100") << std::endl;
std::cout << client.Query(userInfo1, "SELECT count() FROM numbers(10000000000)") << std::endl;
std::cout << client.Query(userInfo1, "SELECT count() FROM numbers(100)") << std::endl;
}
{
std::cout << client.Query(userInfo1, "CREATE TABLE arrays_test (s String, arr Array(UInt8)) ENGINE = Memory;") << std::endl;
std::cout << client.Query(userInfo1, "INSERT INTO arrays_test VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []);") << std::endl;
std::cout << client.Query(userInfo1, "SELECT s FROM arrays_test") << std::endl;
std::cout << client.Query(userInfo1, "DROP TABLE arrays_test") << std::endl;
std::cout << client.Query(userInfo1, "") << std::endl;
}
{//Check null return from pipe
std::cout << client.Query(userInfo1, "CREATE TABLE table2 (x UInt8, y UInt8) ENGINE = Memory;") << std::endl;
std::cout << client.Query(userInfo1, "SELECT x FROM table2") << std::endl;
std::cout << client.Query(userInfo1, "DROP TABLE table2") << std::endl;
}
{//Check Totals
std::cout << client.Query(userInfo1, "CREATE TABLE tabl (x UInt8, y UInt8) ENGINE = Memory;") << std::endl;
std::cout << client.Query(userInfo1, "INSERT INTO tabl VALUES (1, 2), (2, 4), (3, 2), (3, 3), (3, 4);") << std::endl;
std::cout << client.Query(userInfo1, "SELECT sum(x), y FROM tabl GROUP BY y WITH TOTALS") << std::endl;
std::cout << client.Query(userInfo1, "DROP TABLE tabl") << std::endl;
}
return 0;
}

View File

@ -109,6 +109,14 @@ void ODBCBridge::defineOptions(Poco::Util::OptionSet & options)
.argument("err-log-path")
.binding("logger.errorlog"));
options.addOption(Poco::Util::Option("stdout-path", "", "stdout log path, default console")
.argument("stdout-path")
.binding("logger.stdout"));
options.addOption(Poco::Util::Option("stderr-path", "", "stderr log path, default console")
.argument("stderr-path")
.binding("logger.stderr"));
using Me = std::decay_t<decltype(*this)>;
options.addOption(Poco::Util::Option("help", "", "produce this help message")
.binding("help")
@ -127,6 +135,27 @@ void ODBCBridge::initialize(Application & self)
config().setString("logger", "ODBCBridge");
/// Redirect stdout, stderr to specified files.
/// Some libraries and sanitizers write to stderr in case of errors.
const auto stdout_path = config().getString("logger.stdout", "");
if (!stdout_path.empty())
{
if (!freopen(stdout_path.c_str(), "a+", stdout))
throw Poco::OpenFileException("Cannot attach stdout to " + stdout_path);
/// Disable buffering for stdout.
setbuf(stdout, nullptr);
}
const auto stderr_path = config().getString("logger.stderr", "");
if (!stderr_path.empty())
{
if (!freopen(stderr_path.c_str(), "a+", stderr))
throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path);
/// Disable buffering for stderr.
setbuf(stderr, nullptr);
}
buildLoggers(config(), logger(), self.commandName());
BaseDaemon::logRevision();
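A hedged sketch of starting the bridge with the two new log-redirection options; the binary name and log paths are assumptions, only the option names come from the code above:

```bash
clickhouse-odbc-bridge \
    --stdout-path /var/log/clickhouse-server/odbc-bridge.stdout.log \
    --stderr-path /var/log/clickhouse-server/odbc-bridge.stderr.log
```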

View File

@ -64,6 +64,7 @@
#include <Common/ThreadFuzzer.h>
#include <Server/MySQLHandlerFactory.h>
#include <Server/PostgreSQLHandlerFactory.h>
#include <Server/ProtocolServerAdapter.h>
#if !defined(ARCADIA_BUILD)
@ -84,6 +85,11 @@
# include <Poco/Net/SecureServerSocket.h>
#endif
#if USE_GRPC
# include <Server/GRPCServer.h>
#endif
namespace CurrentMetrics
{
extern const Metric Revision;
@ -806,7 +812,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
http_params->setTimeout(settings.http_receive_timeout);
http_params->setKeepAliveTimeout(keep_alive_timeout);
std::vector<std::unique_ptr<Poco::Net::TCPServer>> servers;
std::vector<ProtocolServerAdapter> servers;
std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");
@ -1035,6 +1041,15 @@ int Server::main(const std::vector<std::string> & /*args*/)
LOG_INFO(log, "Listening for PostgreSQL compatibility protocol: " + address.toString());
});
#if USE_GRPC
create_server("grpc_port", [&](UInt16 port)
{
Poco::Net::SocketAddress server_address(listen_host, port);
servers.emplace_back(std::make_unique<GRPCServer>(*this, make_socket_address(listen_host, port)));
LOG_INFO(log, "Listening for gRPC protocol: " + server_address.toString());
});
#endif
/// Prometheus (if defined and not setup yet with http_port)
create_server("prometheus.port", [&](UInt16 port)
{
@ -1056,7 +1071,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
global_context->enableNamedSessions();
for (auto & server : servers)
server->start();
server.start();
{
String level_str = config().getString("text_log.level", "");
@ -1088,8 +1103,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
int current_connections = 0;
for (auto & server : servers)
{
server->stop();
current_connections += server->currentConnections();
server.stop();
current_connections += server.currentConnections();
}
if (current_connections)
@ -1109,7 +1124,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
{
current_connections = 0;
for (auto & server : servers)
current_connections += server->currentConnections();
current_connections += server.currentConnections();
if (!current_connections)
break;
sleep_current_ms += sleep_one_ms;

View File

@ -0,0 +1 @@
../../../tests/config/config.d/logging_no_rotate.xml

View File

@ -11,6 +11,9 @@
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<!-- Rotation policy
See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
-->
<size>1000M</size>
<count>10</count>
<!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
@ -131,6 +134,34 @@
<max_connections>4096</max_connections>
<keep_alive_timeout>3</keep_alive_timeout>
<!-- gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API)
<grpc_port>9001</grpc_port>
<grpc>
<enable_ssl>true</enable_ssl> -->
<!-- The following two files are used only if enable_ssl=1
<ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
<ssl_key_file>/path/to/ssl_key_file</ssl_key_file> -->
<!-- Whether server will request client for a certificate
<ssl_require_client_auth>true</ssl_require_client_auth> -->
<!-- The following file is used only if ssl_require_client_auth=1
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file> -->
<!-- Default compression algorithm (applied if client doesn't specify another algorithm).
Supported algorithms: none, deflate, gzip, stream_gzip
<compression>gzip</compression> -->
<!-- Default compression level (applied if client doesn't specify another level).
Supported levels: none, low, medium, high
<compression_level>high</compression_level> -->
<!-- Send/receive message size limits in bytes. -1 means unlimited
<max_send_message_size>-1</max_send_message_size>
<max_receive_message_size>4194304</max_receive_message_size>
</grpc> -->
<!-- Maximum number of concurrent queries. -->
<max_concurrent_queries>100</max_concurrent_queries>

View File

@ -1,6 +1,7 @@
<html> <!-- TODO If I write DOCTYPE HTML something changes but I don't know what. -->
<head>
<meta charset="UTF-8">
<link rel="icon" href="data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSI1NCIgaGVpZ2h0PSI0OCIgdmlld0JveD0iMCAwIDkgOCI+PHN0eWxlPi5ve2ZpbGw6I2ZjMH0ucntmaWxsOnJlZH08L3N0eWxlPjxwYXRoIGQ9Ik0wLDcgaDEgdjEgaC0xIHoiIGNsYXNzPSJyIi8+PHBhdGggZD0iTTAsMCBoMSB2NyBoLTEgeiIgY2xhc3M9Im8iLz48cGF0aCBkPSJNMiwwIGgxIHY4IGgtMSB6IiBjbGFzcz0ibyIvPjxwYXRoIGQ9Ik00LDAgaDEgdjggaC0xIHoiIGNsYXNzPSJvIi8+PHBhdGggZD0iTTYsMCBoMSB2OCBoLTEgeiIgY2xhc3M9Im8iLz48cGF0aCBkPSJNOCwzLjI1IGgxIHYxLjUgaC0xIHoiIGNsYXNzPSJvIi8+PC9zdmc+">
<title>ClickHouse Query</title>
<!-- Code Style:
@ -21,26 +22,11 @@
<!-- Development Roadmap:
1. Add indication that the query was sent and when the query has been finished.
Do not use any animated spinners. Just a text or check mark.
Eliminate race conditions (results from the previous query should be ignored on arrival, the previous request should be cancelled).
2. Support readonly servers.
1. Support readonly servers.
Check if readonly = 1 (with SELECT FROM system.settings) to avoid sending settings. It can be done once on address/credentials change.
It can be done in background, e.g. wait 100 ms after address/credentials change and do the check.
Also it can provide visual indication that credentials are correct.
3. Add history in localstorage. Integrate with history API.
There can be a counter in localstorage, that will be appended to location #fragment.
The 'back', 'forward' buttons in browser should work.
Also there should be UI element to list all the queries from history and select from the list.
4. Trivial sharing capabilities.
Sharing is only possible when system.query_log is accessible. Read the X-ClickHouse-QueryId from the response.
Share button will: - emit SYSTEM FLUSH LOGS if not readonly; - find the query in the query_log;
- generate an URL with the query id and: server address if not equal to the URL's host; user name if not default;
indication that password should be entered in case of non-empty password.
-->
<style type="text/css">
@ -273,6 +259,22 @@
{
color: var(--null-color);
}
#hourglass
{
display: none;
padding-left: 1rem;
font-size: 110%;
color: #888;
}
#check-mark
{
display: none;
padding-left: 1rem;
font-size: 110%;
color: #080;
}
</style>
</head>
@ -286,6 +288,8 @@
<div id="run_div">
<button class="shadow" id="run">Run</button>
<span class="hint">&nbsp;(Ctrl+Enter)</span>
<span id="hourglass"></span>
<span id="check-mark"></span>
<span id="stats"></span>
<span id="toggle-dark">🌑</span><span id="toggle-light">🌞</span>
</div>
@ -299,50 +303,117 @@
<script type="text/javascript">
/// Incremental request number. When response is received,
/// if it's request number does not equal to the current request number, response will be ignored.
/// This is to avoid race conditions.
var request_num = 0;
/// Save query in history only if it is different.
var previous_query = '';
/// Substitute the address of the server where the page is served.
if (location.protocol != 'file:') {
document.getElementById('url').value = location.origin;
}
function post()
/// Substitute user name if it's specified in the query string
var user_from_url = (new URL(window.location)).searchParams.get('user');
if (user_from_url) {
document.getElementById('user').value = user_from_url;
}
function postImpl(posted_request_num, query)
{
/// TODO: Avoid race condition on subsequent requests when responses may come out of order.
/// TODO: Check if URL already contains query string (append parameters).
var user = document.getElementById('user').value;
var password = document.getElementById('password').value;
var url = document.getElementById('url').value +
/// Ask server to allow cross-domain requests.
'?add_http_cors_header=1' +
'&user=' + encodeURIComponent(document.getElementById('user').value) +
'&password=' + encodeURIComponent(document.getElementById('password').value) +
'&user=' + encodeURIComponent(user) +
'&password=' + encodeURIComponent(password) +
'&default_format=JSONCompact' +
/// Safety settings to prevent results that browser cannot display.
'&max_result_rows=1000&max_result_bytes=10000000&result_overflow_mode=break';
var query = document.getElementById('query').value;
var xhr = new XMLHttpRequest;
xhr.open('POST', url, true);
xhr.send(query);
xhr.onreadystatechange = function()
{
if (this.readyState === XMLHttpRequest.DONE) {
if (this.status === 200) {
var json;
try { json = JSON.parse(this.response); } catch (e) {}
if (json !== undefined && json.statistics !== undefined) {
renderResult(json);
} else {
renderUnparsedResult(this.response);
}
} else {
/// TODO: Proper rendering of network errors.
renderError(this.response);
if (posted_request_num != request_num) {
return;
} else if (this.readyState === XMLHttpRequest.DONE) {
renderResponse(this.status, this.response);
/// The query is saved in browser history (in state JSON object)
/// as well as in URL fragment identifier.
if (query != previous_query) {
previous_query = query;
var title = "ClickHouse Query: " + query;
history.pushState(
{
query: query,
status: this.status,
response: this.response.length > 100000 ? null : this.response /// Lower than the browser's limit.
},
title,
window.location.pathname + '?user=' + encodeURIComponent(user) + '#' + window.btoa(query));
document.title = title;
}
} else {
//console.log(this);
}
}
document.getElementById('check-mark').style.display = 'none';
document.getElementById('hourglass').style.display = 'inline';
xhr.send(query);
}
function renderResponse(status, response) {
document.getElementById('hourglass').style.display = 'none';
if (status === 200) {
var json;
try { json = JSON.parse(response); } catch (e) {}
if (json !== undefined && json.statistics !== undefined) {
renderResult(json);
} else {
renderUnparsedResult(response);
}
document.getElementById('check-mark').style.display = 'inline';
} else {
/// TODO: Proper rendering of network errors.
renderError(response);
}
}
window.onpopstate = function(event) {
if (!event.state) {
return;
}
document.getElementById('query').value = event.state.query;
if (!event.state.response) {
clear();
return;
}
renderResponse(event.state.status, event.state.response);
};
if (window.location.hash) {
document.getElementById('query').value = window.atob(window.location.hash.substr(1));
}
function post()
{
++request_num;
var query = document.getElementById('query').value;
postImpl(request_num, query);
}
document.getElementById('run').onclick = function()
@ -350,7 +421,7 @@
post();
}
document.getElementById('query').onkeypress = function(event)
document.onkeypress = function(event)
{
/// Firefox has code 13 for Enter and Chromium has code 10.
if (event.ctrlKey && (event.charCode == 13 || event.charCode == 10)) {
@ -372,6 +443,9 @@
document.getElementById('error').style.display = 'none';
document.getElementById('stats').innerText = '';
document.getElementById('hourglass').style.display = 'none';
document.getElementById('check-mark').style.display = 'none';
}
function renderResult(response)
@ -443,7 +517,7 @@
function renderError(response)
{
clear();
document.getElementById('error').innerText = response;
document.getElementById('error').innerText = response ? response : "No response.";
document.getElementById('error').style.display = 'block';
}

View File

@ -80,7 +80,7 @@ public:
}
if (!isUnsignedInteger(arguments[1]))
throw Exception("Second argument of aggregate function " + getName() + " must be integer.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception("Second argument of aggregate function " + getName() + " must be unsigned integer.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
if (default_value.isNull())
default_value = type->getDefault();

View File

@ -143,7 +143,7 @@ void LinearModelData::updateState()
void LinearModelData::predict(
ColumnVector<Float64>::Container & container,
ColumnsWithTypeAndName & arguments,
const ColumnsWithTypeAndName & arguments,
size_t offset,
size_t limit,
const Context & context) const
@ -264,8 +264,8 @@ void Adam::merge(const IWeightsUpdater & rhs, Float64 frac, Float64 rhs_frac)
average_gradient[i] = average_gradient[i] * frac + adam_rhs.average_gradient[i] * rhs_frac;
average_squared_gradient[i] = average_squared_gradient[i] * frac + adam_rhs.average_squared_gradient[i] * rhs_frac;
}
beta1_powered_ *= adam_rhs.beta1_powered_;
beta2_powered_ *= adam_rhs.beta2_powered_;
beta1_powered *= adam_rhs.beta1_powered;
beta2_powered *= adam_rhs.beta2_powered;
}
void Adam::update(UInt64 batch_size, std::vector<Float64> & weights, Float64 & bias, Float64 learning_rate, const std::vector<Float64> & batch_gradient)
@ -282,21 +282,21 @@ void Adam::update(UInt64 batch_size, std::vector<Float64> & weights, Float64 & b
for (size_t i = 0; i != average_gradient.size(); ++i)
{
Float64 normed_gradient = batch_gradient[i] / batch_size;
average_gradient[i] = beta1_ * average_gradient[i] + (1 - beta1_) * normed_gradient;
average_squared_gradient[i] = beta2_ * average_squared_gradient[i] +
(1 - beta2_) * normed_gradient * normed_gradient;
average_gradient[i] = beta1 * average_gradient[i] + (1 - beta1) * normed_gradient;
average_squared_gradient[i] = beta2 * average_squared_gradient[i] +
(1 - beta2) * normed_gradient * normed_gradient;
}
for (size_t i = 0; i < weights.size(); ++i)
{
weights[i] += (learning_rate * average_gradient[i]) /
((1 - beta1_powered_) * (sqrt(average_squared_gradient[i] / (1 - beta2_powered_)) + eps_));
((1 - beta1_powered) * (sqrt(average_squared_gradient[i] / (1 - beta2_powered)) + eps));
}
bias += (learning_rate * average_gradient[weights.size()]) /
((1 - beta1_powered_) * (sqrt(average_squared_gradient[weights.size()] / (1 - beta2_powered_)) + eps_));
((1 - beta1_powered) * (sqrt(average_squared_gradient[weights.size()] / (1 - beta2_powered)) + eps));
beta1_powered_ *= beta1_;
beta2_powered_ *= beta2_;
beta1_powered *= beta1;
beta2_powered *= beta2;
}
void Adam::addToBatch(
@ -348,7 +348,7 @@ void Nesterov::update(UInt64 batch_size, std::vector<Float64> & weights, Float64
for (size_t i = 0; i < batch_gradient.size(); ++i)
{
accumulated_gradient[i] = accumulated_gradient[i] * alpha_ + (learning_rate * batch_gradient[i]) / batch_size;
accumulated_gradient[i] = accumulated_gradient[i] * alpha + (learning_rate * batch_gradient[i]) / batch_size;
}
for (size_t i = 0; i < weights.size(); ++i)
{
@ -375,9 +375,9 @@ void Nesterov::addToBatch(
std::vector<Float64> shifted_weights(weights.size());
for (size_t i = 0; i != shifted_weights.size(); ++i)
{
shifted_weights[i] = weights[i] + accumulated_gradient[i] * alpha_;
shifted_weights[i] = weights[i] + accumulated_gradient[i] * alpha;
}
auto shifted_bias = bias + accumulated_gradient[weights.size()] * alpha_;
auto shifted_bias = bias + accumulated_gradient[weights.size()] * alpha;
gradient_computer.compute(batch_gradient, shifted_weights, shifted_bias, l2_reg_coef, target, columns, row_num);
}
@ -411,7 +411,7 @@ void Momentum::update(UInt64 batch_size, std::vector<Float64> & weights, Float64
for (size_t i = 0; i < batch_gradient.size(); ++i)
{
accumulated_gradient[i] = accumulated_gradient[i] * alpha_ + (learning_rate * batch_gradient[i]) / batch_size;
accumulated_gradient[i] = accumulated_gradient[i] * alpha + (learning_rate * batch_gradient[i]) / batch_size;
}
for (size_t i = 0; i < weights.size(); ++i)
{
@ -448,7 +448,7 @@ void IWeightsUpdater::addToBatch(
void LogisticRegression::predict(
ColumnVector<Float64>::Container & container,
ColumnsWithTypeAndName & arguments,
const ColumnsWithTypeAndName & arguments,
size_t offset,
size_t limit,
const std::vector<Float64> & weights,
@ -516,7 +516,7 @@ void LogisticRegression::compute(
void LinearRegression::predict(
ColumnVector<Float64>::Container & container,
ColumnsWithTypeAndName & arguments,
const ColumnsWithTypeAndName & arguments,
size_t offset,
size_t limit,
const std::vector<Float64> & weights,

View File

@ -23,7 +23,7 @@ GradientComputer class computes gradient according to its loss function
class IGradientComputer
{
public:
IGradientComputer() {}
IGradientComputer() = default;
virtual ~IGradientComputer() = default;
@ -39,7 +39,7 @@ public:
virtual void predict(
ColumnVector<Float64>::Container & container,
ColumnsWithTypeAndName & arguments,
const ColumnsWithTypeAndName & arguments,
size_t offset,
size_t limit,
const std::vector<Float64> & weights,
@ -51,7 +51,7 @@ public:
class LinearRegression : public IGradientComputer
{
public:
LinearRegression() {}
LinearRegression() = default;
void compute(
std::vector<Float64> & batch_gradient,
@ -64,7 +64,7 @@ public:
void predict(
ColumnVector<Float64>::Container & container,
ColumnsWithTypeAndName & arguments,
const ColumnsWithTypeAndName & arguments,
size_t offset,
size_t limit,
const std::vector<Float64> & weights,
@ -76,7 +76,7 @@ public:
class LogisticRegression : public IGradientComputer
{
public:
LogisticRegression() {}
LogisticRegression() = default;
void compute(
std::vector<Float64> & batch_gradient,
@ -89,7 +89,7 @@ public:
void predict(
ColumnVector<Float64>::Container & container,
ColumnsWithTypeAndName & arguments,
const ColumnsWithTypeAndName & arguments,
size_t offset,
size_t limit,
const std::vector<Float64> & weights,
@ -147,9 +147,9 @@ public:
class Momentum : public IWeightsUpdater
{
public:
Momentum() {}
Momentum() = default;
Momentum(Float64 alpha) : alpha_(alpha) {}
explicit Momentum(Float64 alpha_) : alpha(alpha_) {}
void update(UInt64 batch_size, std::vector<Float64> & weights, Float64 & bias, Float64 learning_rate, const std::vector<Float64> & batch_gradient) override;
@ -160,7 +160,7 @@ public:
void read(ReadBuffer & buf) override;
private:
Float64 alpha_{0.1};
Float64 alpha{0.1};
std::vector<Float64> accumulated_gradient;
};
@ -168,9 +168,9 @@ private:
class Nesterov : public IWeightsUpdater
{
public:
Nesterov() {}
Nesterov() = default;
Nesterov(Float64 alpha) : alpha_(alpha) {}
explicit Nesterov(Float64 alpha_) : alpha(alpha_) {}
void addToBatch(
std::vector<Float64> & batch_gradient,
@ -191,7 +191,7 @@ public:
void read(ReadBuffer & buf) override;
private:
const Float64 alpha_ = 0.9;
const Float64 alpha = 0.9;
std::vector<Float64> accumulated_gradient;
};
@ -201,8 +201,8 @@ class Adam : public IWeightsUpdater
public:
Adam()
{
beta1_powered_ = beta1_;
beta2_powered_ = beta2_;
beta1_powered = beta1;
beta2_powered = beta2;
}
void addToBatch(
@ -225,11 +225,11 @@ public:
private:
/// beta1 and beta2 hyperparameters have such recommended values
const Float64 beta1_ = 0.9;
const Float64 beta2_ = 0.999;
const Float64 eps_ = 0.000001;
Float64 beta1_powered_;
Float64 beta2_powered_;
const Float64 beta1 = 0.9;
const Float64 beta2 = 0.999;
const Float64 eps = 0.000001;
Float64 beta1_powered;
Float64 beta2_powered;
std::vector<Float64> average_gradient;
std::vector<Float64> average_squared_gradient;
@ -241,7 +241,7 @@ private:
class LinearModelData
{
public:
LinearModelData() {}
LinearModelData() = default;
LinearModelData(
Float64 learning_rate_,
@ -261,7 +261,7 @@ public:
void predict(
ColumnVector<Float64>::Container & container,
ColumnsWithTypeAndName & arguments,
const ColumnsWithTypeAndName & arguments,
size_t offset,
size_t limit,
const Context & context) const;
@ -360,7 +360,7 @@ public:
void predictValues(
ConstAggregateDataPtr place,
IColumn & to,
ColumnsWithTypeAndName & arguments,
const ColumnsWithTypeAndName & arguments,
size_t offset,
size_t limit,
const Context & context) const override

View File

@ -61,7 +61,7 @@ public:
throw Exception("Prediction is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
virtual ~IAggregateFunction() {}
virtual ~IAggregateFunction() = default;
/** Data manipulating functions. */
@ -114,7 +114,7 @@ public:
virtual void predictValues(
ConstAggregateDataPtr /* place */,
IColumn & /*to*/,
ColumnsWithTypeAndName & /*arguments*/,
const ColumnsWithTypeAndName & /*arguments*/,
size_t /*offset*/,
size_t /*limit*/,
const Context & /*context*/) const

View File

@ -14,6 +14,7 @@ namespace DB
namespace ErrorCodes
{
extern const int TOO_LARGE_ARRAY_SIZE;
extern const int CANNOT_PARSE_INPUT_ASSERTION_FAILED;
}
@ -36,10 +37,11 @@ namespace ErrorCodes
* uses asin, which slows down the algorithm a bit.
*/
template <typename T>
class TDigest
class QuantileTDigest
{
using Value = Float32;
using Count = Float32;
using BetterFloat = Float64; // For intermediate results and sum(Count). Must have better precision, than Count
/** The centroid stores the weight of points around their mean value
*/
@ -55,13 +57,6 @@ class TDigest
, count(count_)
{}
Centroid & operator+=(const Centroid & other)
{
count += other.count;
mean += other.count * (other.mean - mean) / count;
return *this;
}
bool operator<(const Centroid & other) const
{
return mean < other.mean;
@ -71,26 +66,42 @@ class TDigest
/** :param epsilon: value \delta from the article - error in the range
* quantile 0.5 (default is 0.01, i.e. 1%)
* if you change epsilon, you must also change max_centroids
* :param max_centroids: depends on epsilon, the better accuracy, the more centroids you need
* to describe data with this accuracy. Read article before changing.
* :param max_unmerged: when accumulating count of new points beyond this
* value centroid compression is triggered
* (default is 2048, the higher the value - the
* more memory is required, but amortization of execution time increases)
* Change freely anytime.
*/
struct Params
{
Value epsilon = 0.01;
size_t max_centroids = 2048;
size_t max_unmerged = 2048;
};
/** max_centroids_deserialize should be >= all max_centroids ever used in production.
* This is security parameter, preventing allocation of too much centroids in deserialize, so can be relatively large.
*/
static constexpr size_t max_centroids_deserialize = 65536;
Params params;
static constexpr Params params{};
/// The memory will be allocated to several elements at once, so that the state occupies 64 bytes.
static constexpr size_t bytes_in_arena = 128 - sizeof(PODArray<Centroid>) - sizeof(Count) - sizeof(UInt32);
static constexpr size_t bytes_in_arena = 128 - sizeof(PODArray<Centroid>) - sizeof(BetterFloat) - sizeof(size_t); // If alignment is imperfect, sizeof(TDigest) will be more than naively expected
using Centroids = PODArrayWithStackMemory<Centroid, bytes_in_arena>;
Centroids centroids;
Count count = 0;
UInt32 unmerged = 0;
BetterFloat count = 0;
size_t unmerged = 0;
/** Linear interpolation at the point x on the line (x1, y1)..(x2, y2)
*/
static Value interpolate(Value x, Value x1, Value y1, Value x2, Value y2)
{
double k = (x - x1) / (x2 - x1);
return y1 + k * (y2 - y1);
}
struct RadixSortTraits
{
@ -111,15 +122,56 @@ class TDigest
};
/** Adds a centroid `c` to the digest
* centroid must be valid, validity is checked in add(), deserialize() and is maintained by compress()
*/
void addCentroid(const Centroid & c)
{
centroids.push_back(c);
count += c.count;
++unmerged;
if (unmerged >= params.max_unmerged)
if (unmerged > params.max_unmerged)
compress();
}
void compressBrute()
{
if (centroids.size() <= params.max_centroids)
return;
const size_t batch_size = (centroids.size() + params.max_centroids - 1) / params.max_centroids; // at least 2
auto l = centroids.begin();
auto r = std::next(l);
BetterFloat sum = 0;
BetterFloat l_mean = l->mean; // We have high-precision temporaries for numeric stability
BetterFloat l_count = l->count;
size_t batch_pos = 0;
for (;r != centroids.end(); ++r)
{
if (batch_pos < batch_size - 1)
{
/// The left column "eats" the right. Middle of the batch
l_count += r->count;
l_mean += r->count * (r->mean - l_mean) / l_count; // Symmetric algo (M1*C1 + M2*C2)/(C1+C2) is numerically better, but slower
l->mean = l_mean;
l->count = l_count;
batch_pos += 1;
}
else
{
// End of the batch, start the next one
sum += l->count; // Not l_count, otherwise actual sum of elements will be different
++l;
/// We skip all the values "eaten" earlier.
*l = *r;
l_mean = l->mean;
l_count = l->count;
batch_pos = 0;
}
}
count = sum + l_count; // Update count, it might be different due to += inaccuracy
centroids.resize(l - centroids.begin() + 1);
// Here centroids.size() <= params.max_centroids
}
public:
/** Performs compression of accumulated centroids
@ -128,74 +180,92 @@ public:
*/
void compress()
{
if (unmerged > 0)
if (unmerged > 0 || centroids.size() > params.max_centroids)
{
// unmerged > 0 implies centroids.size() > 0, hence *l is valid below
RadixSort<RadixSortTraits>::executeLSD(centroids.data(), centroids.size());
if (centroids.size() > 3)
/// A pair of consecutive bars of the histogram.
auto l = centroids.begin();
auto r = std::next(l);
const BetterFloat count_epsilon_4 = count * params.epsilon * 4; // Compiler is unable to do this optimization
BetterFloat sum = 0;
BetterFloat l_mean = l->mean; // We have high-precision temporaries for numeric stability
BetterFloat l_count = l->count;
while (r != centroids.end())
{
/// A pair of consecutive bars of the histogram.
auto l = centroids.begin();
auto r = std::next(l);
Count sum = 0;
while (r != centroids.end())
if (l->mean == r->mean) // Perfect aggregation (fast). We compare l->mean, not l_mean, to avoid identical elements after compress
{
// we use quantile which gives us the smallest error
/// The ratio of the part of the histogram to l, including the half l to the entire histogram. That is, what level quantile in position l.
Value ql = (sum + l->count * 0.5) / count;
Value err = ql * (1 - ql);
/// The ratio of the portion of the histogram to l, including l and half r to the entire histogram. That is, what level is the quantile in position r.
Value qr = (sum + l->count + r->count * 0.5) / count;
Value err2 = qr * (1 - qr);
if (err > err2)
err = err2;
Value k = 4 * count * err * params.epsilon;
/** The ratio of the weight of the glued column pair to all values is not greater,
* than epsilon multiply by a certain quadratic coefficient, which in the median is 1 (4 * 1/2 * 1/2),
* and at the edges decreases and is approximately equal to the distance to the edge * 4.
*/
if (l->count + r->count <= k)
{
// it is possible to merge left and right
/// The left column "eats" the right.
*l += *r;
}
else
{
// not enough capacity, check the next pair
sum += l->count;
++l;
/// We skip all the values "eaten" earlier.
if (l != r)
*l = *r;
}
l_count += r->count;
l->count = l_count;
++r;
continue;
}
// we use quantile which gives us the smallest error
/// At the end of the loop, all values to the right of l were "eaten".
centroids.resize(l - centroids.begin() + 1);
/// The ratio of the part of the histogram to l, including the half l to the entire histogram. That is, what level quantile in position l.
BetterFloat ql = (sum + l_count * 0.5) / count;
BetterFloat err = ql * (1 - ql);
/// The ratio of the portion of the histogram to l, including l and half r to the entire histogram. That is, what level is the quantile in position r.
BetterFloat qr = (sum + l_count + r->count * 0.5) / count;
BetterFloat err2 = qr * (1 - qr);
if (err > err2)
err = err2;
BetterFloat k = count_epsilon_4 * err;
/** The ratio of the weight of the glued column pair to all values is not greater,
* than epsilon multiply by a certain quadratic coefficient, which in the median is 1 (4 * 1/2 * 1/2),
* and at the edges decreases and is approximately equal to the distance to the edge * 4.
*/
if (l_count + r->count <= k)
{
// it is possible to merge left and right
/// The left column "eats" the right.
l_count += r->count;
l_mean += r->count * (r->mean - l_mean) / l_count; // Symmetric algo (M1*C1 + M2*C2)/(C1+C2) is numerically better, but slower
l->mean = l_mean;
l->count = l_count;
}
else
{
// not enough capacity, check the next pair
sum += l->count; // Not l_count, otherwise actual sum of elements will be different
++l;
/// We skip all the values "eaten" earlier.
if (l != r)
*l = *r;
l_mean = l->mean;
l_count = l->count;
}
++r;
}
count = sum + l_count; // Update count, it might be different due to += inaccuracy
/// At the end of the loop, all values to the right of l were "eaten".
centroids.resize(l - centroids.begin() + 1);
unmerged = 0;
}
// Ensures centroids.size() < max_centroids, independent of unprovable floating point blackbox above
compressBrute();
}
/** Adds to the digest a change in `x` with a weight of `cnt` (default 1)
*/
void add(T x, UInt64 cnt = 1)
{
addCentroid(Centroid(Value(x), Count(cnt)));
auto vx = static_cast<Value>(x);
if (cnt == 0 || std::isnan(vx))
return; // Count 0 breaks compress() assumptions, Nan breaks sort(). We treat them as no sample.
addCentroid(Centroid{vx, static_cast<Count>(cnt)});
}
void merge(const TDigest & other)
void merge(const QuantileTDigest & other)
{
for (const auto & c : other.centroids)
addCentroid(c);
@ -213,89 +283,23 @@ public:
size_t size = 0;
readVarUInt(size, buf);
if (size > params.max_unmerged)
if (size > max_centroids_deserialize)
throw Exception("Too large t-digest centroids size", ErrorCodes::TOO_LARGE_ARRAY_SIZE);
centroids.resize(size);
buf.read(reinterpret_cast<char *>(centroids.data()), size * sizeof(centroids[0]));
count = 0;
for (const auto & c : centroids)
count += c.count;
}
Count getCount()
{
return count;
}
const Centroids & getCentroids() const
{
return centroids;
}
void reset()
{
centroids.resize(0);
count = 0;
unmerged = 0;
}
};
template <typename T>
class QuantileTDigest
{
using Value = Float32;
using Count = Float32;
centroids.resize(size);
// From now, TDigest will be in invalid state if exception is thrown.
buf.read(reinterpret_cast<char *>(centroids.data()), size * sizeof(centroids[0]));
/** We store two t-digests. When an amount of elements in sub_tdigest become more than merge_threshold
* we merge sub_tdigest in main_tdigest and reset sub_tdigest. This method is needed to decrease an amount of
* centroids in t-digest (experiments show that after merge_threshold the size of t-digest significantly grows,
* but merging two big t-digest decreases it).
*/
TDigest<T> main_tdigest;
TDigest<T> sub_tdigest;
size_t merge_threshold = 1e7;
/** Linear interpolation at the point x on the line (x1, y1)..(x2, y2)
*/
static Value interpolate(Value x, Value x1, Value y1, Value x2, Value y2)
{
double k = (x - x1) / (x2 - x1);
return y1 + k * (y2 - y1);
}
void mergeTDigests()
{
main_tdigest.merge(sub_tdigest);
sub_tdigest.reset();
}
public:
void add(T x, UInt64 cnt = 1)
{
if (sub_tdigest.getCount() >= merge_threshold)
mergeTDigests();
sub_tdigest.add(x, cnt);
}
void merge(const QuantileTDigest & other)
{
mergeTDigests();
main_tdigest.merge(other.main_tdigest);
main_tdigest.merge(other.sub_tdigest);
}
void serialize(WriteBuffer & buf)
{
mergeTDigests();
main_tdigest.serialize(buf);
}
void deserialize(ReadBuffer & buf)
{
sub_tdigest.reset();
main_tdigest.deserialize(buf);
for (const auto & c : centroids)
{
if (c.count <= 0 || std::isnan(c.count) || std::isnan(c.mean)) // invalid count breaks compress(), invalid mean breaks sort()
throw Exception("Invalid centroid " + std::to_string(c.count) + ":" + std::to_string(c.mean), ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED);
count += c.count;
}
compress(); // Allows reading/writing TDigests with different epsilon/max_centroids params
}
/** Calculates the quantile q [0, 1] based on the digest.
@ -304,18 +308,15 @@ public:
template <typename ResultType>
ResultType getImpl(Float64 level)
{
mergeTDigests();
auto & centroids = main_tdigest.getCentroids();
if (centroids.empty())
return std::is_floating_point_v<ResultType> ? NAN : 0;
main_tdigest.compress();
compress();
if (centroids.size() == 1)
return centroids.front().mean;
Float64 x = level * main_tdigest.getCount();
Float64 x = level * count;
Float64 prev_x = 0;
Count sum = 0;
Value prev_mean = centroids.front().mean;
@ -343,9 +344,6 @@ public:
template <typename ResultType>
void getManyImpl(const Float64 * levels, const size_t * levels_permutation, size_t size, ResultType * result)
{
mergeTDigests();
auto & centroids = main_tdigest.getCentroids();
if (centroids.empty())
{
for (size_t result_num = 0; result_num < size; ++result_num)
@ -353,7 +351,7 @@ public:
return;
}
main_tdigest.compress();
compress();
if (centroids.size() == 1)
{
@ -362,7 +360,7 @@ public:
return;
}
Float64 x = levels[levels_permutation[0]] * main_tdigest.getCount();
Float64 x = levels[levels_permutation[0]] * count;
Float64 prev_x = 0;
Count sum = 0;
Value prev_mean = centroids.front().mean;
@ -380,7 +378,7 @@ public:
if (result_num >= size)
return;
x = levels[levels_permutation[result_num]] * main_tdigest.getCount();
x = levels[levels_permutation[result_num]] * count;
}
sum += c.count;
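The class above backs the `quantileTDigest` family of aggregate functions; a hedged usage sketch against a local server:

```bash
clickhouse-client --query "
    SELECT
        quantileTDigest(0.5)(number)  AS median,
        quantileTDigest(0.99)(number) AS p99
    FROM numbers(1000000)"
```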

View File

@ -382,6 +382,10 @@ if (USE_PROTOBUF)
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${Protobuf_INCLUDE_DIR})
endif ()
if (USE_GRPC)
dbms_target_link_libraries (PUBLIC clickhouse_grpc_protos)
endif()
if (USE_HDFS)
target_link_libraries (clickhouse_common_io PUBLIC ${HDFS3_LIBRARY})
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${HDFS3_INCLUDE_DIR})

View File

@ -207,6 +207,12 @@ void Connection::receiveHello()
/// Receive hello packet.
UInt64 packet_type = 0;
/// Prevent read after eof in readVarUInt in case of reset connection
/// (Poco should throw such exception while reading from socket but
/// sometimes it doesn't for unknown reason)
if (in->eof())
throw Poco::Net::NetException("Connection reset by peer");
readVarUInt(packet_type, *in);
if (packet_type == Protocol::Server::Hello)
{

View File

@ -161,7 +161,7 @@ MutableColumnPtr ColumnAggregateFunction::convertToValues(MutableColumnPtr colum
return res;
}
MutableColumnPtr ColumnAggregateFunction::predictValues(ColumnsWithTypeAndName & arguments, const Context & context) const
MutableColumnPtr ColumnAggregateFunction::predictValues(const ColumnsWithTypeAndName & arguments, const Context & context) const
{
MutableColumnPtr res = func->getReturnTypeToPredict()->createColumn();
res->reserve(data.size());

View File

@ -119,7 +119,7 @@ public:
const char * getFamilyName() const override { return "AggregateFunction"; }
TypeIndex getDataType() const override { return TypeIndex::AggregateFunction; }
MutableColumnPtr predictValues(ColumnsWithTypeAndName & arguments, const Context & context) const;
MutableColumnPtr predictValues(const ColumnsWithTypeAndName & arguments, const Context & context) const;
size_t size() const override
{

Some files were not shown because too many files have changed in this diff.