Merge branch 'master' into romanzhukov-DOCSUP-7086-add-s3-translation-ru

Commit e0fcc57ffc by Roman Bug, 2021-04-04 12:45:39 +03:00, committed by GitHub.
1575 changed files with 30957 additions and 90299 deletions

.gitmodules vendored

@ -93,7 +93,7 @@
url = https://github.com/ClickHouse-Extras/libunwind.git
[submodule "contrib/simdjson"]
path = contrib/simdjson
url = https://github.com/ClickHouse-Extras/simdjson.git
url = https://github.com/simdjson/simdjson.git
[submodule "contrib/rapidjson"]
path = contrib/rapidjson
url = https://github.com/ClickHouse-Extras/rapidjson
@ -221,3 +221,6 @@
[submodule "contrib/NuRaft"]
path = contrib/NuRaft
url = https://github.com/ClickHouse-Extras/NuRaft.git
[submodule "contrib/datasketches-cpp"]
path = contrib/datasketches-cpp
url = https://github.com/ClickHouse-Extras/datasketches-cpp.git


@ -39,6 +39,8 @@ else()
set(RECONFIGURE_MESSAGE_LEVEL STATUS)
endif()
enable_language(C CXX ASM)
include (cmake/arch.cmake)
include (cmake/target.cmake)
include (cmake/tools.cmake)
@ -248,19 +250,27 @@ if (ARCH_NATIVE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
endif ()
if (COMPILER_GCC OR COMPILER_CLANG)
# to make numeric_limits<__int128> work with GCC
set (_CXX_STANDARD "gnu++2a")
else()
set (_CXX_STANDARD "c++2a")
endif()
if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
# CMake < 3.12 doesn't support setting 20 as a C++ standard version.
# We will add the C++ standard flag to CMAKE_CXX_FLAGS manually for now.
# cmake < 3.12 doesn't support 20. We'll set CMAKE_CXX_FLAGS for now
# set (CMAKE_CXX_STANDARD 20)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
if (COMPILER_GCC OR COMPILER_CLANG)
# to make numeric_limits<__int128> work with GCC
set (_CXX_STANDARD "gnu++2a")
else ()
set (_CXX_STANDARD "c++2a")
endif ()
set (CMAKE_CXX_EXTENSIONS 0) # https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html#prop_tgt:CXX_EXTENSIONS
set (CMAKE_CXX_STANDARD_REQUIRED ON)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
else ()
set (CMAKE_CXX_STANDARD 20)
set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html
set (CMAKE_CXX_STANDARD_REQUIRED ON)
endif ()
set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON)
set (CMAKE_C_STANDARD_REQUIRED ON)
if (COMPILER_GCC OR COMPILER_CLANG)
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
@ -501,6 +511,7 @@ include (cmake/find/msgpack.cmake)
include (cmake/find/cassandra.cmake)
include (cmake/find/sentry.cmake)
include (cmake/find/stats.cmake)
include (cmake/find/datasketches.cmake)
set (USE_INTERNAL_CITYHASH_LIBRARY ON CACHE INTERNAL "")
find_contrib_lib(cityhash)


@ -47,6 +47,10 @@ endif()
target_include_directories(common PUBLIC .. ${CMAKE_CURRENT_BINARY_DIR}/..)
if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
endif()
# Allow explicit fallback to readline
if (NOT ENABLE_REPLXX AND ENABLE_READLINE)
message (STATUS "Attempt to fallback to readline explicitly")


@ -853,15 +853,43 @@ public:
{
if (hours == 1)
return toStartOfHour(t);
/** We round the hour number counted from midnight.
* This may split the day into unequal intervals.
* For example, if we round to an 11-hour interval,
* the day is split into the intervals 00:00:00..10:59:59, 11:00:00..21:59:59, 22:00:00..23:59:59.
* In case of daylight saving time or other transitions,
* the intervals can be shortened or prolonged by the amount of the transition.
*/
UInt64 seconds = hours * 3600;
t = roundDown(t, seconds);
const LUTIndex index = findIndex(t);
const Values & values = lut[index];
if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return t;
time_t time = t - values.date;
if (time >= values.time_at_offset_change())
{
/// Align to new hour numbers before rounding.
time += values.amount_of_offset_change();
time = time / seconds * seconds;
/// TODO check if it's correct.
return toStartOfHour(t);
/// Subtract the shift back, but only if the rounded time is not before the shift.
if (time >= values.time_at_offset_change())
{
time -= values.amount_of_offset_change();
/// Cut off at the time of the shift; otherwise we may end up with something like 23:00 of the previous day.
if (time < values.time_at_offset_change())
time = values.time_at_offset_change();
}
}
else
{
time = time / seconds * seconds;
}
return values.date + time;
}
inline time_t toStartOfMinuteInterval(time_t t, UInt64 minutes) const
@ -869,6 +897,14 @@ public:
if (minutes == 1)
return toStartOfMinute(t);
/** In contrast to the "toStartOfHourInterval" function above,
* minute intervals are not aligned to midnight.
* You will get unexpected results if, for example, you round down to a 60-minute interval
* and there was a 30-minute time shift.
*
* This behavior is not specified in the docs and may change in the future.
*/
UInt64 seconds = 60 * minutes;
return roundDown(t, seconds);
}
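To make the rounding rule described above concrete, here is a small standalone sketch. It is not ClickHouse code: the function name and the plain time_t arithmetic are illustrative assumptions, and it deliberately ignores the DST compensation that the real DateLUTImpl performs. It rounds a UTC timestamp down to an N-hour interval counted from midnight, reproducing the 00:00:00..10:59:59 / 11:00:00..21:59:59 / 22:00:00..23:59:59 split from the comment.

#include <cstdint>
#include <ctime>
#include <iostream>

/// Hypothetical helper, not part of DateLUTImpl: round t down to the start of
/// an `hours`-long interval, where intervals are counted from 00:00 UTC of t's day.
static std::time_t toStartOfHourIntervalNaive(std::time_t t, std::uint64_t hours)
{
    const std::time_t seconds_per_day = 86400;
    const std::time_t interval = static_cast<std::time_t>(hours) * 3600;
    const std::time_t day_start = t - (t % seconds_per_day);  /// midnight UTC (for non-negative t)
    const std::time_t within_day = t - day_start;
    return day_start + (within_day / interval) * interval;
}

int main()
{
    /// 2021-01-01 21:30:00 UTC with an 11-hour interval falls into the
    /// 11:00:00..21:59:59 bucket, so the result is 11:00:00 UTC (1609498800).
    std::cout << toStartOfHourIntervalNaive(1609536600, 11) << '\n';
}

In the real implementation, roundDown() together with the per-day bookkeeping in Values (time_at_offset_change / amount_of_offset_change) replaces this naive modulo arithmetic so that offset shifts are handled.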
@ -1069,11 +1105,11 @@ public:
}
template <typename DateOrTime>
inline LUTIndex addMonthsIndex(DateOrTime v, Int64 delta) const
inline LUTIndex NO_SANITIZE_UNDEFINED addMonthsIndex(DateOrTime v, Int64 delta) const
{
const Values & values = lut[toLUTIndex(v)];
Int64 month = static_cast<Int64>(values.month) + delta;
Int64 month = values.month + delta;
if (month > 0)
{


@ -25,6 +25,12 @@ namespace common
return x - y;
}
template <typename T>
inline auto NO_SANITIZE_UNDEFINED negateIgnoreOverflow(T x)
{
return -x;
}
template <typename T>
inline bool addOverflow(T x, T y, T & res)
{

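The NO_SANITIZE_UNDEFINED attribute on negateIgnoreOverflow is needed because negating the minimum value of a signed integer type overflows, which is undefined behaviour that UBSan would report. Below is a minimal standalone illustration (not ClickHouse code) of the intended wraparound, performing the negation through the unsigned type, where overflow is well defined.

#include <cstdint>
#include <iostream>
#include <limits>

int main()
{
    const std::int64_t x = std::numeric_limits<std::int64_t>::min();
    /// Plain `-x` would be undefined behaviour here. Negating via the unsigned
    /// type is well defined and wraps around; converting the result back to
    /// int64_t yields INT64_MIN again (a well-defined conversion in C++20,
    /// which this codebase targets per the CMakeLists.txt change above).
    const auto wrapped = static_cast<std::int64_t>(-static_cast<std::uint64_t>(x));
    std::cout << wrapped << '\n';  /// prints -9223372036854775808
}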

@ -1,45 +1,28 @@
// https://stackoverflow.com/questions/1413445/reading-a-password-from-stdcin
#include <common/setTerminalEcho.h>
#include <common/errnoToString.h>
#include <stdexcept>
#include <cstring>
#include <string>
#ifdef WIN32
#include <windows.h>
#else
#include <termios.h>
#include <unistd.h>
#include <errno.h>
#endif
void setTerminalEcho(bool enable)
{
#ifdef WIN32
auto handle = GetStdHandle(STD_INPUT_HANDLE);
DWORD mode;
if (!GetConsoleMode(handle, &mode))
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + std::to_string(GetLastError()));
/// Obtain terminal attributes,
/// toggle the ECHO flag
/// and set them back.
if (!enable)
mode &= ~ENABLE_ECHO_INPUT;
else
mode |= ENABLE_ECHO_INPUT;
struct termios tty{};
if (!SetConsoleMode(handle, mode))
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + std::to_string(GetLastError()));
#else
struct termios tty;
if (tcgetattr(STDIN_FILENO, &tty))
if (0 != tcgetattr(STDIN_FILENO, &tty))
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString(errno));
if (!enable)
tty.c_lflag &= ~ECHO;
else
tty.c_lflag |= ECHO;
auto ret = tcsetattr(STDIN_FILENO, TCSANOW, &tty);
if (ret)
if (enable)
tty.c_lflag |= ECHO;
else
tty.c_lflag &= ~ECHO;
if (0 != tcsetattr(STDIN_FILENO, TCSANOW, &tty))
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString(errno));
#endif
}
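For context, a hypothetical usage sketch (not taken from the ClickHouse sources) showing the typical pattern the StackOverflow link above refers to: reading a password with echo disabled and restored afterwards. Error handling and exception safety are omitted for brevity.

#include <common/setTerminalEcho.h>
#include <iostream>
#include <string>

std::string readPassword()
{
    std::cout << "Password: " << std::flush;
    setTerminalEcho(false);            /// hide the characters being typed
    std::string password;
    std::getline(std::cin, password);
    setTerminalEcho(true);             /// restore normal terminal behaviour
    std::cout << '\n';
    return password;
}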


@ -5,6 +5,11 @@ add_library (daemon
)
target_include_directories (daemon PUBLIC ..)
if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
target_link_libraries (daemon PUBLIC -Wl,-undefined,dynamic_lookup)
endif()
target_link_libraries (daemon PUBLIC loggers PRIVATE clickhouse_common_io clickhouse_common_config common ${EXECINFO_LIBRARIES})
if (USE_SENTRY)


@ -1,9 +1,9 @@
# These strings are automatically changed by release_lib.sh:
SET(VERSION_REVISION 54449)
SET(VERSION_REVISION 54450)
SET(VERSION_MAJOR 21)
SET(VERSION_MINOR 4)
SET(VERSION_MINOR 5)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH af2135ef9dc72f16fa4f229b731262c3f0a8bbdc)
SET(VERSION_DESCRIBE v21.4.1.1-prestable)
SET(VERSION_STRING 21.4.1.1)
SET(VERSION_GITHASH 3827789b3d8fd2021952e57e5110343d26daa1a1)
SET(VERSION_DESCRIBE v21.5.1.1-prestable)
SET(VERSION_STRING 21.5.1.1)
# end of autochange


@ -1,4 +1,8 @@
option (ENABLE_BASE64 "Enable base64" ${ENABLE_LIBRARIES})
if(ARCH_AMD64 OR ARCH_ARM)
option (ENABLE_BASE64 "Enable base64" ${ENABLE_LIBRARIES})
elseif(ENABLE_BASE64)
message (${RECONFIGURE_MESSAGE_LEVEL} "base64 library is only supported on x86_64 and aarch64")
endif()
if (NOT ENABLE_BASE64)
return()


@ -0,0 +1,29 @@
option (ENABLE_DATASKETCHES "Enable DataSketches" ${ENABLE_LIBRARIES})
if (ENABLE_DATASKETCHES)
option (USE_INTERNAL_DATASKETCHES_LIBRARY "Set to FALSE to use system DataSketches library instead of bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/CMakeLists.txt")
if (USE_INTERNAL_DATASKETCHES_LIBRARY)
message(WARNING "submodule contrib/datasketches-cpp is missing. To fix, try running: \n git submodule update --init --recursive")
endif()
set(MISSING_INTERNAL_DATASKETCHES_LIBRARY 1)
set(USE_INTERNAL_DATASKETCHES_LIBRARY 0)
endif()
if (USE_INTERNAL_DATASKETCHES_LIBRARY)
set(DATASKETCHES_LIBRARY theta)
set(DATASKETCHES_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/common/include" "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/include")
elseif (NOT MISSING_INTERNAL_DATASKETCHES_LIBRARY)
find_library(DATASKETCHES_LIBRARY theta)
find_path(DATASKETCHES_INCLUDE_DIR NAMES theta_sketch.hpp PATHS ${DATASKETCHES_INCLUDE_PATHS})
endif()
if (DATASKETCHES_LIBRARY AND DATASKETCHES_INCLUDE_DIR)
set(USE_DATASKETCHES 1)
endif()
endif()
message (STATUS "Using datasketches=${USE_DATASKETCHES}: ${DATASKETCHES_INCLUDE_DIR} : ${DATASKETCHES_LIBRARY}")


@ -1,7 +1,7 @@
if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT OS_DARWIN)
if(ARCH_AMD64 AND NOT OS_FREEBSD AND NOT OS_DARWIN)
option(ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Mikhail Parakhin" ${ENABLE_LIBRARIES})
elseif(ENABLE_FASTOPS)
message (${RECONFIGURE_MESSAGE_LEVEL} "Fastops library is not supported on ARM, FreeBSD and Darwin")
message (${RECONFIGURE_MESSAGE_LEVEL} "Fastops library is supported on x86_64 only, and not FreeBSD or Darwin")
endif()
if(NOT ENABLE_FASTOPS)


@ -1,4 +1,4 @@
if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND USE_PROTOBUF)
if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND USE_PROTOBUF AND NOT ARCH_PPC64LE)
option(ENABLE_HDFS "Enable HDFS" ${ENABLE_LIBRARIES})
elseif(ENABLE_HDFS OR USE_INTERNAL_HDFS3_LIBRARY)
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use HDFS3 with current configuration")


@ -62,6 +62,7 @@ if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY)
if (
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "x86_64" ) OR
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "ppc64le" ) OR
( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR
( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" )
)


@ -1,7 +1,7 @@
if(NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_ARM)
if(NOT OS_FREEBSD AND NOT APPLE)
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
elseif(ENABLE_S3 OR USE_INTERNAL_AWS_S3_LIBRARY)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on ARM, Apple or FreeBSD")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on Apple or FreeBSD")
endif()
if(NOT ENABLE_S3)


@ -6,7 +6,7 @@ set (DEFAULT_LIBS "-nodefaultlibs")
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
# See https://bugs.llvm.org/show_bug.cgi?id=16404
if (COMPILER_CLANG AND NOT (CMAKE_CROSSCOMPILING AND ARCH_AARCH64))
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
else ()
set (BUILTINS_LIBRARY "-lgcc")
endif ()


@ -86,8 +86,3 @@ if (LINKER_NAME)
message(STATUS "Using custom linker by name: ${LINKER_NAME}")
endif ()
if (ARCH_PPC64LE)
if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
endif ()
endif ()


@ -11,11 +11,6 @@ if (NOT MSVC)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
endif ()
if (USE_DEBUG_HELPERS)
set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/src/Core/iostream_debug_helpers.h")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
endif ()
# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
# Intended for exploration of new compiler warnings that may be found useful.
# Applies to clang only


@ -215,15 +215,17 @@ if (USE_EMBEDDED_COMPILER AND USE_INTERNAL_LLVM_LIBRARY)
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE STRING "")
# Yes it is set globally, but this is not enough, since llvm will add -std=c++11 after default
# And c++2a cannot be used, due to ambiguous operator !=
if (COMPILER_GCC OR COMPILER_CLANG)
set (_CXX_STANDARD "gnu++17")
else()
set (_CXX_STANDARD "c++17")
endif()
set (LLVM_CXX_STD ${_CXX_STANDARD} CACHE STRING "" FORCE)
# Need to use C++17 since the compilation is not possible with C++20 currently, due to ambiguous operator != etc.
# LLVM project will set its default value for the -std=... but our global setting from CMake will override it.
set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
set (CMAKE_CXX_STANDARD 17)
add_subdirectory (llvm/llvm)
set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
unset (CMAKE_CXX_STANDARD_bak)
target_include_directories(LLVMSupport SYSTEM BEFORE PRIVATE ${ZLIB_INCLUDE_DIR})
endif ()
@ -280,7 +282,14 @@ if (USE_AMQPCPP)
add_subdirectory (amqpcpp-cmake)
endif()
if (USE_CASSANDRA)
# Need to use C++17 since the compilation is not possible with C++20 currently.
set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
set (CMAKE_CXX_STANDARD 17)
add_subdirectory (cassandra)
set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
unset (CMAKE_CXX_STANDARD_bak)
endif()
# Should go before:

contrib/NuRaft vendored

@ -1 +1 @@
Subproject commit 3d3683e77753cfe015a05fae95ddf418e19f59e1
Subproject commit 70468326ad5d72e9497944838484c591dae054ea

contrib/arrow vendored

@ -1 +1 @@
Subproject commit 744bdfe188f018e5e05f5deebd4e9ee0a7706cf4
Subproject commit 616b3dc76a0c8450b4027ded8a78e9619d7c845f


@ -160,6 +160,12 @@ if (NOT EXTERNAL_BOOST_FOUND)
enable_language(ASM)
SET(ASM_OPTIONS "-x assembler-with-cpp")
set (SRCS_CONTEXT
${LIBRARY_DIR}/libs/context/src/dummy.cpp
${LIBRARY_DIR}/libs/context/src/execution_context.cpp
${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp
)
if (SANITIZE AND (SANITIZE STREQUAL "address" OR SANITIZE STREQUAL "thread"))
add_compile_definitions(BOOST_USE_UCONTEXT)
@ -169,39 +175,34 @@ if (NOT EXTERNAL_BOOST_FOUND)
add_compile_definitions(BOOST_USE_TSAN)
endif()
set (SRCS_CONTEXT
set (SRCS_CONTEXT ${SRCS_CONTEXT}
${LIBRARY_DIR}/libs/context/src/fiber.cpp
${LIBRARY_DIR}/libs/context/src/continuation.cpp
${LIBRARY_DIR}/libs/context/src/dummy.cpp
${LIBRARY_DIR}/libs/context/src/execution_context.cpp
${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp
)
elseif (ARCH_ARM)
set (SRCS_CONTEXT
endif()
if (ARCH_ARM)
set (SRCS_CONTEXT ${SRCS_CONTEXT}
${LIBRARY_DIR}/libs/context/src/asm/jump_arm64_aapcs_elf_gas.S
${LIBRARY_DIR}/libs/context/src/asm/make_arm64_aapcs_elf_gas.S
${LIBRARY_DIR}/libs/context/src/asm/ontop_arm64_aapcs_elf_gas.S
${LIBRARY_DIR}/libs/context/src/dummy.cpp
${LIBRARY_DIR}/libs/context/src/execution_context.cpp
${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp
)
elseif (ARCH_PPC64LE)
set (SRCS_CONTEXT ${SRCS_CONTEXT}
${LIBRARY_DIR}/libs/context/src/asm/jump_ppc64_sysv_elf_gas.S
${LIBRARY_DIR}/libs/context/src/asm/make_ppc64_sysv_elf_gas.S
${LIBRARY_DIR}/libs/context/src/asm/ontop_ppc64_sysv_elf_gas.S
)
elseif(OS_DARWIN)
set (SRCS_CONTEXT
set (SRCS_CONTEXT ${SRCS_CONTEXT}
${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_macho_gas.S
${LIBRARY_DIR}/libs/context/src/asm/make_x86_64_sysv_macho_gas.S
${LIBRARY_DIR}/libs/context/src/asm/ontop_x86_64_sysv_macho_gas.S
${LIBRARY_DIR}/libs/context/src/dummy.cpp
${LIBRARY_DIR}/libs/context/src/execution_context.cpp
${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp
)
else()
set (SRCS_CONTEXT
set (SRCS_CONTEXT ${SRCS_CONTEXT}
${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_elf_gas.S
${LIBRARY_DIR}/libs/context/src/asm/make_x86_64_sysv_elf_gas.S
${LIBRARY_DIR}/libs/context/src/asm/ontop_x86_64_sysv_elf_gas.S
${LIBRARY_DIR}/libs/context/src/dummy.cpp
${LIBRARY_DIR}/libs/context/src/execution_context.cpp
${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp
)
endif()


@ -97,12 +97,19 @@ if (NOT EXTERNAL_CCTZ_LIBRARY_FOUND OR NOT EXTERNAL_CCTZ_LIBRARY_WORKS)
set(TZ_OBJS ${TZ_OBJS} ${TZ_OBJ})
# https://stackoverflow.com/questions/14776463/compile-and-add-an-object-file-from-a-binary-with-cmake
add_custom_command(OUTPUT ${TZ_OBJ}
COMMAND cp ${TZDIR}/${TIMEZONE} ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}
COMMAND cd ${CMAKE_CURRENT_BINARY_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS}
# PPC64LE fails to do this with objcopy, use ld or lld instead
if (ARCH_PPC64LE)
add_custom_command(OUTPUT ${TZ_OBJ}
COMMAND cp ${TZDIR}/${TIMEZONE} ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}
COMMAND cd ${CMAKE_CURRENT_BINARY_DIR} && ${CMAKE_LINKER} -m elf64lppc -r -b binary -o ${TZ_OBJ} ${TIMEZONE_ID}
COMMAND rm ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID})
else()
add_custom_command(OUTPUT ${TZ_OBJ}
COMMAND cp ${TZDIR}/${TIMEZONE} ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}
COMMAND cd ${CMAKE_CURRENT_BINARY_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS}
--rename-section .data=.rodata,alloc,load,readonly,data,contents ${TIMEZONE_ID} ${TZ_OBJ}
COMMAND rm ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID})
COMMAND rm ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID})
endif()
set_source_files_properties(${TZ_OBJ} PROPERTIES EXTERNAL_OBJECT true GENERATED true)
endforeach(TIMEZONE)

contrib/datasketches-cpp vendored Submodule

@ -0,0 +1 @@
Subproject commit f915d35b2de676683493c86c585141a1e1c83334

contrib/grpc vendored

@ -1 +1 @@
Subproject commit 7436366ceb341ba5c00ea29f1645e02a2b70bf93
Subproject commit 8d558f03fe370240081424fafa76cdc9301ea14b


@ -1,7 +1,7 @@
if (SANITIZE OR NOT (ARCH_AMD64 OR ARCH_ARM) OR NOT (OS_LINUX OR OS_FREEBSD OR OS_DARWIN))
if (SANITIZE OR NOT (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE) OR NOT (OS_LINUX OR OS_FREEBSD OR OS_DARWIN))
if (ENABLE_JEMALLOC)
message (${RECONFIGURE_MESSAGE_LEVEL}
"jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64 or aarch64 on linux or freebsd.")
"jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64 or ppc64le on linux or freebsd.")
endif()
set (ENABLE_JEMALLOC OFF)
else()
@ -107,6 +107,8 @@ if (ARCH_AMD64)
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_x86_64")
elseif (ARCH_ARM)
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_aarch64")
elseif (ARCH_PPC64LE)
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le")
else ()
message (FATAL_ERROR "internal jemalloc: This arch is not supported")
endif ()


@ -0,0 +1,367 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
/* #undef JEMALLOC_PREFIX */
/* #undef JEMALLOC_CPREFIX */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#define JEMALLOC_PRIVATE_NAMESPACE je_
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#define CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 0
/*
* Number of significant bits in virtual addresses. This may be less than the
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
* bits are the same as bit 47.
*/
#define LG_VADDR 64
/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1
/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#define JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
*/
/* #undef JEMALLOC_OS_UNFAIR_LOCK */
/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
// #define JEMALLOC_HAVE_SECURE_GETENV
/*
* Defined if issetugid(2) is available.
*/
/* #undef JEMALLOC_HAVE_ISSETUGID */
/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK
/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
/*
* Defined if mach_absolute_time() is available.
*/
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#define JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
/* #undef JEMALLOC_MUTEX_INIT_CB */
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
/* #undef JEMALLOC_DEBUG */
/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS
/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */
/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */
/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */
/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
#define JEMALLOC_DSS
/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */
/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */
/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
/* #undef LG_QUANTUM */
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 16
/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
* explicit huge page support are separately configured.
*/
#define LG_HUGEPAGE 21
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#define JEMALLOC_MAPS_COALESCE
/*
* If defined, retain memory for later reuse by default rather than using e.g.
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
* common sequences of mmap()/munmap() calls will cause virtual memory map
* holes.
*/
#define JEMALLOC_RETAIN
/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS
/*
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
* Don't use this directly; instead use unreachable() from util.h
*/
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs
/*
* popcount*() functions to use for bitmapping.
*/
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#define JEMALLOC_CACHE_OBLIVIOUS
/*
* If defined, enable logging facilities. We make this a configure option to
* avoid taking extra branches everywhere.
*/
/* #undef JEMALLOC_LOG */
/*
* If defined, use readlinkat() (instead of readlink()) to follow
* /etc/malloc_conf.
*/
/* #undef JEMALLOC_READLINKAT */
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm.overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
#define JEMALLOC_HAVE_MADVISE_HUGE
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
* defined, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched;
* otherwise this behaves similarly to
* MADV_FREE, though typically with higher
* system overhead.
*/
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */
/*
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
*/
#define JEMALLOC_MADVISE_DONTDUMP
/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */
/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H 1
/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#define JEMALLOC_GLIBC_MALLOC_HOOK
/* glibc memalign hook. */
#define JEMALLOC_GLIBC_MEMALIGN_HOOK
/* pthread support */
#define JEMALLOC_HAVE_PTHREAD
/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM
/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/* GNU specific sched_getcpu support */
#define JEMALLOC_HAVE_SCHED_GETCPU
/* GNU specific sched_setaffinity support */
#define JEMALLOC_HAVE_SCHED_SETAFFINITY
/*
* If defined, all the features necessary for background threads are present.
*/
#define JEMALLOC_BACKGROUND_THREAD 1
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
/* #undef JEMALLOC_EXPORT */
/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1
/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
*/
#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */


@ -1,11 +1,9 @@
if (NOT ARCH_ARM)
if(ARCH_AMD64)
option (ENABLE_CPUID "Enable libcpuid library (only internal)" ${ENABLE_LIBRARIES})
endif()
if (ARCH_ARM AND ENABLE_CPUID)
message (${RECONFIGURE_MESSAGE_LEVEL} "cpuid is not supported on ARM")
elseif(ENABLE_CPUID)
message (${RECONFIGURE_MESSAGE_LEVEL} "libcpuid is only supported on x86_64")
set (ENABLE_CPUID 0)
endif ()
endif()
if (NOT ENABLE_CPUID)
add_library (cpuid INTERFACE)

(File diff suppressed because it is too large.)


@ -0,0 +1,63 @@
/* include/lber_types.h. Generated from lber_types.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 1998-2020 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/*
* LBER types
*/
#ifndef _LBER_TYPES_H
#define _LBER_TYPES_H
#include <ldap_cdefs.h>
LDAP_BEGIN_DECL
/* LBER boolean, enum, integers (32 bits or larger) */
#define LBER_INT_T int
/* LBER tags (32 bits or larger) */
#define LBER_TAG_T long
/* LBER socket descriptor */
#define LBER_SOCKET_T int
/* LBER lengths (32 bits or larger) */
#define LBER_LEN_T long
/* ------------------------------------------------------------ */
/* booleans, enumerations, and integers */
typedef LBER_INT_T ber_int_t;
/* signed and unsigned versions */
typedef signed LBER_INT_T ber_sint_t;
typedef unsigned LBER_INT_T ber_uint_t;
/* tags */
typedef unsigned LBER_TAG_T ber_tag_t;
/* "socket" descriptors */
typedef LBER_SOCKET_T ber_socket_t;
/* lengths */
typedef unsigned LBER_LEN_T ber_len_t;
/* signed lengths */
typedef signed LBER_LEN_T ber_slen_t;
LDAP_END_DECL
#endif /* _LBER_TYPES_H */


@ -0,0 +1,74 @@
/* include/ldap_config.h. Generated from ldap_config.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 1998-2020 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/*
* This file works in conjunction with OpenLDAP configure system.
* If you do not like the values below, adjust your configure options.
*/
#ifndef _LDAP_CONFIG_H
#define _LDAP_CONFIG_H
/* directory separator */
#ifndef LDAP_DIRSEP
#ifndef _WIN32
#define LDAP_DIRSEP "/"
#else
#define LDAP_DIRSEP "\\"
#endif
#endif
/* directory for temporary files */
#if defined(_WIN32)
# define LDAP_TMPDIR "C:\\." /* we don't have much of a choice */
#elif defined( _P_tmpdir )
# define LDAP_TMPDIR _P_tmpdir
#elif defined( P_tmpdir )
# define LDAP_TMPDIR P_tmpdir
#elif defined( _PATH_TMPDIR )
# define LDAP_TMPDIR _PATH_TMPDIR
#else
# define LDAP_TMPDIR LDAP_DIRSEP "tmp"
#endif
/* directories */
#ifndef LDAP_BINDIR
#define LDAP_BINDIR "/tmp/ldap-prefix/bin"
#endif
#ifndef LDAP_SBINDIR
#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin"
#endif
#ifndef LDAP_DATADIR
#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap"
#endif
#ifndef LDAP_SYSCONFDIR
#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap"
#endif
#ifndef LDAP_LIBEXECDIR
#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec"
#endif
#ifndef LDAP_MODULEDIR
#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap"
#endif
#ifndef LDAP_RUNDIR
#define LDAP_RUNDIR "/tmp/ldap-prefix/var"
#endif
#ifndef LDAP_LOCALEDIR
#define LDAP_LOCALEDIR ""
#endif
#endif /* _LDAP_CONFIG_H */


@ -0,0 +1,61 @@
/* include/ldap_features.h. Generated from ldap_features.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 1998-2020 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/*
* LDAP Features
*/
#ifndef _LDAP_FEATURES_H
#define _LDAP_FEATURES_H 1
/* OpenLDAP API version macros */
#define LDAP_VENDOR_VERSION 20501
#define LDAP_VENDOR_VERSION_MAJOR 2
#define LDAP_VENDOR_VERSION_MINOR 5
#define LDAP_VENDOR_VERSION_PATCH X
/*
** WORK IN PROGRESS!
**
** OpenLDAP reentrancy/thread-safeness should be dynamically
** checked using ldap_get_option().
**
** The -lldap implementation is not thread-safe.
**
** The -lldap_r implementation is:
** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety)
** but also be:
** LDAP_API_FEATURE_SESSION_THREAD_SAFE
** LDAP_API_FEATURE_OPERATION_THREAD_SAFE
**
** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE
** can be used to determine if -lldap_r is available at compile
** time. You must define LDAP_THREAD_SAFE if and only if you
** link with -lldap_r.
**
** If you fail to define LDAP_THREAD_SAFE when linking with
** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap,
** provided header definitions and declarations may be incorrect.
**
*/
/* is -lldap_r available or not */
#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1
/* LDAP v2 Referrals */
/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */
#endif /* LDAP_FEATURES */

(File diff suppressed because it is too large.)

contrib/replxx vendored

@ -1 +1 @@
Subproject commit cdb6e3f2ce4464225daf9c8beeae7db98d590bdc
Subproject commit 2b24f14594d7606792b92544bb112a6322ba34d7

contrib/simdjson vendored

@ -1 +1 @@
Subproject commit 3190d66a49059092a1753dc35595923debfc1698
Subproject commit 95b4870e20be5f97d9dcf63b23b1c6f520c366c1

debian/changelog vendored

@ -1,5 +1,5 @@
clickhouse (21.4.1.1) unstable; urgency=low
clickhouse (21.5.1.1) unstable; urgency=low
* Modified source code
-- clickhouse-release <clickhouse-release@yandex-team.ru> Sat, 06 Mar 2021 14:43:27 +0300
-- clickhouse-release <clickhouse-release@yandex-team.ru> Fri, 02 Apr 2021 18:34:26 +0300


@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.4.1.*
ARG version=21.5.1.*
RUN apt-get update \
&& apt-get install --yes --no-install-recommends \
@ -18,6 +18,7 @@ RUN apt-get update \
clickhouse-client=$version \
clickhouse-common-static=$version \
locales \
tzdata \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf \
&& apt-get clean


@ -14,12 +14,8 @@ RUN apt-get update \
lsb-release \
wget \
--yes --no-install-recommends --verbose-versions \
&& cat /etc/resolv.conf \
&& echo "nameserver 1.1.1.1" >> /etc/resolv.conf \
&& nslookup -debug apt.llvm.org \
&& ping -c1 apt.llvm.org \
&& wget -nv --retry-connrefused --tries=10 -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
&& export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
&& wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
&& echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
&& apt-key add /tmp/llvm-snapshot.gpg.key \
&& export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
@ -36,10 +32,7 @@ RUN apt-get update \
software-properties-common \
--yes --no-install-recommends
RUN cat /etc/resolv.conf \
&& echo "nameserver 1.1.1.1" >> /etc/resolv.conf \
&& nslookup -debug apt.llvm.org \
&& apt-get update \
RUN apt-get update \
&& apt-get install \
bash \
cmake \


@ -1,9 +1,24 @@
FROM ubuntu:20.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.4.1.*
ARG version=21.5.1.*
ARG gosu_ver=1.10
# Set a non-empty deb_location_url to create a docker image
# from debs produced by a CI build, for example:
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
ARG deb_location_url=""
# Set a non-empty single_binary_location_url to create a docker image
# from a single binary URL (useful for non-standard builds, e.g. with sanitizers or for arm64).
# For example (run on an aarch64 server):
# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.tech/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm
# Note: clickhouse-odbc-bridge is not supported in this mode.
ARG single_binary_location_url=""
# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
ARG DEBIAN_FRONTEND=noninteractive
# The user/group are precreated explicitly with a fixed uid/gid on purpose.
# This is especially important for rootless containers: in that case the entrypoint
# can't do chown, and the owners of mounted volumes should be configured externally.
@ -19,19 +34,37 @@ RUN groupadd -r clickhouse --gid=101 \
ca-certificates \
dirmngr \
gnupg \
locales \
wget \
tzdata \
&& mkdir -p /etc/apt/sources.list.d \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 \
&& echo $repository > /etc/apt/sources.list.d/clickhouse.list \
&& apt-get update \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --allow-unauthenticated --yes --no-install-recommends \
clickhouse-common-static=$version \
clickhouse-client=$version \
clickhouse-server=$version \
locales \
wget \
&& if [ -n "$deb_location_url" ]; then \
echo "installing from custom url with deb packages: $deb_location_url" \
&& rm -rf /tmp/clickhouse_debs \
&& mkdir -p /tmp/clickhouse_debs \
&& wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-common-static_${version}_amd64.deb" -P /tmp/clickhouse_debs \
&& wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-client_${version}_all.deb" -P /tmp/clickhouse_debs \
&& wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-server_${version}_all.deb" -P /tmp/clickhouse_debs \
&& dpkg -i /tmp/clickhouse_debs/*.deb ; \
elif [ -n "$single_binary_location_url" ]; then \
echo "installing from single binary url: $single_binary_location_url" \
&& rm -rf /tmp/clickhouse_binary \
&& mkdir -p /tmp/clickhouse_binary \
&& wget --progress=bar:force:noscroll "$single_binary_location_url" -O /tmp/clickhouse_binary/clickhouse \
&& chmod +x /tmp/clickhouse_binary/clickhouse \
&& /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \
else \
echo "installing from repository: $repository" \
&& apt-get update \
&& apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
&& apt-get install --allow-unauthenticated --yes --no-install-recommends \
clickhouse-common-static=$version \
clickhouse-client=$version \
clickhouse-server=$version ; \
fi \
&& clickhouse-local -q 'SELECT * FROM system.build_options' \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \


@ -21,7 +21,9 @@ RUN addgroup -S -g 101 clickhouse \
&& chown clickhouse:clickhouse /var/lib/clickhouse \
&& chown root:clickhouse /var/log/clickhouse-server \
&& chmod +x /entrypoint.sh \
&& apk add --no-cache su-exec bash \
&& apk add --no-cache su-exec bash tzdata \
&& cp /usr/share/zoneinfo/UTC /etc/localtime \
&& echo "UTC" > /etc/timezone \
&& chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client
# we need to allow "others" access to clickhouse folder, because docker container


@ -38,17 +38,16 @@ if ! $gosu test -f "$CLICKHOUSE_CONFIG" -a -r "$CLICKHOUSE_CONFIG"; then
exit 1
fi
# port is needed to check if clickhouse-server is ready for connections
HTTP_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=http_port)"
# get CH directories locations
DATA_DIR="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=path || true)"
TMP_DIR="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=tmp_path || true)"
USER_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=user_files_path || true)"
LOG_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=logger.log || true)"
LOG_DIR="$(dirname "$LOG_PATH" || true)"
LOG_DIR=""
if [ -n "$LOG_PATH" ]; then LOG_DIR="$(dirname "$LOG_PATH")"; fi
ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=logger.errorlog || true)"
ERROR_LOG_DIR="$(dirname "$ERROR_LOG_PATH" || true)"
ERROR_LOG_DIR=""
if [ -n "$ERROR_LOG_PATH" ]; then ERROR_LOG_DIR="$(dirname "$ERROR_LOG_PATH")"; fi
FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=format_schema_path || true)"
CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"
@ -106,6 +105,9 @@ EOT
fi
if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
# port is needed to check if clickhouse-server is ready for connections
HTTP_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=http_port)"
# Listen only on localhost until the initialization is done
$gosu /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
pid="$!"


@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.4.1.*
ARG version=21.5.1.*
RUN apt-get update && \
apt-get install -y apt-transport-https dirmngr && \


@ -4,9 +4,8 @@ FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
RUN apt-get update \
&& apt-get install apt-utils ca-certificates lsb-release wget gnupg apt-transport-https \
&& apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \
--yes --no-install-recommends --verbose-versions \
&& echo "nameserver 1.1.1.1" >> /etc/resolv.conf \
&& export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
&& wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
&& echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
@ -32,8 +31,7 @@ RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \
&& chmod +x dpkg-deb \
&& cp dpkg-deb /usr/bin
RUN echo "nameserver 1.1.1.1" >> /etc/resolv.conf \
&& apt-get update \
RUN apt-get update \
&& apt-get install \
clang-${LLVM_VERSION} \
debhelper \


@ -1,7 +1,7 @@
# docker build -t yandex/clickhouse-fasttest .
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=10
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=11
RUN apt-get update \
&& apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \
@ -43,20 +43,20 @@ RUN apt-get update \
clang-tidy-${LLVM_VERSION} \
cmake \
curl \
lsof \
expect \
fakeroot \
git \
gdb \
git \
gperf \
lld-${LLVM_VERSION} \
llvm-${LLVM_VERSION} \
lsof \
moreutils \
ninja-build \
psmisc \
python3 \
python3-pip \
python3-lxml \
python3-pip \
python3-requests \
python3-termcolor \
rename \


@ -8,6 +8,9 @@ trap 'kill $(jobs -pr) ||:' EXIT
# that we can run the "everything else" stage from the cloned source.
stage=${stage:-}
# Compiler version, normally set by Dockerfile
export LLVM_VERSION=${LLVM_VERSION:-11}
# A variable to pass additional flags to CMake.
# Here we explicitly default it to nothing so that bash doesn't complain about
# it being undefined. Also read it as array so that we can pass an empty list
@ -70,7 +73,7 @@ function start_server
--path "$FASTTEST_DATA"
--user_files_path "$FASTTEST_DATA/user_files"
--top_level_domains_path "$FASTTEST_DATA/top_level_domains"
--test_keeper_server.log_storage_path "$FASTTEST_DATA/coordination"
--keeper_server.log_storage_path "$FASTTEST_DATA/coordination"
)
clickhouse-server "${opts[@]}" &>> "$FASTTEST_OUTPUT/server.log" &
server_pid=$!
@ -124,22 +127,26 @@ continue
function clone_root
{
git clone https://github.com/ClickHouse/ClickHouse.git -- "$FASTTEST_SOURCE" | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/clone_log.txt"
git clone --depth 1 https://github.com/ClickHouse/ClickHouse.git -- "$FASTTEST_SOURCE" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/clone_log.txt"
(
cd "$FASTTEST_SOURCE"
if [ "$PULL_REQUEST_NUMBER" != "0" ]; then
if git fetch origin "+refs/pull/$PULL_REQUEST_NUMBER/merge"; then
if git fetch --depth 1 origin "+refs/pull/$PULL_REQUEST_NUMBER/merge"; then
git checkout FETCH_HEAD
echo 'Clonned merge head'
echo "Checked out pull/$PULL_REQUEST_NUMBER/merge ($(git rev-parse FETCH_HEAD))"
else
git fetch origin "+refs/pull/$PULL_REQUEST_NUMBER/head"
git fetch --depth 1 origin "+refs/pull/$PULL_REQUEST_NUMBER/head"
git checkout "$COMMIT_SHA"
echo 'Checked out to commit'
echo "Checked out nominal SHA $COMMIT_SHA for PR $PULL_REQUEST_NUMBER"
fi
else
if [ -v COMMIT_SHA ]; then
git fetch --depth 1 origin "$COMMIT_SHA"
git checkout "$COMMIT_SHA"
echo "Checked out nominal SHA $COMMIT_SHA for master"
else
echo "Using default repository head $(git rev-parse HEAD)"
fi
fi
)
@ -181,7 +188,7 @@ function clone_submodules
)
git submodule sync
git submodule update --init --recursive "${SUBMODULES_TO_UPDATE[@]}"
git submodule update --depth 1 --init --recursive "${SUBMODULES_TO_UPDATE[@]}"
git submodule foreach git reset --hard
git submodule foreach git checkout @ -f
git submodule foreach git clean -xfd
@ -215,7 +222,7 @@ function run_cmake
(
cd "$FASTTEST_BUILD"
cmake "$FASTTEST_SOURCE" -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 "${CMAKE_LIBS_CONFIG[@]}" "${FASTTEST_CMAKE_FLAGS[@]}" | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/cmake_log.txt"
cmake "$FASTTEST_SOURCE" -DCMAKE_CXX_COMPILER="clang++-${LLVM_VERSION}" -DCMAKE_C_COMPILER="clang-${LLVM_VERSION}" "${CMAKE_LIBS_CONFIG[@]}" "${FASTTEST_CMAKE_FLAGS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/cmake_log.txt"
)
}
@ -223,7 +230,7 @@ function build
{
(
cd "$FASTTEST_BUILD"
time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt"
time ninja clickhouse-bundle 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt"
if [ "$COPY_CLICKHOUSE_BINARY_TO_OUTPUT" -eq "1" ]; then
cp programs/clickhouse "$FASTTEST_OUTPUT/clickhouse"
fi
@ -292,6 +299,7 @@ function run_tests
01318_decrypt # Depends on OpenSSL
01663_aes_msan # Depends on OpenSSL
01667_aes_args_check # Depends on OpenSSL
01776_decrypt_aead_size_check # Depends on OpenSSL
01281_unsucceeded_insert_select_queries_counter
01292_create_user
01294_lazy_database_concurrent
@ -419,7 +427,7 @@ case "$stage" in
# See the compatibility hacks in `clone_root` stage above. Remove at the same time,
# after Nov 1, 2020.
cd "$FASTTEST_WORKSPACE"
clone_submodules | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/submodule_log.txt"
clone_submodules 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/submodule_log.txt"
;&
"run_cmake")
run_cmake
@ -430,7 +438,7 @@ case "$stage" in
"configure")
# The `install_log.txt` is also needed for compatibility with old CI task --
# if there is no log, it will decide that build failed.
configure | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt"
configure 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt"
;&
"run_tests")
run_tests


@ -69,11 +69,25 @@ function watchdog
killall -9 clickhouse-client ||:
}
function filter_exists
{
local path
for path in "$@"; do
if [ -e "$path" ]; then
echo "$path"
else
echo "'$path' does not exist" >&2
fi
done
}
function fuzz
{
# Obtain the list of newly added tests. They will be fuzzed in a more extreme way than other tests.
# Don't overwrite the NEW_TESTS_OPT so that it can be set from the environment.
NEW_TESTS="$(grep -P 'tests/queries/0_stateless/.*\.sql' ci-changed-files.txt | sed -r -e 's!^!ch/!' | sort -R)"
# ci-changed-files.txt also contains files that have been deleted or renamed; filter them out.
NEW_TESTS="$(filter_exists $NEW_TESTS)"
if [[ -n "$NEW_TESTS" ]]
then
NEW_TESTS_OPT="${NEW_TESTS_OPT:---interleave-queries-file ${NEW_TESTS}}"


@ -3,7 +3,7 @@
<mysql_port remove="remove"/>
<interserver_http_port remove="remove"/>
<tcp_with_proxy_port remove="remove"/>
<test_keeper_server remove="remove"/>
<keeper_server remove="remove"/>
<listen_host>::</listen_host>
<logger>


@ -266,14 +266,13 @@ for query_index in queries_to_run:
try:
# Will also detect too long queries during warmup stage
res = c.execute(q, query_id = prewarm_id, settings = {'max_execution_time': 10})
res = c.execute(q, query_id = prewarm_id, settings = {'max_execution_time': args.max_query_seconds})
except clickhouse_driver.errors.Error as e:
# Add query id to the exception to make debugging easier.
e.args = (prewarm_id, *e.args)
e.message = prewarm_id + ': ' + e.message
raise
print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}')
except KeyboardInterrupt:
raise
@ -320,7 +319,7 @@ for query_index in queries_to_run:
for conn_index, c in enumerate(this_query_connections):
try:
res = c.execute(q, query_id = run_id)
res = c.execute(q, query_id = run_id, settings = {'max_execution_time': args.max_query_seconds})
except clickhouse_driver.errors.Error as e:
# Add query id to the exception to make debugging easier.
e.args = (run_id, *e.args)


@ -2,7 +2,6 @@
FROM ubuntu:20.04
RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git openjdk-14-jdk maven python3 --yes --no-install-recommends
RUN wget https://github.com/sqlancer/sqlancer/archive/master.zip -O /sqlancer.zip
RUN mkdir /sqlancer && \
cd /sqlancer && \


@ -13,6 +13,25 @@ dpkg -i package_folder/clickhouse-test_*.deb
function start()
{
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
# NOTE: We run "clickhouse server" instead of "clickhouse-server"
# so that "pidof clickhouse-server" returns a single pid, that of the main instance.
# We will run the main instance using "service clickhouse-server start".
sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \
-- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
--logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
--tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
--mysql_port 19004 \
--keeper_server.tcp_port 19181 --keeper_server.server_id 2
sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \
-- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \
--logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \
--tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
--mysql_port 29004 \
--keeper_server.tcp_port 29181 --keeper_server.server_id 3
fi
counter=0
until clickhouse-client --query "SELECT 1"
do
@ -35,9 +54,8 @@ start
/s3downloader --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse
clickhouse-client --query "SHOW DATABASES"
clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
clickhouse-client --query "CREATE DATABASE test"
clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
service clickhouse-server restart
# Wait for server to start accepting connections
@ -47,24 +65,50 @@ for _ in {1..120}; do
done
clickhouse-client --query "SHOW TABLES FROM datasets"
clickhouse-client --query "SHOW TABLES FROM test"
clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
clickhouse-client --query "SHOW TABLES FROM test"
if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test ; then
SKIP_LIST_OPT="--use-skip-list"
fi
# We can have several additional options, so we pass them as an array because it's
# more ideologically correct.
read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--replicated-database')
clickhouse-client --query "CREATE DATABASE test ON CLUSTER 'test_cluster_database_replicated'
ENGINE=Replicated('/test/clickhouse/db/test', '{shard}', '{replica}')"
clickhouse-client --query "CREATE TABLE test.hits AS datasets.hits_v1"
clickhouse-client --query "CREATE TABLE test.visits AS datasets.visits_v1"
clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1"
clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1"
clickhouse-client --query "DROP TABLE datasets.hits_v1"
clickhouse-client --query "DROP TABLE datasets.visits_v1"
MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
else
clickhouse-client --query "CREATE DATABASE test"
clickhouse-client --query "SHOW TABLES FROM test"
clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
fi
clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
clickhouse-client --query "SHOW TABLES FROM test"
clickhouse-client --query "SELECT count() FROM test.hits"
clickhouse-client --query "SELECT count() FROM test.visits"
function run_tests()
{
set -x
# We can have several additional options, so we pass them as an array because it's
# more ideologically correct.
read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--replicated-database')
fi
clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --use-skip-list --print-time "${ADDITIONAL_OPTIONS[@]}" \
"$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
}
export -f run_tests
timeout "$MAX_RUN_TIME" bash -c run_tests ||:
./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
@ -73,3 +117,9 @@ mv /var/log/clickhouse-server/stderr.log /test_output/ ||:
if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||:
fi
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
fi

View File

@ -12,6 +12,8 @@ UNKNOWN_SIGN = "[ UNKNOWN "
SKIPPED_SIGN = "[ SKIPPED "
HUNG_SIGN = "Found hung queries in processlist"
NO_TASK_TIMEOUT_SIGN = "All tests have finished"
def process_test_log(log_path):
total = 0
skipped = 0
@ -19,10 +21,13 @@ def process_test_log(log_path):
failed = 0
success = 0
hung = False
task_timeout = True
test_results = []
with open(log_path, 'r') as test_file:
for line in test_file:
line = line.strip()
if NO_TASK_TIMEOUT_SIGN in line:
task_timeout = False
if HUNG_SIGN in line:
hung = True
if any(sign in line for sign in (OK_SIGN, FAIL_SING, UNKNOWN_SIGN, SKIPPED_SIGN)):
@ -52,7 +57,7 @@ def process_test_log(log_path):
else:
success += int(OK_SIGN in line)
test_results.append((test_name, "OK", test_time))
return total, skipped, unknown, failed, success, hung, test_results
return total, skipped, unknown, failed, success, hung, task_timeout, test_results
def process_result(result_path):
test_results = []
@ -68,7 +73,7 @@ def process_result(result_path):
state = "error"
if result_path and os.path.exists(result_path):
total, skipped, unknown, failed, success, hung, test_results = process_test_log(result_path)
total, skipped, unknown, failed, success, hung, task_timeout, test_results = process_test_log(result_path)
is_flacky_check = 1 < int(os.environ.get('NUM_TRIES', 1))
# If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately)
# But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped.
@ -78,6 +83,9 @@ def process_result(result_path):
if hung:
description = "Some queries hung, "
state = "failure"
elif task_timeout:
description = "Timeout, "
state = "failure"
else:
description = ""

View File

@ -34,36 +34,61 @@ if [ "$NUM_TRIES" -gt "1" ]; then
# simplest way to forward env variables to the server
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
sleep 5
else
service clickhouse-server start && sleep 5
service clickhouse-server start
fi
if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
SKIP_LIST_OPT="--use-skip-list"
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server1/config.xml --daemon \
-- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \
--logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \
--tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
--mysql_port 19004 \
--keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
--macros.replica r2 # It doesn't work :(
sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \
-- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \
--logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \
--tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
--mysql_port 29004 \
--keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
--macros.shard s2 # It doesn't work :(
MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
fi
sleep 5
function run_tests()
{
set -x
# We can have several additional options, so we pass them as an array because it's
# more ideologically correct.
read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"
# Skip these tests, because they fail when we rerun them multiple times
if [ "$NUM_TRIES" -gt "1" ]; then
ADDITIONAL_OPTIONS+=('--order=random')
ADDITIONAL_OPTIONS+=('--skip')
ADDITIONAL_OPTIONS+=('00000_no_tests_to_skip')
ADDITIONAL_OPTIONS+=('--jobs')
ADDITIONAL_OPTIONS+=('4')
# Note that flaky check must be run in parallel, but for now we run
# everything in parallel except DatabaseReplicated. See below.
fi
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--replicated-database')
else
# Too many tests fail for DatabaseReplicated in parallel. All other
# configurations are OK.
ADDITIONAL_OPTIONS+=('--jobs')
ADDITIONAL_OPTIONS+=('8')
fi
clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
--test-runs "$NUM_TRIES" \
"$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
--use-skip-list --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
| ts '%Y-%m-%d %H:%M:%S' \
| tee -a test_output/test_result.txt
}
@ -74,10 +99,23 @@ timeout "$MAX_RUN_TIME" bash -c run_tests ||:
./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz ||:
clickhouse-client -q "system flush logs" ||:
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz &
clickhouse-client -q "select * from system.query_thread_log format TSVWithNamesAndTypes" | pigz > /test_output/query-thread-log.tsv.gz &
wait ||:
mv /var/log/clickhouse-server/stderr.log /test_output/ ||:
if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||:
fi
tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
fi

View File

@ -126,7 +126,13 @@ Contribute all new information in English language. Other languages are translat
### Adding a New File
When adding a new file:
When you add a new file, it should end with a link like:
`[Original article](https://clickhouse.tech/docs/<path-to-the-page>) <!--hide-->`
and there should be **a new empty line** after it.
{## When adding a new file:
- Make symbolic links for all other languages. You can use the following commands:
@ -134,7 +140,7 @@ When adding a new file:
$ cd /ClickHouse/clone/directory/docs
$ ln -sr en/new/file.md lang/new/file.md
```
##}
<a name="adding-a-new-language"/>
### Adding a New Language
@ -195,8 +201,11 @@ Templates:
- [Function](_description_templates/template-function.md)
- [Setting](_description_templates/template-setting.md)
- [Server Setting](_description_templates/template-server-setting.md)
- [Database or Table engine](_description_templates/template-engine.md)
- [System table](_description_templates/template-system-table.md)
- [Data type](_description_templates/data-type.md)
- [Statement](_description_templates/statement.md)
<a name="how-to-build-docs"/>

View File

@ -5,43 +5,77 @@ toc_title: Build on Mac OS X
# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
Build should work on Mac OS X 10.15 (Catalina).
Build should work on x86_64 (Intel) based macOS 10.15 (Catalina) and higher with a recent Xcode's native AppleClang, or with Homebrew's vanilla Clang or GCC compilers.
## Install Homebrew {#install-homebrew}
``` bash
$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
$ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
```
## Install Xcode and Command Line Tools {#install-xcode-and-command-line-tools}
Install the latest [Xcode](https://apps.apple.com/am/app/xcode/id497799835?mt=12) from App Store.
Open it at least once to accept the end-user license agreement and automatically install the required components.
Then, make sure that the latest Command Line Tools are installed and selected in the system:
``` bash
$ sudo rm -rf /Library/Developer/CommandLineTools
$ sudo xcode-select --install
```
Reboot.
## Install Required Compilers, Tools, and Libraries {#install-required-compilers-tools-and-libraries}
``` bash
$ brew install cmake ninja libtool gettext llvm
$ brew update
$ brew install cmake ninja libtool gettext llvm gcc
```
## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
``` bash
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
```
or
``` bash
$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git
$ cd ClickHouse
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git # or https://github.com/ClickHouse/ClickHouse.git
```
## Build ClickHouse {#build-clickhouse}
> Please note: ClickHouse doesn't support building with the native Apple Clang compiler; we need to use clang from LLVM.
To build using Xcode's native AppleClang compiler:
``` bash
$ cd ClickHouse
$ rm -rf build
$ mkdir build
$ cd build
$ cmake .. -DCMAKE_C_COMPILER=`brew --prefix llvm`/bin/clang -DCMAKE_CXX_COMPILER=`brew --prefix llvm`/bin/clang++ -DCMAKE_PREFIX_PATH=`brew --prefix llvm`
$ ninja
$ cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
$ cmake --build . --config RelWithDebInfo
$ cd ..
```
To build using Homebrew's vanilla Clang compiler:
``` bash
$ cd ClickHouse
$ rm -rf build
$ mkdir build
$ cd build
$ cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
$ cmake --build . --config RelWithDebInfo
$ cd ..
```
To build using Homebrew's vanilla GCC compiler:
``` bash
$ cd ClickHouse
$ rm -rf build
$ mkdir build
$ cd build
$ cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-10 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-10 -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
$ cmake --build . --config RelWithDebInfo
$ cd ..
```

View File

@ -18,4 +18,8 @@ You can also use the following database engines:
- [Lazy](../../engines/database-engines/lazy.md)
- [Atomic](../../engines/database-engines/atomic.md)
- [PostgreSQL](../../engines/database-engines/postgresql.md)
[Original article](https://clickhouse.tech/docs/en/database_engines/) <!--hide-->

View File

@ -69,7 +69,7 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([
- MySQL `INSERT` query is converted into `INSERT` with `_sign=1`.
- MySQl `DELETE` query is converted into `INSERT` with `_sign=-1`.
- MySQL `DELETE` query is converted into `INSERT` with `_sign=-1`.
- MySQL `UPDATE` query is converted into `INSERT` with `_sign=-1` and `INSERT` with `_sign=1`.

View File

@ -0,0 +1,138 @@
---
toc_priority: 35
toc_title: PostgreSQL
---
# PostgreSQL {#postgresql}
Allows connecting to databases on a remote [PostgreSQL](https://www.postgresql.org) server. Supports read and write operations (`SELECT` and `INSERT` queries) to exchange data between ClickHouse and PostgreSQL.
Gives real-time access to the table list and table structure of the remote PostgreSQL server via `SHOW TABLES` and `DESCRIBE TABLE` queries.
Supports table structure modifications (`ALTER TABLE ... ADD|DROP COLUMN`). If the `use_table_cache` parameter (see the Engine Parameters below) is set to `1`, the table structure is cached and not checked for modifications, but it can be updated with `DETACH` and `ATTACH` queries.
## Creating a Database {#creating-a-database}
``` sql
CREATE DATABASE test_database
ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `use_table_cache`]);
```
**Engine Parameters**
- `host:port` — PostgreSQL server address.
- `database` — Remote database name.
- `user` — PostgreSQL user.
- `password` — User password.
- `use_table_cache` — Defines if the database table structure is cached or not. Optional. Default value: `0`.
## Data Types Support {#data_types-support}
| PostgreSQL | ClickHouse |
|------------------|--------------------------------------------------------------|
| DATE | [Date](../../sql-reference/data-types/date.md) |
| TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
| REAL | [Float32](../../sql-reference/data-types/float.md) |
| DOUBLE | [Float64](../../sql-reference/data-types/float.md) |
| DECIMAL, NUMERIC | [Decimal](../../sql-reference/data-types/decimal.md) |
| SMALLINT | [Int16](../../sql-reference/data-types/int-uint.md) |
| INTEGER | [Int32](../../sql-reference/data-types/int-uint.md) |
| BIGINT | [Int64](../../sql-reference/data-types/int-uint.md) |
| SERIAL | [UInt32](../../sql-reference/data-types/int-uint.md) |
| BIGSERIAL | [UInt64](../../sql-reference/data-types/int-uint.md) |
| TEXT, CHAR | [String](../../sql-reference/data-types/string.md) |
| INTEGER | Nullable([Int32](../../sql-reference/data-types/int-uint.md))|
| ARRAY | [Array](../../sql-reference/data-types/array.md) |
## Examples of Use {#examples-of-use}
Database in ClickHouse, exchanging data with the PostgreSQL server:
``` sql
CREATE DATABASE test_database
ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 1);
```
``` sql
SHOW DATABASES;
```
``` text
┌─name──────────┐
│ default │
│ test_database │
│ system │
└───────────────┘
```
``` sql
SHOW TABLES FROM test_database;
```
``` text
┌─name───────┐
│ test_table │
└────────────┘
```
Reading data from the PostgreSQL table:
``` sql
SELECT * FROM test_database.test_table;
```
``` text
┌─id─┬─value─┐
│ 1 │ 2 │
└────┴───────┘
```
Writing data to the PostgreSQL table:
``` sql
INSERT INTO test_database.test_table VALUES (3,4);
SELECT * FROM test_database.test_table;
```
``` text
┌─int_id─┬─value─┐
│ 1 │ 2 │
│ 3 │ 4 │
└────────┴───────┘
```
Suppose the table structure was modified in PostgreSQL:
``` sql
postgre> ALTER TABLE test_table ADD COLUMN data Text
```
As the `use_table_cache` parameter was set to `1` when the database was created, the table structure in ClickHouse was cached and therefore not modified:
``` sql
DESCRIBE TABLE test_database.test_table;
```
``` text
┌─name───┬─type──────────────┐
│ id │ Nullable(Integer) │
│ value │ Nullable(Integer) │
└────────┴───────────────────┘
```
After detaching the table and attaching it again, the structure was updated:
``` sql
DETACH TABLE test_database.test_table;
ATTACH TABLE test_database.test_table;
DESCRIBE TABLE test_database.test_table;
```
``` text
┌─name───┬─type──────────────┐
│ id │ Nullable(Integer) │
│ value │ Nullable(Integer) │
│ data │ Nullable(String) │
└────────┴───────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/database-engines/postgresql/) <!--hide-->

View File

@ -47,12 +47,17 @@ Engines for communicating with other data storage and processing systems.
Engines in the family:
- [Kafka](../../engines/table-engines/integrations/kafka.md#kafka)
- [MySQL](../../engines/table-engines/integrations/mysql.md#mysql)
- [ODBC](../../engines/table-engines/integrations/odbc.md#table-engine-odbc)
- [JDBC](../../engines/table-engines/integrations/jdbc.md#table-engine-jdbc)
- [HDFS](../../engines/table-engines/integrations/hdfs.md#hdfs)
- [S3](../../engines/table-engines/integrations/s3.md#table-engine-s3)
- [ODBC](../../engines/table-engines/integrations/odbc.md)
- [JDBC](../../engines/table-engines/integrations/jdbc.md)
- [MySQL](../../engines/table-engines/integrations/mysql.md)
- [MongoDB](../../engines/table-engines/integrations/mongodb.md)
- [HDFS](../../engines/table-engines/integrations/hdfs.md)
- [S3](../../engines/table-engines/integrations/s3.md)
- [Kafka](../../engines/table-engines/integrations/kafka.md)
- [EmbeddedRocksDB](../../engines/table-engines/integrations/embedded-rocksdb.md)
- [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md)
- [PostgreSQL](../../engines/table-engines/integrations/postgresql.md)
### Special Engines {#special-engines}

View File

@ -1,5 +1,5 @@
---
toc_priority: 6
toc_priority: 9
toc_title: EmbeddedRocksDB
---

View File

@ -1,5 +1,5 @@
---
toc_priority: 4
toc_priority: 6
toc_title: HDFS
---

View File

@ -1,6 +1,6 @@
---
toc_folder_title: Integrations
toc_priority: 30
toc_priority: 1
---
# Table Engines for Integrations {#table-engines-for-integrations}
@ -19,5 +19,3 @@ List of supported integrations:
- [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md)
- [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md)
- [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md)
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/) <!--hide-->

View File

@ -1,5 +1,5 @@
---
toc_priority: 2
toc_priority: 3
toc_title: JDBC
---

View File

@ -1,5 +1,5 @@
---
toc_priority: 5
toc_priority: 8
toc_title: Kafka
---

View File

@ -1,5 +1,5 @@
---
toc_priority: 7
toc_priority: 5
toc_title: MongoDB
---

View File

@ -1,5 +1,5 @@
---
toc_priority: 3
toc_priority: 4
toc_title: MySQL
---

View File

@ -1,5 +1,5 @@
---
toc_priority: 1
toc_priority: 2
toc_title: ODBC
---

View File

@ -1,11 +1,11 @@
---
toc_priority: 8
toc_priority: 11
toc_title: PostgreSQL
---
# PosgtreSQL {#postgresql}
# PostgreSQL {#postgresql}
The PostgreSQL engine allows you to perform `SELECT` queries on data that is stored on a remote PostgreSQL server.
The PostgreSQL engine allows performing `SELECT` and `INSERT` queries on data that is stored on a remote PostgreSQL server.
## Creating a Table {#creating-a-table}
@ -15,7 +15,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
...
) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password');
) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'[, `schema`]);
```
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
@ -29,25 +29,51 @@ The table structure can differ from the original PostgreSQL table structure:
**Engine Parameters**
- `host:port` — PostgreSQL server address.
- `database` — Remote database name.
- `table` — Remote table name.
- `user` — PostgreSQL user.
- `password` — User password.
- `schema` — Non-default table schema. Optional.
SELECT Queries on PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside read-only PostgreSQL transaction with commit after each `SELECT` query.
## Implementation Details {#implementation-details}
Simple `WHERE` clauses such as `=, !=, >, >=, <, <=, IN` are executed on the PostgreSQL server.
`SELECT` queries on the PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside a read-only PostgreSQL transaction with a commit after each `SELECT` query.
Simple `WHERE` clauses such as `=`, `!=`, `>`, `>=`, `<`, `<=`, and `IN` are executed on the PostgreSQL server.
All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to PostgreSQL finishes.
INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement.
`INSERT` queries on the PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside a PostgreSQL transaction with auto-commit after each `INSERT` statement.
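For illustration, a sketch reusing the `postgresql_table` table from the usage example below: the equality filter is executed on the PostgreSQL server, while the `LIMIT` is applied in ClickHouse after the data is received:

``` sql
SELECT * FROM postgresql_table WHERE str = 'test' LIMIT 10;
```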
PostgreSQL Array types converts into ClickHouse arrays.
Be careful in PostgreSQL an array data created like a type_name[] may contain multi-dimensional arrays of different dimensions in different table rows in same column, but in ClickHouse it is only allowed to have multidimensional arrays of the same count of dimensions in all table rows in same column.
PostgreSQL `Array` types are converted into ClickHouse arrays.
!!! info "Note"
Be careful: in PostgreSQL, array data created as `type_name[]` may contain multi-dimensional arrays of different dimensions in different rows of the same column, while ClickHouse only allows multidimensional arrays with the same number of dimensions in all rows of the same column.
Replica priority for the PostgreSQL dictionary source is supported. The bigger the number in the map, the lower the priority. The highest priority is `0`.
In the example below replica `example01-1` has the highest priority:
```xml
<postgresql>
<port>5432</port>
<user>clickhouse</user>
<password>qwerty</password>
<replica>
<host>example01-1</host>
<priority>1</priority>
</replica>
<replica>
<host>example01-2</host>
<priority>2</priority>
</replica>
<db>db_name</db>
<table>table_name</table>
<where>id=10</where>
<invalidate_query>SQL_QUERY</invalidate_query>
</postgresql>
```
## Usage Example {#usage-example}
@ -64,10 +90,10 @@ PRIMARY KEY (int_id));
CREATE TABLE
postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2);
postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1
postgresql> select * from test;
postgresql> SELECT * FROM test;
int_id | int_nullable | float | str | float_nullable
--------+--------------+-------+------+----------------
1 | | 2 | test |
@ -87,20 +113,33 @@ ENGINE = PostgreSQL('localhost:5432', 'public', 'test', 'postges_user', 'postgre
```
``` sql
SELECT * FROM postgresql_table WHERE str IN ('test')
SELECT * FROM postgresql_table WHERE str IN ('test');
```
``` text
┌─float_nullable─┬─str──┬─int_id─┐
│ ᴺᵁᴸᴸ │ test │ 1 │
└────────────────┴──────┴────────┘
1 rows in set. Elapsed: 0.019 sec.
```
Using Non-default Schema:
## See Also {#see-also}
```text
postgres=# CREATE SCHEMA "nice.schema";
- [The postgresql table function](../../../sql-reference/table-functions/postgresql.md)
postgres=# CREATE TABLE "nice.schema"."nice.table" (a integer);
postgres=# INSERT INTO "nice.schema"."nice.table" SELECT i FROM generate_series(0, 99) as t(i)
```
```sql
CREATE TABLE pg_table_schema_with_dots (a UInt32)
ENGINE = PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgresql_user', 'password', 'nice.schema');
```
**See Also**
- [The `postgresql` table function](../../../sql-reference/table-functions/postgresql.md)
- [Using PostgreSQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/postgresql/) <!--hide-->

View File

@ -1,5 +1,5 @@
---
toc_priority: 6
toc_priority: 10
toc_title: RabbitMQ
---

View File

@ -1,5 +1,5 @@
---
toc_priority: 4
toc_priority: 7
toc_title: S3
---

View File

@ -19,7 +19,7 @@ $ echo '{"foo":"bar"}' | curl 'http://localhost:8123/?query=INSERT%20INTO%20test
Using [CLI interface](../../interfaces/cli.md):
``` bash
$ echo '{"foo":"bar"}' | clickhouse-client ---query="INSERT INTO test FORMAT JSONEachRow"
$ echo '{"foo":"bar"}' | clickhouse-client --query="INSERT INTO test FORMAT JSONEachRow"
```
Instead of inserting data manually, you might consider using one of the [client libraries](../../interfaces/index.md).

View File

@ -50,7 +50,7 @@ The supported formats are:
| [Parquet](#data-format-parquet) | ✔ | ✔ |
| [Arrow](#data-format-arrow) | ✔ | ✔ |
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
| [ORC](#data-format-orc) | ✔ | ✗ |
| [ORC](#data-format-orc) | ✔ | ✔ |
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [Native](#native) | ✔ | ✔ |
@ -1284,32 +1284,33 @@ To exchange data with Hadoop, you can use [HDFS table engine](../engines/table-e
## ORC {#data-format-orc}
[Apache ORC](https://orc.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. You can only insert data in this format to ClickHouse.
[Apache ORC](https://orc.apache.org/) is a columnar storage format widespread in the [Hadoop](https://hadoop.apache.org/) ecosystem.
### Data Types Matching {#data_types-matching-3}
The table below shows supported data types and how they match ClickHouse [data types](../sql-reference/data-types/index.md) in `INSERT` queries.
The table below shows supported data types and how they match ClickHouse [data types](../sql-reference/data-types/index.md) in `INSERT` and `SELECT` queries.
| ORC data type (`INSERT`) | ClickHouse data type |
|--------------------------|-----------------------------------------------------|
| `UINT8`, `BOOL` | [UInt8](../sql-reference/data-types/int-uint.md) |
| `INT8` | [Int8](../sql-reference/data-types/int-uint.md) |
| `UINT16` | [UInt16](../sql-reference/data-types/int-uint.md) |
| `INT16` | [Int16](../sql-reference/data-types/int-uint.md) |
| `UINT32` | [UInt32](../sql-reference/data-types/int-uint.md) |
| `INT32` | [Int32](../sql-reference/data-types/int-uint.md) |
| `UINT64` | [UInt64](../sql-reference/data-types/int-uint.md) |
| `INT64` | [Int64](../sql-reference/data-types/int-uint.md) |
| `FLOAT`, `HALF_FLOAT` | [Float32](../sql-reference/data-types/float.md) |
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) |
| `DATE32` | [Date](../sql-reference/data-types/date.md) |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) |
| ORC data type (`INSERT`) | ClickHouse data type | ORC data type (`SELECT`) |
|--------------------------|-----------------------------------------------------|--------------------------|
| `UINT8`, `BOOL` | [UInt8](../sql-reference/data-types/int-uint.md) | `UINT8` |
| `INT8` | [Int8](../sql-reference/data-types/int-uint.md) | `INT8` |
| `UINT16` | [UInt16](../sql-reference/data-types/int-uint.md) | `UINT16` |
| `INT16` | [Int16](../sql-reference/data-types/int-uint.md) | `INT16` |
| `UINT32` | [UInt32](../sql-reference/data-types/int-uint.md) | `UINT32` |
| `INT32` | [Int32](../sql-reference/data-types/int-uint.md) | `INT32` |
| `UINT64` | [UInt64](../sql-reference/data-types/int-uint.md) | `UINT64` |
| `INT64` | [Int64](../sql-reference/data-types/int-uint.md) | `INT64` |
| `FLOAT`, `HALF_FLOAT` | [Float32](../sql-reference/data-types/float.md) | `FLOAT` |
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `DOUBLE` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `DATE32` |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql-reference/data-types/datetime.md) | `TIMESTAMP` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `-` | [Array](../sql-reference/data-types/array.md) | `LIST` |
ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.
Unsupported ORC data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
Unsupported ORC data types: `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
The data types of ClickHouse table columns don't have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) the data to the data type set for the ClickHouse table column.
@ -1321,6 +1322,14 @@ You can insert ORC data from a file into ClickHouse table by the following comma
$ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC"
```
### Selecting Data {#selecting-data-2}
You can select data from a ClickHouse table and save it to a file in the ORC format with the following command:
``` bash
$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT ORC" > {filename.orc}
```
To exchange data with Hadoop, you can use [HDFS table engine](../engines/table-engines/integrations/hdfs.md).
## LineAsString {#lineasstring}

View File

@ -12,9 +12,12 @@ toc_title: Adopters
|---------|----------|---------|--------------|------------------------------------------------------------------------------|-----------|
| <a href="https://2gis.ru" class="favicon">2gis</a> | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) |
| <a href="https://getadmiral.com/" class="favicon">Admiral</a> | Martech | Engagement Management | — | — | [Webinar Slides, June 2020](https://altinity.com/presentations/2020/06/16/big-data-in-real-time-how-clickhouse-powers-admirals-visitor-relationships-for-publishers) |
| <a href="http://www.adscribe.tv/" class="favicon">AdScribe</a> | Ads | TV Analytics | — | — | [A quote from CTO](https://altinity.com/24x7-support/) |
| <a href="https://cn.aliyun.com/" class="favicon">Alibaba Cloud</a> | Cloud | Managed Service | — | — | [Official Website](https://help.aliyun.com/product/144466.html) |
| <a href="https://alohabrowser.com/" class="favicon">Aloha Browser</a> | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://presentations.clickhouse.tech/meetup22/aloha.pdf) |
| <a href="https://altinity.com/" class="favicon">Altinity</a> | Cloud, SaaS | Main product | — | — | [Official Website](https://altinity.com/) |
| <a href="https://amadeus.com/" class="favicon">Amadeus</a> | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) |
| <a href="https://apiroad.net/" class="favicon">ApiRoad</a> | API marketplace | Analytics | — | — | [Blog post, Nov 2018, Mar 2020](https://pixeljets.com/blog/clickhouse-vs-elasticsearch/) |
| <a href="https://www.appsflyer.com" class="favicon">Appsflyer</a> | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) |
| <a href="https://arenadata.tech/" class="favicon">ArenaData</a> | Data Platform | Main product | — | — | [Slides in Russian, December 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) |
| <a href="https://avito.ru/" class="favicon">Avito</a> | Classifieds | Monitoring | — | — | [Meetup, April 2020](https://www.youtube.com/watch?v=n1tm4j4W8ZQ) |
@ -37,6 +40,7 @@ toc_title: Adopters
| <a href="https://www.creditx.com" class="favicon">CraiditX 氪信</a> | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
| <a href="https://crazypanda.ru/en/" class="favicon">Crazypanda</a> | Games | | — | — | Live session on ClickHouse meetup |
| <a href="https://www.criteo.com/" class="favicon">Criteo</a> | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) |
| <a href="https://cryptology.com/" class="favicon">Cryptology</a> | Digital Assets Trading Platform | — | — | — | [Job advertisement, March 2021](https://career.habr.com/companies/cryptology/vacancies) |
| <a href="https://www.chinatelecomglobal.com/" class="favicon">Dataliance for China Telecom</a> | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) |
| <a href="https://db.com" class="favicon">Deutsche Bank</a> | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
| <a href="https://deeplay.io/eng/" class="favicon">Deeplay</a> | Gaming Analytics | — | — | — | [Job advertisement, 2020](https://career.habr.com/vacancies/1000062568) |
@ -49,11 +53,13 @@ toc_title: Adopters
| <a href="https://fun.co/rp" class="favicon">FunCorp</a> | Games | | — | 14 bn records/day as of Jan 2021 | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) |
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| <a href="https://www.genotek.ru/" class="favicon">Genotek</a> | Bioinformatics | Main product | — | — | [Video, August 2020](https://youtu.be/v3KyZbz9lEE) |
| <a href="https://glaber.io/" class="favicon">Glaber</a> | Monitoring | Main product | — | — | [Website](https://glaber.io/) |
| <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
| <a href="https://www.the-ica.com/" class="favicon">ICA</a> | FinTech | Risk Management | — | — | [Blog Post in English, Sep 2020](https://altinity.com/blog/clickhouse-vs-redshift-performance-for-fintech-risk-management?utm_campaign=ClickHouse%20vs%20RedShift&utm_content=143520807&utm_medium=social&utm_source=twitter&hss_channel=tw-3894792263) |
| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
| <a href="https://www.infovista.com/" class="favicon">Infovista</a> | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
| <a href="https://www.innogames.com" class="favicon">InnoGames</a> | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
| <a href="https://instabug.com/" class="favicon">Instabug</a> | APM Platform | Main product | — | — | [A quote from Co-Founder](https://altinity.com/) |
| <a href="https://www.instana.com" class="favicon">Instana</a> | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) |
| <a href="https://integros.com" class="favicon">Integros</a> | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
| <a href="https://ippon.tech" class="favicon">Ippon Technologies</a> | Technology Consulting | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=205) |
@ -65,15 +71,19 @@ toc_title: Adopters
| <a href="https://www.lbl.gov" class="favicon">Lawrence Berkeley National Laboratory</a> | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) |
| <a href="https://lifestreet.com/" class="favicon">LifeStreet</a> | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) |
| <a href="https://mcs.mail.ru/" class="favicon">Mail.ru Cloud Solutions</a> | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) |
| <a href="https://maxilect.com/" class="favicon">MAXILECT</a> | Ad Tech, Blockchain, ML, AI | — | — | — | [Job advertisement, 2021](https://www.linkedin.com/feed/update/urn:li:activity:6780842017229430784/) |
| <a href="https://tech.mymarilyn.ru" class="favicon">Marilyn</a> | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) |
| <a href="https://mellodesign.ru/" class="favicon">Mello</a> | Marketing | Analytics | 1 server | — | [Article, Oct 2020](https://vc.ru/marketing/166180-razrabotka-tipovogo-otcheta-skvoznoy-analitiki) |
| <a href="https://www.messagebird.com" class="favicon">MessageBird</a> | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
| <a href="https://www.mindsdb.com/" class="favicon">MindsDB</a> | Machine Learning | Main Product | — | — | [Official Website](https://www.mindsdb.com/blog/machine-learning-models-as-tables-in-ch) |x
| <a href="https://mux.com/" class="favicon">MUX</a> | Online Video | Video Analytics | — | — | [Talk in English, August 2019](https://altinity.com/presentations/2019/8/13/how-clickhouse-became-the-default-analytics-database-for-mux/) |
| <a href="https://www.mgid.com/" class="favicon">MGID</a> | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
| <a href="https://www.netskope.com/" class="favicon">Netskope</a> | Network Security | — | — | — | [Job advertisement, March 2021](https://www.mendeley.com/careers/job/senior-software-developer-backend-developer-1346348) |
| <a href="https://niclabs.cl/" class="favicon">NIC Labs</a> | Network Monitoring | RaTA-DNS | — | — | [Blog post, March 2021](https://niclabs.cl/ratadns/2021/03/Clickhouse) |
| <a href="https://getnoc.com/" class="favicon">NOC Project</a> | Network Monitoring | Analytics | Main Product | — | [Official Website](https://getnoc.com/features/big-data/) |
| <a href="https://www.nuna.com/" class="favicon">Nuna Inc.</a> | Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) |
| <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
| <a href="https://corp.ozon.com/" class="favicon">OZON</a> | E-commerce | — | — | — | [Official website](https://job.ozon.ru/vacancy/razrabotchik-clickhouse-ekspluatatsiya-40991870/) |
| <a href="https://panelbear.com/" class="favicon">Panelbear | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) |
| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
| <a href="https://www.percona.com/" class="favicon">Percona</a> | Performance analysis | Percona Monitoring and Management | — | — | [Official website, Mar 2020](https://www.percona.com/blog/2020/03/30/advanced-query-analysis-in-percona-monitoring-and-management-with-direct-clickhouse-access/) |
@ -90,14 +100,17 @@ toc_title: Adopters
| <a href="https://rspamd.com/" class="favicon">Rspamd</a> | Antispam | Analytics | — | — | [Official Website](https://rspamd.com/doc/modules/clickhouse.html) |
| <a href="https://rusiem.com/en" class="favicon">RuSIEM</a> | SIEM | Main Product | — | — | [Official Website](https://rusiem.com/en/products/architecture) |
| <a href="https://www.s7.ru" class="favicon">S7 Airlines</a> | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) |
| <a href="https://www.sberbank.com/index" class="favicon">Sber</a> | Banking, Fintech, Retail, Cloud, Media | — | — | — | [Job advertisement, March 2021](https://career.habr.com/vacancies/1000073536) |
| <a href="https://www.scireum.de/" class="favicon">scireum GmbH</a> | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) |
| <a href="https://segment.com/" class="favicon">Segment</a> | Data processing | Main product | 9 * i3en.3xlarge nodes 7.5TB NVME SSDs, 96GB Memory, 12 vCPUs | — | [Slides, 2019](https://slides.com/abraithwaite/segment-clickhouse) |
| <a href="https://sembot.io/" class="favicon">sembot.io</a> | Shopping Ads | — | — | — | A comment on LinkedIn, 2020 |
| <a href="https://www.semrush.com/" class="favicon">SEMrush</a> | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
| <a href="https://sentry.io/" class="favicon">Sentry</a> | Software Development | Main product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) |
| <a href="https://seo.do/" class="favicon">seo.do</a> | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
| <a href="http://www.sgk.gov.tr/wps/portal/sgk/tr" class="favicon">SGK</a> | Goverment Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
| <a href="http://english.sina.com/index.html" class="favicon">Sina</a> | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
| <a href="https://smi2.ru/" class="favicon">SMI2</a> | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
| <a href="https://www.spark.co.nz/" class="favicon">Spark New Zealand</a> | Telecommunications | Security Operations | — | — | [Blog Post, Feb 2020](https://blog.n0p.me/2020/02/2020-02-05-dnsmonster/) |
| <a href="https://www.splunk.com/" class="favicon">Splunk</a> | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) |
| <a href="https://www.spotify.com" class="favicon">Spotify</a> | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) |
| <a href="https://www.staffcop.ru/" class="favicon">Staffcop</a> | Information Security | Main Product | — | — | [Official website, Documentation](https://www.staffcop.ru/sce43) |
@ -106,23 +119,31 @@ toc_title: Adopters
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
| <a href="https://www.tencentmusic.com/" class="favicon">Tencent Music Entertainment (TME)</a> | BigData | Data processing | — | — | [Blog in Chinese, June 2020](https://cloud.tencent.com/developer/article/1637840) |
| <a href="https://www.tinybird.co/" class="favicon">Tinybird</a> | Real-time Data Products | Data processing | — | — | [Official website](https://www.tinybird.co/) |
| <a href="https://trafficstars.com/" class="favicon">Traffic Stars</a> | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
| <a href="https://www.uber.com" class="favicon">Uber</a> | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) |
| <a href="https://hello.utmstat.com/" class="favicon">UTMSTAT</a> | Analytics | Main product | — | — | [Blog post, June 2020](https://vc.ru/tribuna/133956-striming-dannyh-iz-servisa-skvoznoy-analitiki-v-clickhouse) |
| <a href="https://vk.com" class="favicon">VKontakte</a> | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) |
| <a href="https://www.vmware.com/" class="favicon">VMWare</a> | Cloud | VeloCloud, SDN | — | — | [Product documentation](https://docs.vmware.com/en/vRealize-Operations-Manager/8.3/com.vmware.vcom.metrics.doc/GUID-A9AD72E1-C948-4CA2-971B-919385AB3CA8.html) |
| <a href="https://www.walmartlabs.com/" class="favicon">Walmart Labs</a> | Internet, Retail | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=144) |
| <a href="https://wargaming.com/en/" class="favicon">Wargaming</a> | Games | | — | — | [Interview](https://habr.com/en/post/496954/) |
| <a href="https://www.wildberries.ru/" class="favicon">Wildberries</a> | E-commerce | | — | — | [Official website](https://it.wildberries.ru/) |
| <a href="https://wisebits.com/" class="favicon">Wisebits</a> | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
| <a href="https://www.workato.com/" class="favicon">Workato</a> | Automation Software | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=334) |
| <a href="https://xenoss.io/" class="favicon">Xenoss</a> | Marketing, Advertising | — | — | — | [Instagram, March 2021](https://www.instagram.com/p/CNATV7qBgB1/) |
| <a href="http://www.xiaoxintech.cn/" class="favicon">Xiaoxin Tech</a> | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) |
| <a href="https://www.ximalaya.com/" class="favicon">Ximalaya</a> | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) |
| <a href="https://cloud.yandex.ru/services/managed-clickhouse" class="favicon">Yandex Cloud</a> | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) |
| <a href="https://cloud.yandex.ru/services/datalens" class="favicon">Yandex DataLens</a> | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) |
| <a href="https://market.yandex.ru/" class="favicon">Yandex Market</a> | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) |
| <a href="https://metrica.yandex.com" class="favicon">Yandex Metrica</a> | Web analytics | Main product | 630 servers in one cluster, 360 servers in another cluster, 1862 servers in one department | 133 PiB / 8.31 PiB / 120 trillion records | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) |
| <a href="https://www.yotascale.com/" class="favicon">Yotascale</a> | Cloud | Data pipeline | — | 2 bn records/day | [LinkedIn (Accomplishments)](https://www.linkedin.com/in/adilsaleem/) |
| <a href="https://htc-cs.ru/" class="favicon">ЦВТ</a> | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) |
| <a href="https://mkb.ru/" class="favicon">МКБ</a> | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) |
| <a href="https://cft.ru/" class="favicon">ЦФТ</a> | Banking, Financial products, Payments | — | — | — | [Meetup in Russian, April 2020](https://team.cft.ru/events/162) |
| <a href="https://promo.croc.ru/digitalworker" class="favicon">Цифровой Рабочий</a> | Industrial IoT, Analytics | — | — | — | [Blog post in Russian, March 2021](https://habr.com/en/company/croc/blog/548018/) |
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
| <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) |
| <a href="https://www.tesla.com/" class="favicon">Tesla</a> | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) |
[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->

View File

@ -502,7 +502,15 @@ On hosts with low RAM and swap, you possibly need setting `max_server_memory_usa
## max_concurrent_queries {#max-concurrent-queries}
The maximum number of simultaneously processed requests.
The maximum number of simultaneously processed queries related to MergeTree tables. Queries may be limited by other settings: [max_concurrent_queries_for_all_users](#max-concurrent-queries-for-all-users), [min_marks_to_honor_max_concurrent_queries](#min-marks-to-honor-max-concurrent-queries).
!!! info "Note"
These settings can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
Possible values:
- Positive integer.
- 0 — Disabled.
**Example**
@ -530,6 +538,21 @@ Default value: `0` that means no limit.
- [max_concurrent_queries](#max-concurrent-queries)
## min_marks_to_honor_max_concurrent_queries {#min-marks-to-honor-max-concurrent-queries}
The minimal number of marks read by the query for applying the [max_concurrent_queries](#max-concurrent-queries) setting.
Possible values:
- Positive integer.
- 0 — Disabled.
**Example**
``` xml
<min_marks_to_honor_max_concurrent_queries>10</min_marks_to_honor_max_concurrent_queries>
```
## max_connections {#max-connections}
The maximum number of inbound connections.

View File

@ -769,6 +769,38 @@ Example:
log_query_threads=1
```
## log_comment {#settings-log-comment}
Specifies the value for the `log_comment` field of the [system.query_log](../system-tables/query_log.md) table and comment text for the server log.
It can be used to improve the readability of server logs. Additionally, it helps to select queries related to the test from the `system.query_log` after running [clickhouse-test](../../development/tests.md).
Possible values:
- Any string no longer than [max_query_size](#settings-max_query_size). If length is exceeded, the server throws an exception.
Default value: empty string.
**Example**
Query:
``` sql
SET log_comment = 'log_comment test', log_queries = 1;
SELECT 1;
SYSTEM FLUSH LOGS;
SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test' AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 2;
```
Result:
``` text
┌─type────────┬─query─────┐
│ QueryStart │ SELECT 1; │
│ QueryFinish │ SELECT 1; │
└─────────────┴───────────┘
```
## max_insert_block_size {#settings-max_insert_block_size}
The size of blocks (in a count of rows) to form for insertion into a table.
@ -1514,6 +1546,14 @@ FORMAT PrettyCompactMonoBlock
Default value: 0
## optimize_skip_unused_shards_limit {#optimize-skip-unused-shards-limit}
Limit for number of sharding key values, turns off `optimize_skip_unused_shards` if the limit is reached.
Too many values may require a significant amount of processing, while the benefit is doubtful, since with a huge number of values in `IN (...)` the query will most likely be sent to all shards anyway.
Default value: 1000
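A minimal sketch of raising the limit for a single session (the value `100` is illustrative):

``` sql
SET optimize_skip_unused_shards = 1, optimize_skip_unused_shards_limit = 100;
```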
## optimize_skip_unused_shards {#optimize-skip-unused-shards}
Enables or disables skipping of unused shards for [SELECT](../../sql-reference/statements/select/index.md) queries that have sharding key condition in `WHERE/PREWHERE` (assuming that the data is distributed by sharding key, otherwise does nothing).
@ -1874,7 +1914,7 @@ Default value: `0`.
Enables or disables random shard insertion into a [Distributed](../../engines/table-engines/special/distributed.md#distributed) table when there is no distributed key.
By default, when inserting data into a `Distributed` table with more than one shard, the ClickHouse server will any insertion request if there is no distributed key. When `insert_distributed_one_random_shard = 1`, insertions are allowed and data is forwarded randomly among all shards.
By default, when inserting data into a `Distributed` table with more than one shard, the ClickHouse server will reject any insertion request if there is no distributed key. When `insert_distributed_one_random_shard = 1`, insertions are allowed and data is forwarded randomly among all shards.
Possible values:
@ -2728,11 +2768,11 @@ Default value: `0`.
## engine_file_truncate_on_insert {#engine-file-truncate-on-insert}
Enables or disables truncate before insert in file engine tables.
Enables or disables truncate before insert in [File](../../engines/table-engines/special/file.md) engine tables.
Possible values:
- 0 — Disabled.
- 1 — Enabled.
- 0 — `INSERT` query appends new data to the end of the file.
- 1 — `INSERT` replaces existing content of the file with the new data.
Default value: `0`.
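A sketch of the difference, assuming a hypothetical `file_table` backed by the [File](../../engines/table-engines/special/file.md) engine:

``` sql
CREATE TABLE file_table (x UInt32) ENGINE = File(TSV);
INSERT INTO file_table VALUES (1); -- appends to the file with the default setting
SET engine_file_truncate_on_insert = 1;
INSERT INTO file_table VALUES (2); -- replaces the existing file content with this row
```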
@ -2747,4 +2787,39 @@ Possible values:
Default value: `0`.
## allow_experimental_live_view {#allow-experimental-live-view}
Allows creation of experimental [live views](../../sql-reference/statements/create/view.md#live-view).
Possible values:
- 0 — Working with live views is disabled.
- 1 — Working with live views is enabled.
Default value: `0`.
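A minimal sketch of enabling the setting and creating a live view (the `visits_src` table is hypothetical):

``` sql
SET allow_experimental_live_view = 1;
CREATE LIVE VIEW lv AS SELECT count() FROM visits_src;
WATCH lv; -- streams the updated result as new data arrives in visits_src
```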
## live_view_heartbeat_interval {#live-view-heartbeat-interval}
Sets the heartbeat interval in seconds to indicate that the [live view](../../sql-reference/statements/create/view.md#live-view) is alive.
Default value: `15`.
## max_live_view_insert_blocks_before_refresh {#max-live-view-insert-blocks-before-refresh}
Sets the maximum number of inserted blocks after which mergeable blocks are dropped and query for [live view](../../sql-reference/statements/create/view.md#live-view) is re-executed.
Default value: `64`.
## temporary_live_view_timeout {#temporary-live-view-timeout}
Sets the interval in seconds after which [live view](../../sql-reference/statements/create/view.md#live-view) with timeout is deleted.
Default value: `5`.
## periodic_live_view_refresh {#periodic-live-view-refresh}
Sets the interval in seconds after which periodically refreshed [live view](../../sql-reference/statements/create/view.md#live-view) is forced to refresh.
Default value: `60`.
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->

View File

@ -9,7 +9,7 @@ Columns:
- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error has happened.
- `last_error_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — time when the last error happened.
- `last_error_message` ([String](../../sql-reference/data-types/string.md)) — message for the last error.
- `last_error_stacktrace` ([String](../../sql-reference/data-types/string.md)) — stacktrace for the last error.
- `last_error_trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — A [stack trace](https://en.wikipedia.org/wiki/Stack_trace) which represents a list of physical addresses where the called methods are stored.
- `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — remote exception (i.e. received during one of the distributed queries).
**Example**
@ -25,3 +25,12 @@ LIMIT 1
│ CANNOT_OPEN_FILE │ 76 │ 1 │
└──────────────────┴──────┴───────┘
```
``` sql
WITH arrayMap(x -> demangle(addressToSymbol(x)), last_error_trace) AS all
SELECT name, arrayStringConcat(all, '\n') AS res
FROM system.errors
LIMIT 1
SETTINGS allow_introspection_functions=1\G
```

View File

@ -14,7 +14,17 @@ Columns:
- `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS`, or `MUTATE_PARTS`.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue, one of the following (a query sketch follows this list):
- `GET_PART` - Get the part from another replica.
- `ATTACH_PART` - Attach the part, possibly from our own replica (if found in `detached` folder).
You may think of it as a `GET_PART` with some optimisations as they're nearly identical.
- `MERGE_PARTS` - Merge the parts.
- `DROP_RANGE` - Delete the parts in the specified partition in the specified number range.
- `CLEAR_COLUMN` - NOTE: Deprecated. Drop specific column from specified partition.
- `CLEAR_INDEX` - NOTE: Deprecated. Drop specific index from specified partition.
- `REPLACE_RANGE` - Drop a certain range of parts and replace them with new ones.
- `MUTATE_PART` - Apply one or several mutations to the part.
- `ALTER_METADATA` - Apply ALTER modification according to the global /metadata and /columns paths.
- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
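For example, an illustrative query that groups the currently queued tasks by type:

``` sql
SELECT type, count() AS tasks
FROM system.replication_queue
GROUP BY type
ORDER BY tasks DESC;
```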

View File

@ -243,7 +243,7 @@ The function works according to the algorithm:
**Syntax**
``` sql
windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN)
windowFunnel(window, [mode, [mode, ... ]])(timestamp, cond1, cond2, ..., condN)
```
**Arguments**
@ -253,9 +253,11 @@ windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN)
**Parameters**
- `window` — Length of the sliding window. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond2 <= timestamp of cond1 + window`.
- `mode` — It is an optional argument.
- `'strict'` — When the `'strict'` is set, the windowFunnel() applies conditions only for the unique values.
- `window` — Length of the sliding window; it is the time interval between the first condition and the last condition. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond1 <= timestamp of cond2 <= ... <= timestamp of condN <= timestamp of cond1 + window`.
- `mode` — An optional argument. One or more modes can be set (see the sketch after this list).
- `'strict'` — If the same condition holds for a sequence of events, such repeated events are skipped.
- `'strict_order'` — Do not allow events of other conditions to intervene. E.g. in the case of `A->B->D->C`, it stops finding `A->B->C` at the `D` and the max event level is 2.
- `'strict_increase'` — Apply conditions only to events with strictly increasing timestamps.
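A minimal sketch of using the modes (the `events` table and its columns are hypothetical):

``` sql
SELECT
    user_id,
    windowFunnel(3600, 'strict_order')(event_time, event = 'visit', event = 'cart', event = 'purchase') AS level
FROM events
GROUP BY user_id;
```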
**Returned value**

View File

@ -320,8 +320,6 @@ Similar to `cache`, but stores data on SSD and index in RAM.
<write_buffer_size>1048576</write_buffer_size>
<!-- Path where cache file will be stored. -->
<path>/var/lib/clickhouse/clickhouse_dictionaries/test_dict</path>
<!-- Max number on stored keys in the cache. Rounded up to a power of two. -->
<max_stored_keys>1048576</max_stored_keys>
</ssd_cache>
</layout>
```
@ -329,8 +327,8 @@ Similar to `cache`, but stores data on SSD and index in RAM.
or
``` sql
LAYOUT(CACHE(BLOCK_SIZE 4096 FILE_SIZE 16777216 READ_BUFFER_SIZE 1048576
PATH /var/lib/clickhouse/clickhouse_dictionaries/test_dict MAX_STORED_KEYS 1048576))
LAYOUT(SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 16777216 READ_BUFFER_SIZE 1048576
PATH /var/lib/clickhouse/clickhouse_dictionaries/test_dict))
```
### complex_key_ssd_cache {#complex-key-ssd-cache}

View File

@ -69,6 +69,8 @@ Types of sources (`source_type`):
- [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse)
- [MongoDB](#dicts-external_dicts_dict_sources-mongodb)
- [Redis](#dicts-external_dicts_dict_sources-redis)
- [Cassandra](#dicts-external_dicts_dict_sources-cassandra)
- [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql)
## Local File {#dicts-external_dicts_dict_sources-local_file}

View File

@ -245,7 +245,7 @@ Elements set to `NULL` are handled as normal values.
Returns the number of elements in the arr array for which func returns something other than 0. If func is not specified, it returns the number of non-zero elements in the array.
Note that the `arrayCount` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
Note that the `arrayCount` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
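For example, an illustrative call with a lambda counting elements greater than 1:

``` sql
SELECT arrayCount(x -> x > 1, [1, 2, 3]) AS cnt; -- returns 2
```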
## countEqual(arr, x) {#countequalarr-x}
@ -1229,7 +1229,7 @@ SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5,
└────────────────────────────────────┘
```
Note that the `arrayReverseFilter` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it cant be omitted.
Note that the `arrayReverseFill` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can't be omitted.
## arraySplit(func, arr1, …) {#array-split}
@ -1293,7 +1293,7 @@ Note that the `arrayFirstIndex` is a [higher-order function](../../sql-reference
## arrayMin {#array-min}
Returns the minimum of elements in the source array.
Returns the minimum of elements in the source array.
If the `func` function is specified, returns the mininum of elements converted by this function.
@ -1312,9 +1312,9 @@ arrayMin([func,] arr)
**Returned value**
- The minimum of function values (or the array minimum).
- The minimum of function values (or the array minimum).
Type: if `func` is specified, matches `func` return value type, else matches the array elements type.
Type: if `func` is specified, matches `func` return value type, else matches the array elements type.
**Examples**
@ -1348,7 +1348,7 @@ Result:
## arrayMax {#array-max}
Returns the maximum of elements in the source array.
Returns the maximum of elements in the source array.
If the `func` function is specified, returns the maximum of elements converted by this function.
@ -1367,9 +1367,9 @@ arrayMax([func,] arr)
**Returned value**
- The maximum of function values (or the array maximum).
- The maximum of function values (or the array maximum).
Type: if `func` is specified, matches `func` return value type, else matches the array elements type.
Type: if `func` is specified, matches `func` return value type, else matches the array elements type.
**Examples**
@ -1403,7 +1403,7 @@ Result:
## arraySum {#array-sum}
Returns the sum of elements in the source array.
Returns the sum of elements in the source array.
If the `func` function is specified, returns the sum of elements converted by this function.
@ -1418,7 +1418,7 @@ arraySum([func,] arr)
**Arguments**
- `func` — Function. [Expression](../../sql-reference/data-types/special-data-types/expression.md).
- `arr` — Array. [Array](../../sql-reference/data-types/array.md).
- `arr` — Array. [Array](../../sql-reference/data-types/array.md).
**Returned value**
@ -1458,7 +1458,7 @@ Result:
## arrayAvg {#array-avg}
Returns the average of elements in the source array.
Returns the average of elements in the source array.
If the `func` function is specified, returns the average of elements converted by this function.
@ -1473,7 +1473,7 @@ arrayAvg([func,] arr)
**Arguments**
- `func` — Function. [Expression](../../sql-reference/data-types/special-data-types/expression.md).
- `arr` — Array. [Array](../../sql-reference/data-types/array.md).
- `arr` — Array. [Array](../../sql-reference/data-types/array.md).
**Returned value**

View File

@ -250,3 +250,53 @@ Result:
└───────────────┘
```
## bitHammingDistance {#bithammingdistance}
Returns the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) between the bit representations of two integer values. Can be used with [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash) functions for detection of semi-duplicate strings. The smaller the distance, the more likely those strings are the same.
**Syntax**
``` sql
bitHammingDistance(int1, int2)
```
**Arguments**
- `int1` — First integer value. [Int64](../../sql-reference/data-types/int-uint.md).
- `int2` — Second integer value. [Int64](../../sql-reference/data-types/int-uint.md).
**Returned value**
- The Hamming distance.
Type: [UInt8](../../sql-reference/data-types/int-uint.md).
**Examples**
Query:
``` sql
SELECT bitHammingDistance(111, 121);
```
Result:
``` text
┌─bitHammingDistance(111, 121)─┐
│ 3 │
└──────────────────────────────┘
```
With [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash):
``` sql
SELECT bitHammingDistance(ngramSimHash('cat ate rat'), ngramSimHash('rat ate cat'));
```
Result:
``` text
┌─bitHammingDistance(ngramSimHash('cat ate rat'), ngramSimHash('rat ate cat'))─┐
│ 5 │
└──────────────────────────────────────────────────────────────────────────────┘
```

View File

@ -0,0 +1,35 @@
---
toc_priority: 43
toc_title: Files
---
# Functions for Working with Files {#functions-for-working-with-files}
## file {#file}
Reads a file as a String. The file content is not parsed, so any information is read as one string and placed into the specified column.
**Syntax**
``` sql
file(path)
```
**Arguments**
- `path` — The relative path to the file from [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). The path to the file supports the following wildcards: `*`, `?`, `{abc,def}` and `{N..M}`, where `N`, `M` are numbers and `'abc'`, `'def'` are strings.
**Example**
Inserting data from files a.txt and b.txt into a table as strings:
Query:
``` sql
INSERT INTO table SELECT file('a.txt'), file('b.txt');
```
**See Also**
- [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path)
- [file](../table-functions/file.md)

View File

@ -7,6 +7,8 @@ toc_title: Hash
Hash functions can be used for the deterministic pseudo-random shuffling of elements.
Simhash is a hash function, which returns close hash values for close (similar) arguments.
## halfMD5 {#hash-functions-halfmd5}
[Interprets](../../sql-reference/functions/type-conversion-functions.md#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the [MD5](https://en.wikipedia.org/wiki/MD5) hash value for each of them. Then combines hashes, takes the first 8 bytes of the hash of the resulting string, and interprets them as `UInt64` in big-endian byte order.
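A minimal illustrative call (the literal arguments are arbitrary); `toTypeName` confirms the `UInt64` result type:

``` sql
SELECT halfMD5('ClickHouse', 21) AS h, toTypeName(h) AS type;
```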
@ -482,3 +484,938 @@ Result:
- [xxHash](http://cyan4973.github.io/xxHash/).
## ngramSimHash {#ngramsimhash}
Splits an ASCII string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. Is case sensitive.
Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same.
**Syntax**
``` sql
ngramSimHash(string[, ngramsize])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Hash value.
Type: [UInt64](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT ngramSimHash('ClickHouse') AS Hash;
```
Result:
``` text
┌───────Hash─┐
│ 1627567969 │
└────────────┘
```
## ngramSimHashCaseInsensitive {#ngramsimhashcaseinsensitive}
Splits an ASCII string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. Is case insensitive.
Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same.
**Syntax**
``` sql
ngramSimHashCaseInsensitive(string[, ngramsize])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Hash value.
Type: [UInt64](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT ngramSimHashCaseInsensitive('ClickHouse') AS Hash;
```
Result:
``` text
┌──────Hash─┐
│ 562180645 │
└───────────┘
```
## ngramSimHashUTF8 {#ngramsimhashutf8}
Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. Is case sensitive.
Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same.
**Syntax**
``` sql
ngramSimHashUTF8(string[, ngramsize])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Hash value.
Type: [UInt64](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT ngramSimHashUTF8('ClickHouse') AS Hash;
```
Result:
``` text
┌───────Hash─┐
│ 1628157797 │
└────────────┘
```
## ngramSimHashCaseInsensitiveUTF8 {#ngramsimhashcaseinsensitiveutf8}
Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-gram `simhash`. Is case insensitive.
Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same.
**Syntax**
``` sql
ngramSimHashCaseInsensitiveUTF8(string[, ngramsize])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Hash value.
Type: [UInt64](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT ngramSimHashCaseInsensitiveUTF8('ClickHouse') AS Hash;
```
Result:
``` text
┌───────Hash─┐
│ 1636742693 │
└────────────┘
```
## wordShingleSimHash {#wordshinglesimhash}
Splits an ASCII string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. Is case sensitive.
Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same.
**Syntax**
``` sql
wordShingleSimHash(string[, shinglesize])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Hash value.
Type: [UInt64](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT wordShingleSimHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
```
Result:
``` text
┌───────Hash─┐
│ 2328277067 │
└────────────┘
```
## wordShingleSimHashCaseInsensitive {#wordshinglesimhashcaseinsensitive}
Splits an ASCII string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. Is case insensitive.
Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same.
**Syntax**
``` sql
wordShingleSimHashCaseInsensitive(string[, shinglesize])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Hash value.
Type: [UInt64](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT wordShingleSimHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
```
Result:
``` text
┌───────Hash─┐
│ 2194812424 │
└────────────┘
```
## wordShingleSimHashUTF8 {#wordshinglesimhashutf8}
Splits a UTF-8 string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. Is case sensitive.
Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same.
**Syntax**
``` sql
wordShingleSimHashUTF8(string[, shinglesize])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Hash value.
Type: [UInt64](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT wordShingleSimHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
```
Result:
``` text
┌───────Hash─┐
│ 2328277067 │
└────────────┘
```
## wordShingleSimHashCaseInsensitiveUTF8 {#wordshinglesimhashcaseinsensitiveutf8}
Splits a UTF-8 string into parts (shingles) of `shinglesize` words and returns the word shingle `simhash`. Is case insensitive.
Can be used for detection of semi-duplicate strings with [bitHammingDistance](../../sql-reference/functions/bit-functions.md#bithammingdistance). The smaller is the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) of the calculated `simhashes` of two strings, the more likely these strings are the same.
**Syntax**
``` sql
wordShingleSimHashCaseInsensitiveUTF8(string[, shinglesize])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Hash value.
Type: [UInt64](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT wordShingleSimHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
```
Result:
``` text
┌───────Hash─┐
│ 2194812424 │
└────────────┘
```
## ngramMinHash {#ngramminhash}
Splits an ASCII string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case sensitive.
Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same.
**Syntax**
``` sql
ngramMinHash(string[, ngramsize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two hashes — the minimum and the maximum.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT ngramMinHash('ClickHouse') AS Tuple;
```
Result:
``` text
┌─Tuple──────────────────────────────────────┐
│ (18333312859352735453,9054248444481805918) │
└────────────────────────────────────────────┘
```
## ngramMinHashCaseInsensitive {#ngramminhashcaseinsensitive}
Splits an ASCII string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case insensitive.
Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same.
**Syntax**
``` sql
ngramMinHashCaseInsensitive(string[, ngramsize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two hashes — the minimum and the maximum.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT ngramMinHashCaseInsensitive('ClickHouse') AS Tuple;
```
Result:
``` text
┌─Tuple──────────────────────────────────────┐
│ (2106263556442004574,13203602793651726206) │
└────────────────────────────────────────────┘
```
## ngramMinHashUTF8 {#ngramminhashutf8}
Splits a UTF-8 string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case sensitive.
Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same.
**Syntax**
``` sql
ngramMinHashUTF8(string[, ngramsize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two hashes — the minimum and the maximum.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT ngramMinHashUTF8('ClickHouse') AS Tuple;
```
Result:
``` text
┌─Tuple──────────────────────────────────────┐
│ (18333312859352735453,6742163577938632877) │
└────────────────────────────────────────────┘
```
## ngramMinHashCaseInsensitiveUTF8 {#ngramminhashcaseinsensitiveutf8}
Splits a UTF-8 string into n-grams of `ngramsize` symbols and calculates hash values for each n-gram. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case insensitive.
Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same.
**Syntax**
``` sql
ngramMinHashCaseInsensitiveUTF8(string [, ngramsize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two hashes — the minimum and the maximum.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT ngramMinHashCaseInsensitiveUTF8('ClickHouse') AS Tuple;
```
Result:
``` text
┌─Tuple───────────────────────────────────────┐
│ (12493625717655877135,13203602793651726206) │
└─────────────────────────────────────────────┘
```
## ngramMinHashArg {#ngramminhasharg}
Splits an ASCII string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHash](#ngramminhash) function with the same input. Is case sensitive.
**Syntax**
``` sql
ngramMinHashArg(string[, ngramsize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two tuples with `hashnum` n-grams each.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))).
**Example**
Query:
``` sql
SELECT ngramMinHashArg('ClickHouse') AS Tuple;
```
Result:
``` text
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
│ (('ous','ick','lic','Hou','kHo','use'),('Hou','lic','ick','ous','ckH','Cli')) │
└───────────────────────────────────────────────────────────────────────────────┘
```
## ngramMinHashArgCaseInsensitive {#ngramminhashargcaseinsensitive}
Splits an ASCII string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHashCaseInsensitive](#ngramminhashcaseinsensitive) function with the same input. Is case insensitive.
**Syntax**
``` sql
ngramMinHashArgCaseInsensitive(string[, ngramsize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two tuples with `hashnum` n-grams each.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))).
**Example**
Query:
``` sql
SELECT ngramMinHashArgCaseInsensitive('ClickHouse') AS Tuple;
```
Result:
``` text
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
│ (('ous','ick','lic','kHo','use','Cli'),('kHo','lic','ick','ous','ckH','Hou')) │
└───────────────────────────────────────────────────────────────────────────────┘
```
## ngramMinHashArgUTF8 {#ngramminhashargutf8}
Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHashUTF8](#ngramminhashutf8) function with the same input. Is case sensitive.
**Syntax**
``` sql
ngramMinHashArgUTF8(string[, ngramsize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two tuples with `hashnum` n-grams each.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))).
**Example**
Query:
``` sql
SELECT ngramMinHashArgUTF8('ClickHouse') AS Tuple;
```
Result:
``` text
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
│ (('ous','ick','lic','Hou','kHo','use'),('kHo','Hou','lic','ick','ous','ckH')) │
└───────────────────────────────────────────────────────────────────────────────┘
```
## ngramMinHashArgCaseInsensitiveUTF8 {#ngramminhashargcaseinsensitiveutf8}
Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-grams with minimum and maximum hashes, calculated by the [ngramMinHashCaseInsensitiveUTF8](#ngramminhashcaseinsensitiveutf8) function with the same input. Is case insensitive.
**Syntax**
``` sql
ngramMinHashArgCaseInsensitiveUTF8(string[, ngramsize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `ngramsize` — The size of an n-gram. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two tuples with `hashnum` n-grams each.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))).
**Example**
Query:
``` sql
SELECT ngramMinHashArgCaseInsensitiveUTF8('ClickHouse') AS Tuple;
```
Result:
``` text
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
│ (('ckH','ous','ick','lic','kHo','use'),('kHo','lic','ick','ous','ckH','Hou')) │
└───────────────────────────────────────────────────────────────────────────────┘
```
## wordShingleMinHash {#wordshingleminhash}
Splits an ASCII string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case sensitive.
Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same.
**Syntax**
``` sql
wordShingleMinHash(string[, shinglesize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two hashes — the minimum and the maximum.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT wordShingleMinHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
```
Result:
``` text
┌─Tuple──────────────────────────────────────┐
│ (16452112859864147620,5844417301642981317) │
└────────────────────────────────────────────┘
```
## wordShingleMinHashCaseInsensitive {#wordshingleminhashcaseinsensitive}
Splits an ASCII string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case insensitive.
Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same.
**Syntax**
``` sql
wordShingleMinHashCaseInsensitive(string[, shinglesize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two hashes — the minimum and the maximum.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT wordShingleMinHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
```
Result:
``` text
┌─Tuple─────────────────────────────────────┐
│ (3065874883688416519,1634050779997673240) │
└───────────────────────────────────────────┘
```
## wordShingleMinHashUTF8 {#wordshingleminhashutf8}
Splits a UTF-8 string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case sensitive.
Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same.
**Syntax**
``` sql
wordShingleMinHashUTF8(string[, shinglesize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two hashes — the minimum and the maximum.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT wordShingleMinHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
```
Result:
``` text
┌─Tuple──────────────────────────────────────┐
│ (16452112859864147620,5844417301642981317) │
└────────────────────────────────────────────┘
```
## wordShingleMinHashCaseInsensitiveUTF8 {#wordshingleminhashcaseinsensitiveutf8}
Splits a UTF-8 string into parts (shingles) of `shinglesize` words and calculates hash values for each word shingle. Uses `hashnum` minimum hashes to calculate the minimum hash and `hashnum` maximum hashes to calculate the maximum hash. Returns a tuple with these hashes. Is case insensitive.
Can be used for detection of semi-duplicate strings with [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). For two strings: if one of the returned hashes is the same for both strings, we think that those strings are the same.
**Syntax**
``` sql
wordShingleMinHashCaseInsensitiveUTF8(string[, shinglesize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two hashes — the minimum and the maximum.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT wordShingleMinHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
```
Result:
``` text
┌─Tuple─────────────────────────────────────┐
│ (3065874883688416519,1634050779997673240) │
└───────────────────────────────────────────┘
```
## wordShingleMinHashArg {#wordshingleminhasharg}
Splits an ASCII string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHash](#wordshingleminhash) function with the same input. Is case sensitive.
**Syntax**
``` sql
wordShingleMinHashArg(string[, shinglesize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two tuples with `hashnum` word shingles each.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))).
**Example**
Query:
``` sql
SELECT wordShingleMinHashArg('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
```
Result:
``` text
┌─Tuple─────────────────────────────────────────────────────────────────┐
│ (('OLAP','database','analytical'),('online','oriented','processing')) │
└───────────────────────────────────────────────────────────────────────┘
```
## wordShingleMinHashArgCaseInsensitive {#wordshingleminhashargcaseinsensitive}
Splits an ASCII string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHashCaseInsensitive](#wordshingleminhashcaseinsensitive) function with the same input. Is case insensitive.
**Syntax**
``` sql
wordShingleMinHashArgCaseInsensitive(string[, shinglesize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two tuples with `hashnum` word shingles each.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))).
**Example**
Query:
``` sql
SELECT wordShingleMinHashArgCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
```
Result:
``` text
┌─Tuple──────────────────────────────────────────────────────────────────┐
│ (('queries','database','analytical'),('oriented','processing','DBMS')) │
└────────────────────────────────────────────────────────────────────────┘
```
## wordShingleMinHashArgUTF8 {#wordshingleminhashargutf8}
Splits a UTF-8 string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHashUTF8](#wordshingleminhashutf8) function with the same input. Is case sensitive.
**Syntax**
``` sql
wordShingleMinHashArgUTF8(string[, shinglesize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two tuples with `hashnum` word shingles each.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))).
**Example**
Query:
``` sql
SELECT wordShingleMinHashArgUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
```
Result:
``` text
┌─Tuple─────────────────────────────────────────────────────────────────┐
│ (('OLAP','database','analytical'),('online','oriented','processing')) │
└───────────────────────────────────────────────────────────────────────┘
```
## wordShingleMinHashArgCaseInsensitiveUTF8 {#wordshingleminhashargcaseinsensitiveutf8}
Splits a UTF-8 string into parts (shingles) of `shinglesize` words each and returns the shingles with minimum and maximum word hashes, calculated by the [wordShingleMinHashCaseInsensitiveUTF8](#wordshingleminhashcaseinsensitiveutf8) function with the same input. Is case insensitive.
**Syntax**
``` sql
wordShingleMinHashArgCaseInsensitiveUTF8(string[, shinglesize, hashnum])
```
**Arguments**
- `string` — String. [String](../../sql-reference/data-types/string.md).
- `shinglesize` — The size of a word shingle. Optional. Possible values: any number from `1` to `25`. Default value: `3`. [UInt8](../../sql-reference/data-types/int-uint.md).
- `hashnum` — The number of minimum and maximum hashes used to calculate the result. Optional. Possible values: any number from `1` to `25`. Default value: `6`. [UInt8](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Tuple with two tuples with `hashnum` word shingles each.
Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md)), [Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md))).
**Example**
Query:
``` sql
SELECT wordShingleMinHashArgCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
```
Result:
``` text
┌─Tuple──────────────────────────────────────────────────────────────────┐
│ (('queries','database','analytical'),('oriented','processing','DBMS')) │
└────────────────────────────────────────────────────────────────────────┘
```

View File

@ -394,3 +394,55 @@ Result:
└──────────────────┴────────────────────┘
```
## isIPAddressInRange {#isipaddressinrange}
Determines if an IP address is contained in a network represented in the [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation. Returns `1` if true, or `0` otherwise.
**Syntax**
``` sql
isIPAddressInRange(address, prefix)
```
This function accepts both IPv4 and IPv6 addresses (and networks) represented as strings. It returns `0` if the IP version of the address and the CIDR don't match.
**Arguments**
- `address` — An IPv4 or IPv6 address. [String](../../sql-reference/data-types/string.md).
- `prefix` — An IPv4 or IPv6 network prefix in CIDR. [String](../../sql-reference/data-types/string.md).
**Returned value**
- `1` or `0`.
Type: [UInt8](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT isIPAddressInRange('127.0.0.1', '127.0.0.0/8')
```
Result:
``` text
┌─isIPAddressInRange('127.0.0.1', '127.0.0.0/8')─┐
│ 1 │
└────────────────────────────────────────────────┘
```
Query:
``` sql
SELECT isIPAddressInRange('127.0.0.1', 'ffff::/16')
```
Result:
``` text
┌─isIPAddressInRange('127.0.0.1', 'ffff::/16')─┐
│ 0 │
└──────────────────────────────────────────────┘
```

View File

@ -12,7 +12,9 @@ The search is case-sensitive by default in all these functions. There are separa
## position(haystack, needle), locate(haystack, needle) {#position}
Returns the position (in bytes) of the found substring in the string, starting from 1.
Searches for the substring `needle` in the string `haystack`.
Returns the position (in bytes) of the found substring in the string, starting from 1.
For a case-insensitive search, use the function [positionCaseInsensitive](#positioncaseinsensitive).
@ -20,15 +22,22 @@ For a case-insensitive search, use the function [positionCaseInsensitive](#posit
``` sql
position(haystack, needle[, start_pos])
```
```
``` sql
position(needle IN haystack)
```
Alias: `locate(haystack, needle[, start_pos])`.
!!! note "Note"
The `position(needle IN haystack)` syntax is provided for SQL compatibility; the function works the same way as `position(haystack, needle)`.
**Arguments**
- `haystack` — String in which the substring will be searched. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `needle` — Substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `start_pos` — Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md).
- `start_pos` Position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md). Optional.
**Returned values**
@ -83,6 +92,36 @@ Result:
└───────────────────────────────┘
```
**Examples for POSITION(needle IN haystack) syntax**
Query:
```sql
SELECT 3 = position('c' IN 'abc');
```
Result:
```text
┌─equals(3, position('abc', 'c'))─┐
│ 1 │
└─────────────────────────────────┘
```
Query:
```sql
SELECT 6 = position('/' IN s) FROM (SELECT 'Hello/World' AS s);
```
Result:
```text
┌─equals(6, position(s, '/'))─┐
│ 1 │
└─────────────────────────────┘
```
## positionCaseInsensitive {#positioncaseinsensitive}
The same as [position](#position) returns the position (in bytes) of the found substring in the string, starting from 1. Use the function for a case-insensitive search.
@ -772,4 +811,3 @@ Result:
│ 2 │
└───────────────────────────────┘
```

View File

@ -111,4 +111,55 @@ Result:
- [Tuple](../../sql-reference/data-types/tuple.md)
[Original article](https://clickhouse.tech/docs/en/sql-reference/functions/tuple-functions/) <!--hide-->
## tupleHammingDistance {#tuplehammingdistance}
Returns the [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance) between two tuples of the same size.
**Syntax**
``` sql
tupleHammingDistance(tuple1, tuple2)
```
**Arguments**
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
The tuples should have elements of the same type.
**Returned value**
- The Hamming distance.
Type: [UInt8](../../sql-reference/data-types/int-uint.md).
**Examples**
Query:
``` sql
SELECT tupleHammingDistance((1, 2, 3), (3, 2, 1)) AS HammingDistance;
```
Result:
``` text
┌─HammingDistance─┐
│ 2 │
└─────────────────┘
```
Can be used with [MinHash](../../sql-reference/functions/hash-functions.md#ngramminhash) functions for detection of semi-duplicate strings:
``` sql
SELECT tupleHammingDistance(wordShingleMinHash(string), wordShingleMinHashCaseInsensitive(string)) as HammingDistance FROM (SELECT 'Clickhouse is a column-oriented database management system for online analytical processing of queries.' AS string);
```
Result:
``` text
┌─HammingDistance─┐
│ 2 │
└─────────────────┘
```

View File

@ -385,8 +385,6 @@ reinterpretAsUUID(fixed_string)
- `fixed_string` — Big-endian byte string. [FixedString](../../sql-reference/data-types/fixedstring.md#fixedstring).
## reinterpret(x, T) {#type_conversion_function-reinterpret}
**Returned value**
- The UUID type value. [UUID](../../sql-reference/data-types/uuid.md#uuid-data-type).
@ -398,9 +396,7 @@ String to UUID.
Query:
``` sql
SELECT reinterpret(toInt8(-1), 'UInt8') as int_to_uint,
reinterpret(toInt8(1), 'Float32') as int_to_float,
reinterpret('1', 'UInt32') as string_to_int;
SELECT reinterpretAsUUID(reverse(unhex('000102030405060708090a0b0c0d0e0f')));
```
Result:
@ -431,15 +427,51 @@ Result:
└─────────────────────┘
```
## reinterpret(x, T) {#type_conversion_function-reinterpret}
Uses the same source in-memory byte sequence for the `x` value and reinterprets it to the destination type.
Query:
```sql
SELECT reinterpret(toInt8(-1), 'UInt8') as int_to_uint,
reinterpret(toInt8(1), 'Float32') as int_to_float,
reinterpret('1', 'UInt32') as string_to_int;
```
Result:
```
┌─int_to_uint─┬─int_to_float─┬─string_to_int─┐
│ 255 │ 1e-45 │ 49 │
└─────────────┴──────────────┴───────────────┘
```
## CAST(x, T) {#type_conversion_function-cast}
Converts input value `x` to the `T` data type.
Converts the input value `x` to the `T` data type. Unlike the `reinterpret` function, it uses the external representation of the `x` value.
The syntax `CAST(x AS t)` is also supported.
Note that if the value `x` does not fit the bounds of type `T`, the function overflows. For example, `CAST(-1, 'UInt8')` returns `255`.
**Example**
**Examples**
Query:
```sql
SELECT
cast(toInt8(-1), 'UInt8') AS cast_int_to_uint,
cast(toInt8(1), 'Float32') AS cast_int_to_float,
cast('1', 'UInt32') AS cast_string_to_int
```
Result:
```
┌─cast_int_to_uint─┬─cast_int_to_float─┬─cast_string_to_int─┐
│ 255 │ 1 │ 1 │
└──────────────────┴───────────────────┴────────────────────┘
```
Query:
@ -634,6 +666,7 @@ Result:
```
## parseDateTimeBestEffort {#parsedatetimebesteffort}
## parseDateTime32BestEffort {#parsedatetime32besteffort}
Converts a date and time in the [String](../../sql-reference/data-types/string.md) representation to [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) data type.
@ -822,10 +855,12 @@ Result:
```
## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull}
## parseDateTime32BestEffortOrNull {#parsedatetime32besteffortornull}
Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns null when it encounters a date format that cannot be processed.
Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns `NULL` when it encounters a date format that cannot be processed.
## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero}
## parseDateTime32BestEffortOrZero {#parsedatetime32besteffortorzero}
Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed.
@ -1001,6 +1036,57 @@ Result:
└─────────────────────────────────┘
```
## parseDateTime64BestEffort {#parsedatetime64besteffort}
Same as the [parseDateTimeBestEffort](#parsedatetimebesteffort) function, but also parses milliseconds and microseconds and returns the `DateTime64(3)` or `DateTime64(6)` data type.
**Syntax**
``` sql
parseDateTime64BestEffort(time_string [, precision [, time_zone]])
```
**Parameters**
- `time_string` — String containing a date or date with time to convert. [String](../../sql-reference/data-types/string.md).
- `precision``3` for milliseconds, `6` for microseconds. Default `3`. Optional [UInt8](../../sql-reference/data-types/int-uint.md).
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
**Examples**
Query:
```sql
SELECT parseDateTime64BestEffort('2021-01-01') AS a, toTypeName(a) AS t
UNION ALL
SELECT parseDateTime64BestEffort('2021-01-01 01:01:00.12346') AS a, toTypeName(a) AS t
UNION ALL
SELECT parseDateTime64BestEffort('2021-01-01 01:01:00.12346',6) AS a, toTypeName(a) AS t
UNION ALL
SELECT parseDateTime64BestEffort('2021-01-01 01:01:00.12346',3,'Europe/Moscow') AS a, toTypeName(a) AS t
FORMAT PrettyCompactMonoBlock
```
Result:
```
┌──────────────────────────a─┬─t──────────────────────────────┐
│ 2021-01-01 01:01:00.123000 │ DateTime64(3) │
│ 2021-01-01 00:00:00.000000 │ DateTime64(3) │
│ 2021-01-01 01:01:00.123460 │ DateTime64(6) │
│ 2020-12-31 22:01:00.123000 │ DateTime64(3, 'Europe/Moscow') │
└────────────────────────────┴────────────────────────────────┘
```
## parseDateTime64BestEffortOrNull {#parsedatetime64besteffortornull}
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns `NULL` when it encounters a date format that cannot be processed.
## parseDateTime64BestEffortOrZero {#parsedatetime64besteffortorzero}
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns a zero date or zero date time when it encounters a date format that cannot be processed.
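For illustration, an unparsable string yields `NULL` or a zero `DateTime64`, respectively (assuming these functions are available in your ClickHouse version):

``` sql
SELECT
    parseDateTime64BestEffortOrNull('not a date') AS as_null,
    parseDateTime64BestEffortOrZero('not a date') AS as_zero;
```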
## toLowCardinality {#tolowcardinality}
Converts the input parameter to the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) version of the same data type.
@ -1009,7 +1095,7 @@ To convert data from the `LowCardinality` data type use the [CAST](#type_convers
**Syntax**
``` sql
```sql
toLowCardinality(expr)
```
@ -1027,7 +1113,7 @@ Type: `LowCardinality(expr_result_type)`
Query:
``` sql
```sql
SELECT toLowCardinality('1');
```
@ -1045,7 +1131,8 @@ Result:
## toUnixTimestamp64Nano {#tounixtimestamp64nano}
Converts a `DateTime64` to a `Int64` value with fixed sub-second precision. Input value is scaled up or down appropriately depending on it precision. Please note that output value is a timestamp in UTC, not in timezone of `DateTime64`.
Converts a `DateTime64` to an `Int64` value with fixed sub-second precision.
The input value is scaled up or down appropriately depending on its precision. Please note that the output value is a timestamp in UTC, not in the timezone of `DateTime64`.
**Syntax**
@ -1078,6 +1165,8 @@ Result:
└──────────────────────────────┘
```
Query:
``` sql
WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64
SELECT toUnixTimestamp64Nano(dt64);

View File

@ -144,7 +144,7 @@ This query changes the `name` column properties:
- TTL
For examples of columns TTL modifying, see [Column TTL](../../engines/table_engines/mergetree_family/mergetree.md#mergetree-column-ttl).
For examples of modifying column TTL, see [Column TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl).
If the `IF EXISTS` clause is specified, the query won't return an error if the column doesn't exist.

View File

@ -40,7 +40,7 @@ Read about setting the partition expression in a section [How to specify the par
After the query is executed, you can do whatever you want with the data in the `detached` directory — delete it from the file system, or just leave it.
This query is replicated it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. To find out if a replica is a leader, perform the `SELECT` query to the [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas) table. Alternatively, it is easier to make a `DETACH` query on all replicas - all the replicas throw an exception, except the leader replica.
This query is replicated: it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. To find out if a replica is a leader, perform a `SELECT` query against the [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas) table. Alternatively, it is easier to make a `DETACH` query on all replicas - all the replicas throw an exception, except the leader replicas (as multiple leaders are allowed).
## DROP PARTITION\|PART {#alter_drop-partition}
@ -85,9 +85,15 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0;
Read more about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
This query is replicated. The replica-initiator checks whether there is data in the `detached` directory. If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table. All other replicas download the data from the replica-initiator.
This query is replicated. The replica-initiator checks whether there is data in the `detached` directory.
If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table.
So you can put data to the `detached` directory on one replica, and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own
`detached` folder, it attaches the data without fetching it from other replicas.
If there is no part with the correct checksums, the data is downloaded from any replica having the part.
You can put data into the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the
table on all replicas.
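For example, a sketch reusing the `visits` table from the examples above (the partition ID is hypothetical):

```sql
-- Attach a previously detached partition. Non-initiator replicas attach it from their own
-- `detached` folders if the checksums match, otherwise they download it from a replica that has it.
ALTER TABLE visits ATTACH PARTITION 201902;
```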
## ATTACH PARTITION FROM {#alter_attach-partition-from}
@ -95,7 +101,8 @@ So you can put data to the `detached` directory on one replica, and use the `ALT
ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
```
This query copies the data partition from the `table1` to `table2` adds data to exsisting in the `table2`. Note that data wont be deleted from `table1`.
This query copies the data partition from `table1` to `table2`.
Note that data won't be deleted from either `table1` or `table2`.
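A sketch (both tables are hypothetical and must satisfy the conditions listed below):

```sql
ALTER TABLE table2 ATTACH PARTITION 201902 FROM table1;
```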
For the query to run successfully, the following conditions must be met:

View File

@ -68,7 +68,7 @@ To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop
!!! important "Important"
This is an experimental feature that may change in backwards-incompatible ways in the future releases.
Enable usage of live views and `WATCH` query using `set allow_experimental_live_view = 1`.
    Enable usage of live views and the `WATCH` query using the [allow_experimental_live_view](../../../operations/settings/settings.md#allow-experimental-live-view) setting. Run the command `SET allow_experimental_live_view = 1`.
```sql
@ -90,7 +90,9 @@ Live views work similarly to how a query in a distributed table works. But inste
See [WITH REFRESH](#live-view-with-refresh) to force periodic updates of a live view; in some cases that can be used as a workaround.
You can watch for changes in the live view query result using the [WATCH](../../../sql-reference/statements/watch.md) query
### Monitoring Changes {#live-view-monitoring}
You can monitor changes in the `LIVE VIEW` query result using [WATCH](../../../sql-reference/statements/watch.md) query.
```sql
WATCH [db.]live_view
@ -102,11 +104,10 @@ WATCH [db.]live_view
CREATE TABLE mt (x Int8) Engine = MergeTree ORDER BY x;
CREATE LIVE VIEW lv AS SELECT sum(x) FROM mt;
```
Watch a live view while doing a parallel insert into the source table.
```sql
WATCH lv
WATCH lv;
```
```bash
@ -128,16 +129,16 @@ INSERT INTO mt VALUES (2);
INSERT INTO mt VALUES (3);
```
or add [EVENTS](../../../sql-reference/statements/watch.md#events-clause) clause to just get change events.
Or add the [EVENTS](../../../sql-reference/statements/watch.md#events-clause) clause to just get change events.
```sql
WATCH [db.]live_view EVENTS
WATCH [db.]live_view EVENTS;
```
**Example:**
```sql
WATCH lv EVENTS
WATCH lv EVENTS;
```
```bash
@ -163,15 +164,15 @@ SELECT * FROM [db.]live_view WHERE ...
You can force live view refresh using the `ALTER LIVE VIEW [db.]table_name REFRESH` statement.
### With Timeout {#live-view-with-timeout}
### WITH TIMEOUT Clause {#live-view-with-timeout}
When a live view is create with a `WITH TIMEOUT` clause then the live view will be dropped automatically after the specified number of seconds elapse since the end of the last [WATCH](../../../sql-reference/statements/watch.md) query that was watching the live view.
When a live view is created with a `WITH TIMEOUT` clause, it is dropped automatically after the specified number of seconds have elapsed since the end of the last [WATCH](../../../sql-reference/statements/watch.md) query that was watching it.
```sql
CREATE LIVE VIEW [db.]table_name WITH TIMEOUT [value_in_sec] AS SELECT ...
```
If the timeout value is not specified then the value specified by the `temporary_live_view_timeout` setting is used.
If the timeout value is not specified then the value specified by the [temporary_live_view_timeout](../../../operations/settings/settings.md#temporary-live-view-timeout) setting is used.
**Example:**
@ -180,7 +181,7 @@ CREATE TABLE mt (x Int8) Engine = MergeTree ORDER BY x;
CREATE LIVE VIEW lv WITH TIMEOUT 15 AS SELECT sum(x) FROM mt;
```
### With Refresh {#live-view-with-refresh}
### WITH REFRESH Clause {#live-view-with-refresh}
When a live view is created with a `WITH REFRESH` clause, it is automatically refreshed after the specified number of seconds have elapsed since the last refresh or trigger.
@ -188,7 +189,7 @@ When a live view is created with a `WITH REFRESH` clause then it will be automat
CREATE LIVE VIEW [db.]table_name WITH REFRESH [value_in_sec] AS SELECT ...
```
If the refresh value is not specified then the value specified by the `periodic_live_view_refresh` setting is used.
If the refresh value is not specified then the value specified by the [periodic_live_view_refresh](../../../operations/settings/settings.md#periodic-live-view-refresh) setting is used.
**Example:**
@ -231,7 +232,7 @@ WATCH lv
Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.lv doesn't exist..
```
### Usage
### Usage {#live-view-usage}
Most common uses of live view tables include:
@ -240,15 +241,4 @@ Most common uses of live view tables include:
- Watching for table changes and triggering follow-up select queries.
- Watching metrics from system tables using periodic refresh.
### Settings {#live-view-settings}
You can use the following settings to control the behaviour of live views.
- `allow_experimental_live_view` - enable live views. Default is `0`.
- `live_view_heartbeat_interval` - the heartbeat interval in seconds to indicate live query is alive. Default is `15` seconds.
- `max_live_view_insert_blocks_before_refresh` - maximum number of inserted blocks after which
mergeable blocks are dropped and query is re-executed. Default is `64` inserts.
- `temporary_live_view_timeout` - interval after which live view with timeout is deleted. Default is `5` seconds.
- `periodic_live_view_refresh` - interval after which periodically refreshed live view is forced to refresh. Default is `60` seconds.
[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/create/view/) <!--hide-->

View File

@ -264,6 +264,10 @@ Wait until a `ReplicatedMergeTree` table will be synced with other replicas in a
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
```
After running this statement, the `[db.]replicated_merge_tree_family_table_name` table fetches commands from
the common replicated log into its own replication queue, and then the query waits until the replica processes all
of the fetched commands.
### RESTART REPLICA {#query_language-system-restart-replica}
Provides the possibility to reinitialize the ZooKeeper session state for a `ReplicatedMergeTree` table. It compares the current state with ZooKeeper as the source of truth and adds tasks to the ZooKeeper queue if needed.
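A usage sketch, mirroring the `SYNC REPLICA` syntax above (the table name is a placeholder):

```sql
SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
```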

View File

@ -17,19 +17,21 @@ WATCH [db.]live_view
[FORMAT format]
```
The `WATCH` query performs continuous data retrieval from a [live view](./create/view.md#live-view) table. Unless the `LIMIT` clause is specified it provides an infinite stream of query results from a [live view](./create/view.md#live-view).
The `WATCH` query performs continuous data retrieval from a [LIVE VIEW](./create/view.md#live-view) table. Unless the `LIMIT` clause is specified it provides an infinite stream of query results from a [LIVE VIEW](./create/view.md#live-view).
```sql
WATCH [db.]live_view
WATCH [db.]live_view [EVENTS] [LIMIT n] [FORMAT format]
```
## Virtual columns {#watch-virtual-columns}
The virtual `_version` column in the query result indicates the current result version.
**Example:**
```sql
CREATE LIVE VIEW lv WITH REFRESH 5 AS SELECT now();
WATCH lv
WATCH lv;
```
```bash
@ -47,6 +49,8 @@ WATCH lv
By default, the requested data is returned to the client, while in conjunction with [INSERT INTO](../../sql-reference/statements/insert-into.md) it can be forwarded to a different table.
**Example:**
```sql
INSERT INTO [db.]table WATCH [db.]live_view ...
```
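As a concrete sketch, assuming a hypothetical sink table `lv_snapshots` whose structure matches the `WATCH` output of the live view `lv`:

```sql
INSERT INTO lv_snapshots WATCH lv;
```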
@ -56,14 +60,14 @@ INSERT INTO [db.]table WATCH [db.]live_view ...
The `EVENTS` clause can be used to obtain a short form of the `WATCH` query where instead of the query result you will just get the latest query result version.
```sql
WATCH [db.]live_view EVENTS
WATCH [db.]live_view EVENTS;
```
**Example:**
```sql
CREATE LIVE VIEW lv WITH REFRESH 5 AS SELECT now();
WATCH lv EVENTS
WATCH lv EVENTS;
```
```bash
@ -78,17 +82,17 @@ WATCH lv EVENTS
## LIMIT Clause {#limit-clause}
The `LIMIT n` clause species the number of updates the `WATCH` query should wait for before terminating. By default there is no limit on the number of updates and therefore the query will not terminate. The value of `0` indicates that the `WATCH` query should not wait for any new query results and therefore will return immediately once query is evaluated.
The `LIMIT n` clause specifies the number of updates the `WATCH` query should wait for before terminating. By default there is no limit on the number of updates and therefore the query will not terminate. The value of `0` indicates that the `WATCH` query should not wait for any new query results and therefore will return immediately once the query result is evaluated.
```sql
WATCH [db.]live_view LIMIT 1
WATCH [db.]live_view LIMIT 1;
```
**Example:**
```sql
CREATE LIVE VIEW lv WITH REFRESH 5 AS SELECT now();
WATCH lv EVENTS LIMIT 1
WATCH lv EVENTS LIMIT 1;
```
```bash
@ -102,5 +106,4 @@ WATCH lv EVENTS LIMIT 1
The `FORMAT` clause works the same way as for the [SELECT](../../sql-reference/statements/select/format.md#format-clause).
!!! info "Note"
The [JSONEachRowWithProgress](../../interfaces/formats/#jsoneachrowwithprogress) format should be used when watching [live view](./create/view.md#live-view) tables over the HTTP interface. The progress messages will be added to the output to keep the long-lived HTTP connection alive until the query result changes. The interval between progress messages is controlled using the [live_view_heartbeat_interval](./create/view.md#live-view-settings) setting.
The [JSONEachRowWithProgress](../../interfaces/formats.md#jsoneachrowwithprogress) format should be used when watching [LIVE VIEW](./create/view.md#live-view) tables over the HTTP interface. The progress messages will be added to the output to keep the long-lived HTTP connection alive until the query result changes. The interval between progress messages is controlled using the [live_view_heartbeat_interval](./create/view.md#live-view-settings) setting.

View File

@ -21,16 +21,18 @@ You can use table functions in:
!!! warning "Warning"
    You can't use table functions if the [allow_ddl](../../operations/settings/permissions-for-queries.md#settings_allow_ddl) setting is disabled.
| Function | Description |
|-----------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|
| [file](../../sql-reference/table-functions/file.md) | Creates a File-engine table. |
| [merge](../../sql-reference/table-functions/merge.md) | Creates a Merge-engine table. |
| [numbers](../../sql-reference/table-functions/numbers.md) | Creates a table with a single column filled with integer numbers. |
| [remote](../../sql-reference/table-functions/remote.md) | Allows you to access remote servers without creating a Distributed-engine table. |
| [url](../../sql-reference/table-functions/url.md) | Creates a URL-engine table. |
| [mysql](../../sql-reference/table-functions/mysql.md) | Creates a MySQL-engine table. |
| [postgresql](../../sql-reference/table-functions/postgresql.md) | Creates a PostgreSQL-engine table. |
| [jdbc](../../sql-reference/table-functions/jdbc.md) | Creates a JDBC-engine table. |
| [odbc](../../sql-reference/table-functions/odbc.md) | Creates a ODBC-engine table. |
| [hdfs](../../sql-reference/table-functions/hdfs.md) | Creates a HDFS-engine table. |
| [s3](../../sql-reference/table-functions/s3.md) | Creates a S3-engine table. |
| Function | Description |
|------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|
| [file](../../sql-reference/table-functions/file.md) | Creates a [File](../../engines/table-engines/special/file.md)-engine table. |
| [merge](../../sql-reference/table-functions/merge.md) | Creates a [Merge](../../engines/table-engines/special/merge.md)-engine table. |
| [numbers](../../sql-reference/table-functions/numbers.md) | Creates a table with a single column filled with integer numbers. |
| [remote](../../sql-reference/table-functions/remote.md) | Allows you to access remote servers without creating a [Distributed](../../engines/table-engines/special/distributed.md)-engine table. |
| [url](../../sql-reference/table-functions/url.md)                 | Creates a [URL](../../engines/table-engines/special/url.md)-engine table.                                                                |
| [mysql](../../sql-reference/table-functions/mysql.md) | Creates a [MySQL](../../engines/table-engines/integrations/mysql.md)-engine table. |
| [postgresql](../../sql-reference/table-functions/postgresql.md) | Creates a [PostgreSQL](../../engines/table-engines/integrations/postgresql.md)-engine table. |
| [jdbc](../../sql-reference/table-functions/jdbc.md) | Creates a [JDBC](../../engines/table-engines/integrations/jdbc.md)-engine table. |
| [odbc](../../sql-reference/table-functions/odbc.md)               | Creates an [ODBC](../../engines/table-engines/integrations/odbc.md)-engine table.                                                        |
| [hdfs](../../sql-reference/table-functions/hdfs.md)               | Creates an [HDFS](../../engines/table-engines/integrations/hdfs.md)-engine table.                                                        |
| [s3](../../sql-reference/table-functions/s3.md)                   | Creates an [S3](../../engines/table-engines/integrations/s3.md)-engine table.                                                            |
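For illustration, a couple of hedged examples of calling table functions directly in the `FROM` clause (the URL and structure below are placeholders, not a real endpoint):

```sql
-- numbers() generates a single-column table on the fly.
SELECT count() FROM numbers(100);

-- url() reads remote data in the given format with the given structure.
SELECT * FROM url('https://example.com/data.csv', 'CSV', 'id UInt32, name String') LIMIT 5;
```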
[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/) <!--hide-->

View File

@ -10,33 +10,17 @@ Allows `SELECT` and `INSERT` queries to be performed on data that is stored on a
**Syntax**
``` sql
postgresql('host:port', 'database', 'table', 'user', 'password')
postgresql('host:port', 'database', 'table', 'user', 'password'[, `schema`])
```
**Arguments**
- `host:port` — PostgreSQL server address.
- `database` — Remote database name.
- `table` — Remote table name.
- `user` — PostgreSQL user.
- `password` — User password.
SELECT Queries on PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside read-only PostgreSQL transaction with commit after each `SELECT` query.
Simple `WHERE` clauses such as `=, !=, >, >=, <, <=, IN` are executed on the PostgreSQL server.
All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to PostgreSQL finishes.
INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement.
PostgreSQL Array types converts into ClickHouse arrays.
Be careful in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse it is only allowed to have multidimensional arrays of the same dimension in all rows.
- `schema` — Non-default table schema. Optional.
**Returned Value**
@ -45,6 +29,23 @@ A table object with the same columns as the original PostgreSQL table.
!!! info "Note"
    In an `INSERT` query, to distinguish the table function `postgresql(...)` from a table name with a list of column names, you must use the keywords `FUNCTION` or `TABLE FUNCTION`. See examples below.
## Implementation Details {#implementation-details}
`SELECT` queries on PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside read-only PostgreSQL transaction with commit after each `SELECT` query.
Simple `WHERE` clauses such as `=`, `!=`, `>`, `>=`, `<`, `<=`, and `IN` are executed on the PostgreSQL server.
All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to PostgreSQL finishes.
`INSERT` queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement.
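For example, a sketch of such an `INSERT` against the `test` table used in the examples below (host, credentials and values are placeholders); note the `TABLE FUNCTION` keyword required by the note above:

```sql
INSERT INTO TABLE FUNCTION postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password')
VALUES (2, NULL, 3, 'inserted from ClickHouse', NULL);
```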
PostgreSQL array types are converted into ClickHouse arrays.
!!! info "Note"
    Be careful: in PostgreSQL, an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse it is only allowed to have multidimensional arrays of the same dimension in all rows.
Supports replica priority for the PostgreSQL dictionary source. The bigger the number in the map, the lower the priority. The highest priority is `0`.
**Examples**
Table in PostgreSQL:
@ -60,10 +61,10 @@ PRIMARY KEY (int_id));
CREATE TABLE
postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2);
postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1
postgresql> select * from test;
postgresql> SELECT * FROM test;
int_id | int_nullable | float | str | float_nullable
--------+--------------+-------+------+----------------
1 | | 2 | test |
@ -96,9 +97,24 @@ SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'p
└────────┴──────────────┴───────┴──────┴────────────────┘
```
Using Non-default Schema:
```text
postgres=# CREATE SCHEMA "nice.schema";
postgres=# CREATE TABLE "nice.schema"."nice.table" (a integer);
postgres=# INSERT INTO "nice.schema"."nice.table" SELECT i FROM generate_series(0, 99) as t(i)
```
```sql
CREATE TABLE pg_table_schema_with_dots (a UInt32)
ENGINE PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgresql_user', 'password', 'nice.schema');
```
**See Also**
- [The PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md)
- [The PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md)
- [Using PostgreSQL as a source of external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/postgresql/) <!--hide-->

View File

@ -23,7 +23,9 @@ ClickHouse supports the standard grammar for defining windows and window functio
| `GROUPS` frame | not supported |
| Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | all aggregate functions are supported |
| `rank()`, `dense_rank()`, `row_number()` | supported |
| `lag/lead(value, offset)` | not supported, replace with `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead`|
| `lag/lead(value, offset)` | Not supported. Workarounds: |
| | 1) replace with `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead`|
| | 2) use `lagInFrame/leadInFrame`, which are analogous but respect the window frame. To get behavior identical to `lag/lead`, use `rows between unbounded preceding and unbounded following` (see the sketch after this table) |
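A minimal sketch of both workarounds, assuming a hypothetical table `t(time DateTime, value UInt64)`; on releases where window functions are still experimental, `allow_experimental_window_functions` may need to be enabled first:

```sql
-- Emulate lag(value, 1): `any` over a one-row frame, and lagInFrame over the whole partition.
SELECT
    time,
    value,
    any(value) OVER (ORDER BY time ASC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS lag_via_any,
    lagInFrame(value, 1) OVER (ORDER BY time ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS lag_via_lag_in_frame
FROM t
ORDER BY time ASC;
```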
## References

View File

@ -1,23 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 1
toc_title: Cloud
---
# ClickHouse Cloud Service Providers {#clickhouse-cloud-service-providers}
!!! info "INFO"
    If you have launched a public cloud with a managed ClickHouse service, feel free to [open a pull request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) adding it to the following list.
## Yandex Cloud {#yandex-cloud}
[Yandex Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) provides the following key features:
- Fully managed ZooKeeper service for [ClickHouse replication](../engines/table-engines/mergetree-family/replication.md)
- Multiple storage type choices
- Replicas in different availability zones
- Encryption and isolation
- Automated maintenance
{## [Original article](https://clickhouse.tech/docs/en/commercial/cloud/) ##}

View File

@ -1,9 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_folder_title: Commercial
toc_priority: 70
toc_title: Commercial
---

View File

@ -1,23 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 3
toc_title: Support
---
# ClickHouse Commercial Support Service Providers {#clickhouse-commercial-support-service-providers}
!!! info "INFO"
    If you have launched a ClickHouse commercial support service, feel free to [open a pull request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md) adding it to the following list.
## Altinity {#altinity}
Altinity has offered enterprise ClickHouse support and services since 2017. Altinity customers range from Fortune 100 enterprises to startups. Visit [www.altinity.com](https://www.altinity.com/) for more information.
## Mafiree {#mafiree}
[Service description](http://mafiree.com/clickhouse-analytics-services.php)
## MinervaDB {#minervadb}
[Service description](https://minervadb.com/index.php/clickhouse-consulting-and-support-by-minervadb/)

View File

@ -1,203 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 62
toc_title: "Descripci\xF3n general de la arquitectura ClickHouse"
---
# Descripción general de la arquitectura ClickHouse {#overview-of-clickhouse-architecture}
ClickHouse es un verdadero DBMS orientado a columnas. Los datos se almacenan por columnas y durante la ejecución de matrices (vectores o fragmentos de columnas). Siempre que sea posible, las operaciones se envían en matrices, en lugar de en valores individuales. Se llama “vectorized query execution,” y ayuda a reducir el costo del procesamiento de datos real.
> Esta idea no es nada nuevo. Se remonta a la `APL` lenguaje de programación y sus descendientes: `A +`, `J`, `K`, y `Q`. La programación de matrices se utiliza en el procesamiento de datos científicos. Tampoco es esta idea algo nuevo en las bases de datos relacionales: por ejemplo, se usa en el `Vectorwise` sistema.
Existen dos enfoques diferentes para acelerar el procesamiento de consultas: la ejecución de consultas vectorizadas y la generación de código en tiempo de ejecución. Este último elimina toda la indirección y el despacho dinámico. Ninguno de estos enfoques es estrictamente mejor que el otro. La generación de código de tiempo de ejecución puede ser mejor cuando fusiona muchas operaciones, utilizando así las unidades de ejecución de la CPU y la canalización. La ejecución de consultas vectorizadas puede ser menos práctica porque implica vectores temporales que deben escribirse en la memoria caché y leerse. Si los datos temporales no caben en la memoria caché L2, esto se convierte en un problema. Pero la ejecución de consultas vectorizadas utiliza más fácilmente las capacidades SIMD de la CPU. Un [documento de investigación](http://15721.courses.cs.cmu.edu/spring2016/papers/p5-sompolski.pdf) escrito por nuestros amigos muestra que es mejor combinar ambos enfoques. ClickHouse utiliza la ejecución de consultas vectorizadas y tiene un soporte inicial limitado para la generación de código en tiempo de ejecución.
## Columna {#columns}
`IColumn` interfaz se utiliza para representar columnas en la memoria (en realidad, fragmentos de columnas). Esta interfaz proporciona métodos auxiliares para la implementación de varios operadores relacionales. Casi todas las operaciones son inmutables: no modifican la columna original, sino que crean una nueva modificada. Por ejemplo, el `IColumn :: filter` método acepta una máscara de bytes de filtro. Se utiliza para el `WHERE` y `HAVING` operadores relacionales. Ejemplos adicionales: el `IColumn :: permute` para apoyar `ORDER BY`, el `IColumn :: cut` para apoyar `LIMIT`.
Diversos `IColumn` aplicación (`ColumnUInt8`, `ColumnString`, y así sucesivamente) son responsables del diseño de memoria de las columnas. El diseño de memoria suele ser una matriz contigua. Para el tipo entero de columnas, es solo una matriz contigua, como `std :: vector`. Para `String` y `Array` columnas, son dos vectores: uno para todos los elementos de la matriz, colocados contiguamente, y un segundo para los desplazamientos al comienzo de cada matriz. También hay `ColumnConst` que almacena solo un valor en la memoria, pero parece una columna.
## Campo {#field}
Sin embargo, también es posible trabajar con valores individuales. Para representar un valor individual, el `Field` se utiliza. `Field` es sólo una unión discriminada de `UInt64`, `Int64`, `Float64`, `String` y `Array`. `IColumn` tiene el `operator[]` para obtener el valor n-ésimo como un `Field` y el `insert` método para agregar un `Field` al final de una columna. Estos métodos no son muy eficientes, ya que requieren tratar con temporal `Field` objetos que representan un valor individual. Hay métodos más eficientes, tales como `insertFrom`, `insertRangeFrom` y así sucesivamente.
`Field` no tiene suficiente información sobre un tipo de datos específico para una tabla. Por ejemplo, `UInt8`, `UInt16`, `UInt32`, y `UInt64` todos están representados como `UInt64` en una `Field`.
## Abstracciones con fugas {#leaky-abstractions}
`IColumn` tiene métodos para transformaciones relacionales comunes de datos, pero no satisfacen todas las necesidades. Por ejemplo, `ColumnUInt64` no tiene un método para calcular la suma de dos columnas, y `ColumnString` no tiene un método para ejecutar una búsqueda de subcadena. Estas innumerables rutinas se implementan fuera de `IColumn`.
Varias funciones en columnas se pueden implementar de una manera genérica, no eficiente utilizando `IColumn` para extraer `Field` valores, o de una manera especializada utilizando el conocimiento del diseño de la memoria interna de los datos en un `IColumn` aplicación. Se implementa mediante la conversión de funciones a un `IColumn` escriba y trate con la representación interna directamente. Por ejemplo, `ColumnUInt64` tiene el `getData` método que devuelve una referencia a una matriz interna, luego una rutina separada lee o llena esa matriz directamente. Tenemos “leaky abstractions” para permitir especializaciones eficientes de varias rutinas.
## Tipos de datos {#data_types}
`IDataType` es responsable de la serialización y deserialización: para leer y escribir fragmentos de columnas o valores individuales en formato binario o de texto. `IDataType` corresponde directamente a los tipos de datos en las tablas. Por ejemplo, hay `DataTypeUInt32`, `DataTypeDateTime`, `DataTypeString` y así sucesivamente.
`IDataType` y `IColumn` están vagamente relacionados entre sí. Diferentes tipos de datos se pueden representar en la memoria por el mismo `IColumn` aplicación. Por ejemplo, `DataTypeUInt32` y `DataTypeDateTime` están representados por `ColumnUInt32` o `ColumnConstUInt32`. Además, el mismo tipo de datos se puede representar mediante `IColumn` aplicación. Por ejemplo, `DataTypeUInt8` puede ser representado por `ColumnUInt8` o `ColumnConstUInt8`.
`IDataType` sólo almacena metadatos. Por ejemplo, `DataTypeUInt8` no almacena nada en absoluto (excepto vptr) y `DataTypeFixedString` tiendas solo `N` (el tamaño de las cadenas de tamaño fijo).
`IDataType` tiene métodos auxiliares para varios formatos de datos. Los ejemplos son métodos para serializar un valor con posibles citas, para serializar un valor para JSON y para serializar un valor como parte del formato XML. No hay correspondencia directa con los formatos de datos. Por ejemplo, los diferentes formatos de datos `Pretty` y `TabSeparated` puede utilizar el mismo `serializeTextEscaped` método de ayuda de la `IDataType` interfaz.
## Bloque {#block}
A `Block` es un contenedor que representa un subconjunto (porción) de una tabla en la memoria. Es sólo un conjunto de triples: `(IColumn, IDataType, column name)`. Durante la ejecución de la consulta, los datos son procesados por `Block`s. Si tenemos un `Block`, tenemos datos (en el `IColumn` objeto), tenemos información sobre su tipo (en `IDataType`) que nos dice cómo lidiar con esa columna, y tenemos el nombre de la columna. Podría ser el nombre de columna original de la tabla o algún nombre artificial asignado para obtener resultados temporales de los cálculos.
Cuando calculamos alguna función sobre columnas en un bloque, agregamos otra columna con su resultado al bloque, y no tocamos columnas para argumentos de la función porque las operaciones son inmutables. Más tarde, las columnas innecesarias se pueden eliminar del bloque, pero no se pueden modificar. Es conveniente para la eliminación de subexpresiones comunes.
Se crean bloques para cada fragmento de datos procesado. Tenga en cuenta que para el mismo tipo de cálculo, los nombres y tipos de columna siguen siendo los mismos para diferentes bloques y solo cambian los datos de columna. Es mejor dividir los datos del bloque desde el encabezado del bloque porque los tamaños de bloque pequeños tienen una gran sobrecarga de cadenas temporales para copiar shared_ptrs y nombres de columna.
## Bloquear flujos {#block-streams}
Los flujos de bloques son para procesar datos. Usamos flujos de bloques para leer datos de algún lugar, realizar transformaciones de datos o escribir datos en algún lugar. `IBlockInputStream` tiene el `read` método para buscar el siguiente bloque mientras esté disponible. `IBlockOutputStream` tiene el `write` método para empujar el bloque en alguna parte.
Los flujos son responsables de:
1. Leer o escribir en una mesa. La tabla solo devuelve una secuencia para leer o escribir bloques.
2. Implementación de formatos de datos. Por ejemplo, si desea enviar datos a un terminal en `Pretty` formato, crea un flujo de salida de bloque donde presiona bloques y los formatea.
3. Realización de transformaciones de datos. Digamos que tienes `IBlockInputStream` y desea crear una secuencia filtrada. Usted crea `FilterBlockInputStream` e inicializarlo con su transmisión. Luego, cuando tiras de un bloque de `FilterBlockInputStream`, extrae un bloque de su flujo, lo filtra y le devuelve el bloque filtrado. Las canalizaciones de ejecución de consultas se representan de esta manera.
Hay transformaciones más sofisticadas. Por ejemplo, cuando tiras de `AggregatingBlockInputStream`, lee todos los datos de su origen, los agrega y, a continuación, devuelve un flujo de datos agregados para usted. Otro ejemplo: `UnionBlockInputStream` acepta muchas fuentes de entrada en el constructor y también una serie de subprocesos. Lanza múltiples hilos y lee de múltiples fuentes en paralelo.
> Las secuencias de bloques usan el “pull” enfoque para controlar el flujo: cuando extrae un bloque de la primera secuencia, en consecuencia extrae los bloques requeridos de las secuencias anidadas, y toda la tubería de ejecución funcionará. Ni “pull” ni “push” es la mejor solución, porque el flujo de control está implícito y eso limita la implementación de varias características, como la ejecución simultánea de múltiples consultas (fusionando muchas tuberías). Esta limitación podría superarse con coroutines o simplemente ejecutando hilos adicionales que se esperan el uno al otro. Podemos tener más posibilidades si hacemos explícito el flujo de control: si localizamos la lógica para pasar datos de una unidad de cálculo a otra fuera de esas unidades de cálculo. Lea esto [artículo](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) para más pensamientos.
Debemos tener en cuenta que la canalización de ejecución de consultas crea datos temporales en cada paso. Tratamos de mantener el tamaño del bloque lo suficientemente pequeño para que los datos temporales se ajusten a la memoria caché de la CPU. Con esa suposición, escribir y leer datos temporales es casi gratis en comparación con otros cálculos. Podríamos considerar una alternativa, que es fusionar muchas operaciones en la tubería. Podría hacer que la tubería sea lo más corta posible y eliminar gran parte de los datos temporales, lo que podría ser una ventaja, pero también tiene inconvenientes. Por ejemplo, una canalización dividida facilita la implementación de almacenamiento en caché de datos intermedios, el robo de datos intermedios de consultas similares que se ejecutan al mismo tiempo y la fusión de canalizaciones para consultas similares.
## Formato {#formats}
Los formatos de datos se implementan con flujos de bloques. Hay “presentational” sólo es adecuado para la salida de datos al cliente, tales como `Pretty` formato, que proporciona sólo `IBlockOutputStream`. Y hay formatos de entrada / salida, como `TabSeparated` o `JSONEachRow`.
También hay secuencias de filas: `IRowInputStream` y `IRowOutputStream`. Permiten pull/push datos por filas individuales, no por bloques. Y solo son necesarios para simplificar la implementación de formatos orientados a filas. Envoltura `BlockInputStreamFromRowInputStream` y `BlockOutputStreamFromRowOutputStream` le permite convertir flujos orientados a filas en flujos regulares orientados a bloques.
## I/O {#io}
Para la entrada / salida orientada a bytes, hay `ReadBuffer` y `WriteBuffer` clases abstractas. Se usan en lugar de C ++ `iostream`s. No se preocupe: cada proyecto maduro de C ++ está usando algo más que `iostream`s por buenas razones.
`ReadBuffer` y `WriteBuffer` son solo un búfer contiguo y un cursor apuntando a la posición en ese búfer. Las implementaciones pueden poseer o no la memoria del búfer. Hay un método virtual para llenar el búfer con los siguientes datos (para `ReadBuffer`) o para vaciar el búfer en algún lugar (para `WriteBuffer`). Los métodos virtuales rara vez se llaman.
Implementaciones de `ReadBuffer`/`WriteBuffer` se utilizan para trabajar con archivos y descriptores de archivos y sockets de red, para implementar la compresión (`CompressedWriteBuffer` is initialized with another WriteBuffer and performs compression before writing data to it), and for other purposes the names `ConcatReadBuffer`, `LimitReadBuffer`, y `HashingWriteBuffer` hablar por sí mismos.
Read / WriteBuffers solo se ocupan de bytes. Hay funciones de `ReadHelpers` y `WriteHelpers` archivos de encabezado para ayudar con el formato de entrada / salida. Por ejemplo, hay ayudantes para escribir un número en formato decimal.
Veamos qué sucede cuando quieres escribir un conjunto de resultados en `JSON` formato a stdout. Tiene un conjunto de resultados listo para ser recuperado de `IBlockInputStream`. Usted crea `WriteBufferFromFileDescriptor(STDOUT_FILENO)` para escribir bytes en stdout. Usted crea `JSONRowOutputStream`, inicializado con eso `WriteBuffer` para escribir filas en `JSON` a stdout. Usted crea `BlockOutputStreamFromRowOutputStream` encima de él, para representarlo como `IBlockOutputStream`. Entonces usted llama `copyData` para transferir datos desde `IBlockInputStream` a `IBlockOutputStream` y todo funciona. Internamente, `JSONRowOutputStream` escribirá varios delimitadores JSON y llamará al `IDataType::serializeTextJSON` con una referencia a `IColumn` y el número de fila como argumentos. Consecuentemente, `IDataType::serializeTextJSON` llamará a un método de `WriteHelpers.h`: por ejemplo, `writeText` para tipos numéricos y `writeJSONString` para `DataTypeString`.
## Tabla {#tables}
El `IStorage` interfaz representa tablas. Las diferentes implementaciones de esa interfaz son diferentes motores de tabla. Los ejemplos son `StorageMergeTree`, `StorageMemory` y así sucesivamente. Las instancias de estas clases son solo tablas.
Clave `IStorage` son `read` y `write`. También hay `alter`, `rename`, `drop` y así sucesivamente. El `read` método acepta los siguientes argumentos: el conjunto de columnas para leer de una tabla, el `AST` consulta a considerar, y el número deseado de flujos para devolver. Devuelve uno o varios `IBlockInputStream` objetos e información sobre la etapa de procesamiento de datos que se completó dentro de un motor de tablas durante la ejecución de la consulta.
En la mayoría de los casos, el método de lectura solo es responsable de leer las columnas especificadas de una tabla, no de ningún procesamiento de datos adicional. Todo el procesamiento de datos adicional es realizado por el intérprete de consultas y está fuera de la responsabilidad de `IStorage`.
Pero hay excepciones notables:
- La consulta AST se pasa al `read` método, y el motor de tablas puede usarlo para derivar el uso del índice y leer menos datos de una tabla.
- A veces, el motor de tablas puede procesar los datos a una etapa específica. Por ejemplo, `StorageDistributed` puede enviar una consulta a servidores remotos, pedirles que procesen datos a una etapa donde se puedan fusionar datos de diferentes servidores remotos y devolver esos datos preprocesados. El intérprete de consultas termina de procesar los datos.
Tabla `read` método puede devolver múltiples `IBlockInputStream` objetos para permitir el procesamiento de datos en paralelo. Estos flujos de entrada de bloques múltiples pueden leer de una tabla en paralelo. A continuación, puede ajustar estas secuencias con varias transformaciones (como la evaluación de expresiones o el filtrado) que se pueden calcular de forma independiente y crear un `UnionBlockInputStream` encima de ellos, para leer desde múltiples flujos en paralelo.
También hay `TableFunction`s. Estas son funciones que devuelven un `IStorage` objeto a utilizar en el `FROM` cláusula de una consulta.
Para tener una idea rápida de cómo implementar su motor de tabla, vea algo simple, como `StorageMemory` o `StorageTinyLog`.
> Como resultado de la `read` método, `IStorage` devoluciones `QueryProcessingStage` information about what parts of the query were already calculated inside storage.
## Analizador {#parsers}
Un analizador de descenso recursivo escrito a mano analiza una consulta. Por ejemplo, `ParserSelectQuery` simplemente llama recursivamente a los analizadores subyacentes para varias partes de la consulta. Los analizadores crean un `AST`. El `AST` está representado por nodos, que son instancias de `IAST`.
> Los generadores de analizadores no se utilizan por razones históricas.
## Interprete {#interpreters}
Los intérpretes son responsables de crear la canalización de ejecución de consultas `AST`. Hay intérpretes simples, como `InterpreterExistsQuery` y `InterpreterDropQuery` o el más sofisticado `InterpreterSelectQuery`. La canalización de ejecución de consultas es una combinación de flujos de entrada o salida de bloques. Por ejemplo, el resultado de interpretar el `SELECT` la consulta es la `IBlockInputStream` para leer el conjunto de resultados; el resultado de la consulta INSERT es el `IBlockOutputStream` para escribir datos para su inserción, y el resultado de interpretar el `INSERT SELECT` la consulta es la `IBlockInputStream` que devuelve un conjunto de resultados vacío en la primera lectura, pero que copia datos de `SELECT` a `INSERT` al mismo tiempo.
`InterpreterSelectQuery` utilizar `ExpressionAnalyzer` y `ExpressionActions` maquinaria para el análisis de consultas y transformaciones. Aquí es donde se realizan la mayoría de las optimizaciones de consultas basadas en reglas. `ExpressionAnalyzer` es bastante complicado y debe reescribirse: se deben extraer varias transformaciones de consultas y optimizaciones para separar clases para permitir transformaciones modulares o consultas.
## Función {#functions}
Hay funciones ordinarias y funciones agregadas. Para las funciones agregadas, consulte la siguiente sección.
Ordinary functions don't change the number of rows they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`de datos para implementar la ejecución de consultas vectorizadas.
Hay algunas funciones diversas, como [BlockSize](../sql-reference/functions/other-functions.md#function-blocksize), [rowNumberInBlock](../sql-reference/functions/other-functions.md#function-rownumberinblock), y [runningAccumulate](../sql-reference/functions/other-functions.md#function-runningaccumulate), que explotan el procesamiento de bloques y violan la independencia de las filas.
ClickHouse tiene una tipificación fuerte, por lo que no hay conversión de tipo implícita. Si una función no admite una combinación específica de tipos, produce una excepción. Pero las funciones pueden funcionar (estar sobrecargadas) para muchas combinaciones diferentes de tipos. Por ejemplo, el `plus` función (para implementar el `+` operador) funciona para cualquier combinación de tipos numéricos: `UInt8` + `Float32`, `UInt16` + `Int8` y así sucesivamente. Además, algunas funciones variadas pueden aceptar cualquier número de argumentos, como el `concat` función.
Implementar una función puede ser un poco inconveniente porque una función distribuye explícitamente tipos de datos compatibles y `IColumns`. Por ejemplo, el `plus` La función tiene código generado por la creación de instancias de una plantilla de C ++ para cada combinación de tipos numéricos y argumentos izquierdo y derecho constantes o no constantes.
Es un excelente lugar para implementar la generación de código en tiempo de ejecución para evitar la hinchazón del código de plantilla. Además, permite agregar funciones fusionadas como multiplicar-agregar fusionado o hacer comparaciones múltiples en una iteración de bucle.
Debido a la ejecución de consultas vectorizadas, las funciones no se cortocircuitan. Por ejemplo, si escribe `WHERE f(x) AND g(y)`, ambos lados se calculan, incluso para las filas, cuando `f(x)` es cero (excepto cuando `f(x)` es una expresión constante cero). Pero si la selectividad del `f(x)` la condición es alta, y el cálculo de `f(x)` es mucho más barato que `g(y)`, es mejor implementar el cálculo de paso múltiple. Primero calcularía `f(x)`, a continuación, filtrar columnas por el resultado, y luego calcular `g(y)` solo para trozos de datos más pequeños y filtrados.
## Funciones agregadas {#aggregate-functions}
Las funciones agregadas son funciones con estado. Acumulan valores pasados en algún estado y le permiten obtener resultados de ese estado. Se gestionan con el `IAggregateFunction` interfaz. Los estados pueden ser bastante simples (el estado para `AggregateFunctionCount` es sólo una sola `UInt64` valor) o bastante complejo (el estado de `AggregateFunctionUniqCombined` es una combinación de una matriz lineal, una tabla hash, y un `HyperLogLog` estructura de datos probabilística).
Los Estados están asignados en `Arena` (un grupo de memoria) para tratar con múltiples estados mientras se ejecuta una alta cardinalidad `GROUP BY` consulta. Los estados pueden tener un constructor y destructor no trivial: por ejemplo, los estados de agregación complicados pueden asignar memoria adicional ellos mismos. Requiere cierta atención a la creación y destrucción de estados y a la adecuada aprobación de su orden de propiedad y destrucción.
Los estados de agregación se pueden serializar y deserializar para pasar a través de la red durante la ejecución de consultas distribuidas o para escribirlos en el disco donde no hay suficiente RAM. Incluso se pueden almacenar en una tabla con el `DataTypeAggregateFunction` para permitir la agregación incremental de datos.
> El formato de datos serializados para los estados de función agregados no tiene versiones en este momento. Está bien si los estados agregados solo se almacenan temporalmente. Pero tenemos el `AggregatingMergeTree` motor de tabla para la agregación incremental, y la gente ya lo está utilizando en producción. Es la razón por la que se requiere compatibilidad con versiones anteriores al cambiar el formato serializado para cualquier función agregada en el futuro.
## Servidor {#server}
El servidor implementa varias interfaces diferentes:
- Una interfaz HTTP para cualquier cliente extranjero.
- Una interfaz TCP para el cliente nativo de ClickHouse y para la comunicación entre servidores durante la ejecución de consultas distribuidas.
- Una interfaz para transferir datos para la replicación.
Internamente, es solo un servidor multiproceso primitivo sin corutinas ni fibras. Dado que el servidor no está diseñado para procesar una alta tasa de consultas simples, sino para procesar una tasa relativamente baja de consultas complejas, cada uno de ellos puede procesar una gran cantidad de datos para análisis.
El servidor inicializa el `Context` clase con el entorno necesario para la ejecución de consultas: la lista de bases de datos disponibles, usuarios y derechos de acceso, configuración, clústeres, la lista de procesos, el registro de consultas, etc. Los intérpretes utilizan este entorno.
Mantenemos una compatibilidad total con versiones anteriores y posteriores para el protocolo TCP del servidor: los clientes antiguos pueden hablar con servidores nuevos y los nuevos clientes pueden hablar con servidores antiguos. Pero no queremos mantenerlo eternamente, y estamos eliminando el soporte para versiones antiguas después de aproximadamente un año.
!!! note "Nota"
Para la mayoría de las aplicaciones externas, recomendamos usar la interfaz HTTP porque es simple y fácil de usar. El protocolo TCP está más estrechamente vinculado a las estructuras de datos internas: utiliza un formato interno para pasar bloques de datos y utiliza marcos personalizados para datos comprimidos. No hemos lanzado una biblioteca C para ese protocolo porque requiere vincular la mayor parte de la base de código ClickHouse, lo cual no es práctico.
## Ejecución de consultas distribuidas {#distributed-query-execution}
Los servidores de una configuración de clúster son en su mayoría independientes. Puede crear un `Distributed` en uno o todos los servidores de un clúster. El `Distributed` table does not store data itself it only provides a “view” a todas las tablas locales en varios nodos de un clúster. Cuando se SELECCIONA desde un `Distributed` tabla, reescribe esa consulta, elige nodos remotos de acuerdo con la configuración de equilibrio de carga y les envía la consulta. El `Distributed` table solicita a los servidores remotos que procesen una consulta hasta una etapa en la que se pueden fusionar resultados intermedios de diferentes servidores. Luego recibe los resultados intermedios y los fusiona. La tabla distribuida intenta distribuir tanto trabajo como sea posible a servidores remotos y no envía muchos datos intermedios a través de la red.
Las cosas se vuelven más complicadas cuando tiene subconsultas en cláusulas IN o JOIN, y cada una de ellas usa un `Distributed` tabla. Tenemos diferentes estrategias para la ejecución de estas consultas.
No existe un plan de consulta global para la ejecución de consultas distribuidas. Cada nodo tiene su plan de consulta local para su parte del trabajo. Solo tenemos una ejecución simple de consultas distribuidas de un solo paso: enviamos consultas para nodos remotos y luego fusionamos los resultados. Pero esto no es factible para consultas complicadas con alta cardinalidad GROUP BY o con una gran cantidad de datos temporales para JOIN. En tales casos, necesitamos “reshuffle” datos entre servidores, lo que requiere una coordinación adicional. ClickHouse no admite ese tipo de ejecución de consultas, y tenemos que trabajar en ello.
## Árbol de fusión {#merge-tree}
`MergeTree` es una familia de motores de almacenamiento que admite la indexación por clave principal. La clave principal puede ser una tupla arbitraria de columnas o expresiones. Datos en un `MergeTree` se almacena en “parts”. Cada parte almacena los datos en el orden de la clave principal, por lo que la tupla de la clave principal ordena los datos lexicográficamente. Todas las columnas de la tabla se almacenan en `column.bin` archivos en estas partes. Los archivos consisten en bloques comprimidos. Cada bloque suele ser de 64 KB a 1 MB de datos sin comprimir, dependiendo del tamaño del valor promedio. Los bloques constan de valores de columna colocados contiguamente uno tras otro. Los valores de columna están en el mismo orden para cada columna (la clave principal define el orden), por lo que cuando itera por muchas columnas, obtiene valores para las filas correspondientes.
La clave principal en sí es “sparse”. No aborda cada fila, sino solo algunos rangos de datos. Separado `primary.idx` file tiene el valor de la clave principal para cada fila N-ésima, donde se llama N `index_granularity` (generalmente, N = 8192). Además, para cada columna, tenemos `column.mrk` archivos con “marks,” que son desplazamientos a cada fila N-ésima en el archivo de datos. Cada marca es un par: el desplazamiento en el archivo al comienzo del bloque comprimido y el desplazamiento en el bloque descomprimido al comienzo de los datos. Por lo general, los bloques comprimidos están alineados por marcas, y el desplazamiento en el bloque descomprimido es cero. Datos para `primary.idx` siempre reside en la memoria, y los datos para `column.mrk` archivos se almacena en caché.
Cuando vamos a leer algo de una parte en `MergeTree` miramos `primary.idx` datos y localice rangos que podrían contener datos solicitados, luego mire `column.mrk` datos y calcular compensaciones para dónde comenzar a leer esos rangos. Debido a la escasez, el exceso de datos puede ser leído. ClickHouse no es adecuado para una gran carga de consultas de puntos simples, porque todo el rango con `index_granularity` se deben leer filas para cada clave, y todo el bloque comprimido debe descomprimirse para cada columna. Hicimos que el índice sea disperso porque debemos poder mantener billones de filas por único servidor sin un consumo de memoria notable para el índice. Además, debido a que la clave principal es escasa, no es única: no puede verificar la existencia de la clave en la tabla en el momento de INSERTAR. Podría tener muchas filas con la misma clave en una tabla.
Cuando `INSERT` un montón de datos en `MergeTree`, ese grupo está ordenado por orden de clave primaria y forma una nueva parte. Hay subprocesos de fondo que seleccionan periódicamente algunas partes y las fusionan en una sola parte ordenada para mantener el número de partes relativamente bajo. Es por eso que se llama `MergeTree`. Por supuesto, la fusión conduce a “write amplification”. Todas las partes son inmutables: solo se crean y eliminan, pero no se modifican. Cuando se ejecuta SELECT, contiene una instantánea de la tabla (un conjunto de partes). Después de la fusión, también mantenemos las piezas viejas durante algún tiempo para facilitar la recuperación después de la falla, por lo que si vemos que alguna parte fusionada probablemente esté rota, podemos reemplazarla con sus partes de origen.
`MergeTree` no es un árbol de LSM porque no contiene “memtable” y “log”: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently about once per second is ok, but a thousand times a second is not. We did it this way for simplicity's sake, and because we are already inserting data in batches in our applications.
> Las tablas MergeTree solo pueden tener un índice (primario): no hay índices secundarios. Sería bueno permitir múltiples representaciones físicas bajo una tabla lógica, por ejemplo, para almacenar datos en más de un orden físico o incluso para permitir representaciones con datos preagregados junto con datos originales.
Hay motores MergeTree que están haciendo un trabajo adicional durante las fusiones en segundo plano. Los ejemplos son `CollapsingMergeTree` y `AggregatingMergeTree`. Esto podría tratarse como soporte especial para actualizaciones. Tenga en cuenta que estas no son actualizaciones reales porque los usuarios generalmente no tienen control sobre el tiempo en que se ejecutan las fusiones en segundo plano y los datos en un `MergeTree` casi siempre se almacena en más de una parte, no en forma completamente fusionada.
## Replicación {#replication}
La replicación en ClickHouse se puede configurar por tabla. Podría tener algunas tablas replicadas y otras no replicadas en el mismo servidor. También puede tener tablas replicadas de diferentes maneras, como una tabla con replicación de dos factores y otra con replicación de tres factores.
La replicación se implementa en el `ReplicatedMergeTree` motor de almacenamiento. El camino en `ZooKeeper` se especifica como un parámetro para el motor de almacenamiento. Todas las tablas con la misma ruta en `ZooKeeper` se convierten en réplicas entre sí: sincronizan sus datos y mantienen la coherencia. Las réplicas se pueden agregar y eliminar dinámicamente simplemente creando o soltando una tabla.
La replicación utiliza un esquema multi-maestro asíncrono. Puede insertar datos en cualquier réplica que tenga una sesión con `ZooKeeper`, y los datos se replican en todas las demás réplicas de forma asíncrona. Como ClickHouse no admite UPDATE, la replicación está libre de conflictos. Como no hay reconocimiento de quórum de inserciones, los datos recién insertados pueden perderse si un nodo falla.
Los metadatos para la replicación se almacenan en ZooKeeper. Hay un registro de replicación que enumera las acciones que se deben realizar. Las acciones son: obtener parte; fusionar partes; soltar una partición, etc. Cada réplica copia el registro de replicación en su cola y, a continuación, ejecuta las acciones desde la cola. Por ejemplo, en la inserción, el “get the part” la acción se crea en el registro y cada réplica descarga esa parte. Las fusiones se coordinan entre réplicas para obtener resultados idénticos en bytes. Todas las piezas se combinan de la misma manera en todas las réplicas. Se logra eligiendo una réplica como líder, y esa réplica inicia fusiones y escrituras “merge parts” acciones al registro.
La replicación es física: solo las partes comprimidas se transfieren entre nodos, no consultas. Las fusiones se procesan en cada réplica de forma independiente en la mayoría de los casos para reducir los costos de red al evitar la amplificación de la red. Las piezas combinadas grandes se envían a través de la red solo en casos de retraso de replicación significativo.
In addition, each replica stores its state in ZooKeeper as the set of parts and their checksums. When the state on the local filesystem diverges from the reference state in ZooKeeper, the replica restores its consistency by downloading missing and broken parts from other replicas. When there is some unexpected or broken data in the local filesystem, ClickHouse does not remove it, but moves it to a separate directory and forgets about it.
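A replica's view of its own state can be checked through `system.replicas`; again a sketch against the hypothetical table above:

``` sql
-- A few of the health-related columns exposed by system.replicas.
SELECT database, table, is_leader, total_replicas, active_replicas, queue_size
FROM system.replicas
WHERE table = 'hits_replicated';
```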
!!! note "Nota"
El clúster ClickHouse consta de fragmentos independientes y cada fragmento consta de réplicas. El clúster es **no elástico**, por lo tanto, después de agregar un nuevo fragmento, los datos no se reequilibran automáticamente entre fragmentos. En su lugar, se supone que la carga del clúster debe ajustarse para que sea desigual. Esta implementación le da más control, y está bien para clústeres relativamente pequeños, como decenas de nodos. Pero para los clústeres con cientos de nodos que estamos utilizando en producción, este enfoque se convierte en un inconveniente significativo. Debemos implementar un motor de tablas que abarque todo el clúster con regiones replicadas dinámicamente que puedan dividirse y equilibrarse entre clústeres automáticamente.
{## [Original article](https://clickhouse.tech/docs/en/development/architecture/) ##}

View File

@ -1,14 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 63
toc_title: "Buscar c\xF3digo fuente"
---
# Browse ClickHouse Source Code {#browse-clickhouse-source-code}
You can use the **Woboq** online code browser available [here](https://clickhouse.tech/codebrowser/html_report/ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
Also, you can browse the sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.
If you are interested in which IDE to use, we recommend CLion, QT Creator, VS Code and KDevelop (with caveats). You can use any favorite IDE. Vim and Emacs also count.

View File

@ -1,43 +0,0 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 67
toc_title: "C\xF3mo construir ClickHouse en Linux para AARCH64 (ARM64)"
---
# How to Build ClickHouse on Linux for the AARCH64 (ARM64) Architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}
This is for the case when you have a Linux machine and want to use it to build the `clickhouse` binary that will run on another Linux machine with the AARCH64 CPU architecture. This is intended for continuous integration checks that run on Linux servers.
The cross-build for AARCH64 is based on the [Build instructions](build.md); follow them first.
# Install Clang-8 {#install-clang-8}
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example, on Ubuntu Bionic you can use the following commands:
``` bash
echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list
sudo apt-get update
sudo apt-get install clang-8
```
# Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
``` bash
cd ClickHouse
mkdir -p build-aarch64/cmake/toolchain/linux-aarch64
wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1
```
# Build ClickHouse {#build-clickhouse}
``` bash
cd ClickHouse
mkdir build-arm64
CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake
ninja -C build-arm64
```
The resulting binary will run only on Linux with the AARCH64 CPU architecture.

Some files were not shown because too many files have changed in this diff.