Merge branch 'master' of https://github.com/yandex/ClickHouse into move_partition
Commit: f727580483
.gitmodules (vendored): 3 lines changed
@@ -97,9 +97,6 @@
[submodule "contrib/rapidjson"]
    path = contrib/rapidjson
    url = https://github.com/Tencent/rapidjson
[submodule "contrib/mimalloc"]
    path = contrib/mimalloc
    url = https://github.com/ClickHouse-Extras/mimalloc
[submodule "contrib/fastops"]
    path = contrib/fastops
    url = https://github.com/ClickHouse-Extras/fastops
CMakeLists.txt: 259 lines changed
@@ -1,14 +1,22 @@
foreach(policy
    CMP0023
    CMP0048 # CMake 3.0
    CMP0074 # CMake 3.12
    CMP0077
    CMP0079
    )
    if(POLICY ${policy})
        cmake_policy(SET ${policy} NEW)
    endif()
endforeach()

project(ClickHouse)
cmake_minimum_required(VERSION 3.3)

foreach(policy
    CMP0023
    CMP0074 # CMake 3.12
    )
    if(POLICY ${policy})
        cmake_policy(SET ${policy} NEW)
    endif()
endforeach()

# Ignore export() since we don't use it,
# but it gets broken with a global targets via link_libraries()
macro (export)
endmacro ()

set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/")
set(CMAKE_EXPORT_COMPILE_COMMANDS 1) # Write compile_commands.json
@@ -128,12 +136,6 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
    endif ()
endif ()

if (GLIBC_COMPATIBILITY)
    set (USE_INTERNAL_MEMCPY ON)
else ()
    message (WARNING "Option GLIBC_COMPATIBILITY must be turned on for production builds.")
endif ()

string(REGEX MATCH "-?[0-9]+(.[0-9]+)?$" COMPILER_POSTFIX ${CMAKE_CXX_COMPILER})

find_program (LLD_PATH NAMES "lld${COMPILER_POSTFIX}" "lld")
@@ -172,20 +174,15 @@ if (ARCH_NATIVE)
    set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
endif ()

# Special options for better optimized code with clang
#if (COMPILER_CLANG)
#    set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -Wno-unused-command-line-argument -mllvm -inline-threshold=10000")
#endif ()

if (CMAKE_VERSION VERSION_LESS "3.8.0")
    if (NOT MSVC)
        set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++1z")
        set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
    endif ()
else ()
    set (CMAKE_CXX_STANDARD 17)
    set (CMAKE_CXX_EXTENSIONS 0) # https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html#prop_tgt:CXX_EXTENSIONS
    set (CMAKE_CXX_STANDARD_REQUIRED ON)
    set (CXX_FLAGS_INTERNAL_COMPILER "-std=c++1z")
    set (CXX_FLAGS_INTERNAL_COMPILER "-std=c++17")
endif ()

if (COMPILER_GCC OR COMPILER_CLANG)
@@ -207,17 +204,13 @@ endif()

set (CMAKE_BUILD_COLOR_MAKEFILE ON)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS}")
#set (CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_CXX_FLAGS_ADD}")

set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CMAKE_C_FLAGS_ADD}")
#set (CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_C_FLAGS_ADD}")

# Uses MAKE_STATIC_LIBRARIES


option (UNBUNDLED "Try find all libraries in system. We recommend to avoid this mode for production builds, because we cannot guarantee exact versions and variants of libraries your system has installed. This mode exists for enthusiastic developers who search for trouble. Also it is useful for maintainers of OS packages." OFF)
if (UNBUNDLED)
@@ -225,149 +218,28 @@ if (UNBUNDLED)
else ()
    set(NOT_UNBUNDLED 1)
endif ()

# Using system libs can cause lot of warnings in includes.
if (UNBUNDLED OR NOT (OS_LINUX OR APPLE) OR ARCH_32)
    option (NO_WERROR "Disable -Werror compiler option" ON)
endif ()


set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package (Threads)

include (cmake/find_cxx.cmake)

include (cmake/test_compiler.cmake)

if (OS_LINUX AND COMPILER_CLANG AND USE_STATIC_LIBRARIES)
    option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++ (only make sense on Linux)" ${HAVE_LIBCXX})

    if (USE_LIBCXX)
        set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_LIBCPP_DEBUG=0") # More checks in debug build.
    endif ()
endif ()

if (USE_LIBCXX)
    set (STATIC_STDLIB_FLAGS "")
else ()
    set (STATIC_STDLIB_FLAGS "-static-libgcc -static-libstdc++")
endif ()

if (MAKE_STATIC_LIBRARIES AND NOT APPLE AND NOT (COMPILER_CLANG AND OS_FREEBSD))
    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${STATIC_STDLIB_FLAGS}")

    # Along with executables, we also build example of shared library for "library dictionary source"; and it also should be self-contained.
    set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${STATIC_STDLIB_FLAGS}")
endif ()

if (USE_STATIC_LIBRARIES AND HAVE_NO_PIE)
    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG_NO_PIE}")
    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FLAG_NO_PIE}")
endif ()

# Make this extra-checks for correct library dependencies.
if (NOT SANITIZE)
    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined")
    set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined")
endif ()

include (cmake/find_unwind.cmake)
include(cmake/dbms_glob_sources.cmake)
include(cmake/default_libs.cmake)

if (USE_INTERNAL_UNWIND_LIBRARY)
    option (USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING "Use internal unwind library for exception handling" ${USE_STATIC_LIBRARIES})
endif ()


# Set standard, system and compiler libraries explicitly.
# This is intended for more control of what we are linking.
######################################
### Add targets below this comment ###
######################################

string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")

set (DEFAULT_LIBS "")
if (OS_LINUX AND NOT UNBUNDLED AND (GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING OR USE_LIBCXX))
    # Note: this probably has no effect, but I'm not an expert in CMake.
    set (CMAKE_C_IMPLICIT_LINK_LIBRARIES "")
    set (CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "")

    # Disable default linked libraries.
    set (DEFAULT_LIBS "-nodefaultlibs")

    # We need builtins from Clang's RT even without libcxx - for ubsan+int128. See https://bugs.llvm.org/show_bug.cgi?id=16404
    set (BUILTINS_LIB_PATH "")
    if (COMPILER_CLANG)
        execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIB_PATH OUTPUT_STRIP_TRAILING_WHITESPACE)
    else ()
        set (BUILTINS_LIB_PATH "-lgcc")
    endif ()

    string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
    set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")

    # Add C++ libraries.
    #
    # This consist of:
    # - C++ standard library (like implementation of std::string);
    # - C++ ABI implementation (functions for exceptions like __cxa_throw, RTTI, etc);
    # - functions for internal implementation of exception handling (stack unwinding based on DWARF info; TODO replace with bundled libunwind);
    # - compiler builtins (example: functions for implementation of __int128 operations);
    #
    # There are two variants of C++ library: libc++ (from LLVM compiler infrastructure) and libstdc++ (from GCC).

    if (USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING)
        if (USE_STATIC_LIBRARIES)
            set (EXCEPTION_HANDLING_LIBRARY "${ClickHouse_BINARY_DIR}/contrib/libunwind-cmake/libunwind_static${${CMAKE_POSTFIX_VARIABLE}}.a")
        else ()
            set (EXCEPTION_HANDLING_LIBRARY "${ClickHouse_BINARY_DIR}/contrib/libunwind-cmake/libunwind_shared${${CMAKE_POSTFIX_VARIABLE}}.so")
        endif ()
    else ()
        set (EXCEPTION_HANDLING_LIBRARY "-lgcc_eh")
    endif ()

    message (STATUS "Using exception handling library: ${EXCEPTION_HANDLING_LIBRARY}")

    if (USE_LIBCXX)
        if (USE_INTERNAL_LIBCXX_LIBRARY)
            set (LIBCXX_LIBS "${ClickHouse_BINARY_DIR}/contrib/libcxx-cmake/libcxx_static${${CMAKE_POSTFIX_VARIABLE}}.a ${ClickHouse_BINARY_DIR}/contrib/libcxxabi-cmake/libcxxabi_static${${CMAKE_POSTFIX_VARIABLE}}.a")
        else ()
            set (LIBCXX_LIBS "-lc++ -lc++abi -lc++fs")
        endif ()

        set (DEFAULT_LIBS "${DEFAULT_LIBS} -Wl,-Bstatic ${LIBCXX_LIBS} ${EXCEPTION_HANDLING_LIBRARY} ${BUILTINS_LIB_PATH} -Wl,-Bdynamic")
    else ()
        set (DEFAULT_LIBS "${DEFAULT_LIBS} -Wl,-Bstatic -lstdc++ -lstdc++fs ${EXCEPTION_HANDLING_LIBRARY} ${COVERAGE_OPTION} ${BUILTINS_LIB_PATH} -Wl,-Bdynamic")
    endif ()

    # Linking with GLIBC prevents portability of binaries to older systems.
    # We overcome this behaviour by statically linking with our own implementation of all new symbols (that don't exist in older Libc or have infamous "symbol versioning").
    # The order of linking is important: 'glibc-compatibility' must be before libc but after all other libraries.
    if (GLIBC_COMPATIBILITY)
        message (STATUS "Some symbols from glibc will be replaced for compatibility")

        string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
        set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")

        # FIXME: glibc-compatibility may be non-static in some builds!
        set (DEFAULT_LIBS "${DEFAULT_LIBS} ${ClickHouse_BINARY_DIR}/libs/libglibc-compatibility/libglibc-compatibility${${CMAKE_POSTFIX_VARIABLE}}.a")
    endif ()

    # Add Libc. GLIBC is actually a collection of interdependent libraries.
    set (DEFAULT_LIBS "${DEFAULT_LIBS} -lrt -ldl -lpthread -lm -lc")

    # Note: we'd rather use Musl libc library, but it's little bit more difficult to use.

    message(STATUS "Default libraries: ${DEFAULT_LIBS}")
endif ()

if (NOT GLIBC_COMPATIBILITY)
    set (M_LIBRARY m)
endif ()

if (DEFAULT_LIBS)
    # Add default libs to all targets as the last dependency.
    set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
    set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
endif ()

if (NOT MAKE_STATIC_LIBRARIES)
    set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif ()
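Note: the comment block in the hunk above spells out what the hand-rolled DEFAULT_LIBS consists of (C++ standard library, C++ ABI, unwind support, compiler builtins, then libc). A minimal sketch of how those pieces compose, using the variables set in that hunk; the EXAMPLE_* names are illustrative only and not part of the build:

    # Illustrative composition only; the real code above assigns directly to DEFAULT_LIBS.
    set (EXAMPLE_CXX_RUNTIME "-Wl,-Bstatic ${LIBCXX_LIBS} ${EXCEPTION_HANDLING_LIBRARY} ${BUILTINS_LIB_PATH} -Wl,-Bdynamic")
    set (EXAMPLE_DEFAULT_LIBS "-nodefaultlibs ${EXAMPLE_CXX_RUNTIME} -lrt -ldl -lpthread -lm -lc")
    message (STATUS "Example default libraries: ${EXAMPLE_DEFAULT_LIBS}")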
@@ -420,20 +292,12 @@ if (UNBUNDLED)
else ()
    set(NOT_UNBUNDLED 1)
endif ()

# Using system libs can cause lot of warnings in includes.
if (UNBUNDLED OR NOT (OS_LINUX OR APPLE) OR ARCH_32)
    option (NO_WERROR "Disable -Werror compiler option" ON)
endif ()

if (USE_LIBCXX)
    set (HAVE_LIBCXX 1)
    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
endif()

if (USE_LIBCXX AND USE_INTERNAL_LIBCXX_LIBRARY)
    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdinc++ -isystem ${LIBCXX_INCLUDE_DIR} -isystem ${LIBCXXABI_INCLUDE_DIR}")
endif ()

message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE} ; USE_STATIC_LIBRARIES=${USE_STATIC_LIBRARIES} MAKE_STATIC_LIBRARIES=${MAKE_STATIC_LIBRARIES} SPLIT_SHARED=${SPLIT_SHARED_LIBRARIES} UNBUNDLED=${UNBUNDLED} CCACHE=${CCACHE_FOUND} ${CCACHE_VERSION}")

include(GNUInstallDirs)
@@ -476,7 +340,6 @@ include (cmake/find_consistent-hashing.cmake)
include (cmake/find_base64.cmake)
include (cmake/find_parquet.cmake)
include (cmake/find_hyperscan.cmake)
include (cmake/find_mimalloc.cmake)
include (cmake/find_simdjson.cmake)
include (cmake/find_rapidjson.cmake)
include (cmake/find_fastops.cmake)
@@ -499,79 +362,11 @@ include (libs/libmysqlxx/cmake/find_mysqlclient.cmake)

include (cmake/print_flags.cmake)

install (EXPORT global DESTINATION cmake)

add_subdirectory (contrib EXCLUDE_FROM_ALL)
add_subdirectory (libs)
add_subdirectory (utils)
add_subdirectory (dbms)

include (cmake/print_include_directories.cmake)

if (GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING)
    # FIXME: actually glibc-compatibility should always be built first,
    #        because it's unconditionally linked via $DEFAULT_LIBS,
    #        and these looks like the first places that get linked.
    function (add_default_dependencies target_name)
        if (TARGET ${target_name})
            if (GLIBC_COMPATIBILITY)
                add_dependencies(${target_name} glibc-compatibility)
            endif ()

            if (USE_LIBCXX AND USE_INTERNAL_LIBCXX_LIBRARY)
                add_dependencies(${target_name} cxx_static cxxabi_static)
            endif ()

            if (USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING)
                add_dependencies(${target_name} unwind_static)
            endif ()
        endif ()
    endfunction ()

    add_default_dependencies(ltdl)
    add_default_dependencies(zlibstatic)
    add_default_dependencies(jemalloc)
    add_default_dependencies(memcpy)
    add_default_dependencies(Foundation)
    add_default_dependencies(common)
    add_default_dependencies(gtest)
    add_default_dependencies(lz4)
    add_default_dependencies(zstd)
    add_default_dependencies(snappy)
    add_default_dependencies(arrow)
    add_default_dependencies(protoc)
    add_default_dependencies(thrift_static)
    add_default_dependencies(cityhash)
    add_default_dependencies(farmhash)
    add_default_dependencies(murmurhash)
    add_default_dependencies(metrohash)
    add_default_dependencies(metrohash128)
    add_default_dependencies(consistent-hashing)
    add_default_dependencies(double-conversion)
    add_default_dependencies(cctz)
    add_default_dependencies(kj)
    add_default_dependencies(simdjson)
    add_default_dependencies(apple_rt)
    add_default_dependencies(h3)
    add_default_dependencies(re2)
    add_default_dependencies(re2_st)
    add_default_dependencies(hs_compile_shared)
    add_default_dependencies(hs_exec_shared)
    add_default_dependencies(hs_shared)
    add_default_dependencies(widechar_width)
    add_default_dependencies(string_utils)
    add_default_dependencies(consistent-hashing-sumbur)
    add_default_dependencies(boost_program_options_internal)
    add_default_dependencies(boost_system_internal)
    add_default_dependencies(boost_regex_internal)
    add_default_dependencies(roaring)
    add_default_dependencies(btrie)
    add_default_dependencies(cpuid)
    add_default_dependencies(mysqlclient)
    add_default_dependencies(zlib)
    add_default_dependencies(thrift)
    add_default_dependencies(brotli)
    add_default_dependencies(libprotobuf)
    add_default_dependencies(base64)
    add_default_dependencies(readpassphrase)
    add_default_dependencies(unwind_static)
    add_default_dependencies(fastops)
endif ()
cmake/default_libs.cmake (new file): 48 lines
@@ -0,0 +1,48 @@
# Set standard, system and compiler libraries explicitly.
# This is intended for more control of what we are linking.

set (DEFAULT_LIBS "-nodefaultlibs")

if (OS_LINUX)
    # We need builtins from Clang's RT even without libcxx - for ubsan+int128.
    # See https://bugs.llvm.org/show_bug.cgi?id=16404
    if (COMPILER_CLANG)
        execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
    else ()
        set (BUILTINS_LIBRARY "-lgcc")
    endif ()

    set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread -ldl")

    message(STATUS "Default libraries: ${DEFAULT_LIBS}")
endif ()

set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})

# Global libraries

add_library(global-libs INTERFACE)

# Unfortunately '-pthread' doesn't work with '-nodefaultlibs'.
# Just make sure we have pthreads at all.
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

add_subdirectory(libs/libglibc-compatibility)
include (cmake/find_unwind.cmake)
include (cmake/find_cxx.cmake)

add_library(global-group INTERFACE)
target_link_libraries(global-group INTERFACE
    -Wl,--start-group
    $<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
    -Wl,--end-group
)

link_libraries(global-group)

install(
    TARGETS global-group global-libs
    EXPORT global
)
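The new cmake/default_libs.cmake above centralizes what the top-level CMakeLists.txt previously assembled by hand: system libraries go through CMAKE_C/CXX_STANDARD_LIBRARIES, while runtime libraries are recorded on the global-libs interface target and pulled into every target via link_libraries(global-group), wrapped in a single --start-group/--end-group pair. A minimal sketch of how a consumer is affected; my_tool is a hypothetical target, not part of the tree:

    # Hypothetical target; it links the global group automatically because
    # link_libraries(global-group) above applies to all targets created afterwards.
    add_executable (my_tool main.cpp)

    # Adding another runtime dependency for every target is done on the interface target:
    target_link_libraries (global-libs INTERFACE ${CMAKE_DL_LIBS})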
@@ -1,50 +1,20 @@
option (ENABLE_CAPNP "Enable Cap'n Proto" ON)

if (ENABLE_CAPNP)
    # cmake 3.5.1 bug:
    # capnproto uses this cmake feature:
    # target_compile_features(kj PUBLIC cxx_constexpr)
    # old cmake adds -std=gnu++11 to end of all compile commands (even if -std=gnu++17 already present in compile string)
    # cmake 3.9.1 (ubuntu artful) have no this bug (c++17 support added to cmake 3.8.2)
    if (CMAKE_VERSION VERSION_LESS "3.8.0")
        set (USE_INTERNAL_CAPNP_LIBRARY_DEFAULT 0)
        set (MISSING_INTERNAL_CAPNP_LIBRARY 1)
    else ()
        set (USE_INTERNAL_CAPNP_LIBRARY_DEFAULT ${NOT_UNBUNDLED})
    endif ()

    option (USE_INTERNAL_CAPNP_LIBRARY "Set to FALSE to use system capnproto library instead of bundled" ${USE_INTERNAL_CAPNP_LIBRARY_DEFAULT})

    if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/capnproto/c++/CMakeLists.txt")
        if (USE_INTERNAL_CAPNP_LIBRARY)
            message (WARNING "submodule contrib/capnproto is missing. to fix try run: \n git submodule update --init --recursive")
        endif ()
        set (USE_INTERNAL_CAPNP_LIBRARY 0)
        set (MISSING_INTERNAL_CAPNP_LIBRARY 1)
    endif ()

    if (NOT USE_INTERNAL_CAPNP_LIBRARY)
        set (CAPNP_PATHS "/usr/local/opt/capnp/lib")
        set (CAPNP_INCLUDE_PATHS "/usr/local/opt/capnp/include")
        find_library (CAPNP capnp PATHS ${CAPNP_PATHS})
        find_library (CAPNPC capnpc PATHS ${CAPNP_PATHS})
        find_library (KJ kj PATHS ${CAPNP_PATHS})
        set (CAPNP_LIBRARY ${CAPNPC} ${CAPNP} ${KJ})
        find_path (CAPNP_INCLUDE_DIR NAMES capnp/schema-parser.h PATHS ${CAPNP_INCLUDE_PATHS})
    endif ()

    if (CAPNP_INCLUDE_DIR AND CAPNP_LIBRARY)
        set(USE_CAPNP 1)
    elseif (NOT MISSING_INTERNAL_CAPNP_LIBRARY)
        set (USE_INTERNAL_CAPNP_LIBRARY 1)
        set (CAPNP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/capnproto/c++/src")
        set (CAPNP_LIBRARY capnpc)
        set (USE_CAPNP 1)
    endif ()
endif ()
option (USE_CAPNP "Enable Cap'n Proto" ON)

if (USE_CAPNP)
    message (STATUS "Using capnp=${USE_CAPNP}: ${CAPNP_INCLUDE_DIR} : ${CAPNP_LIBRARY}")
else ()
    message (STATUS "Build without capnp (support for Cap'n Proto format will be disabled)")
    option (USE_INTERNAL_CAPNP_LIBRARY "Set to FALSE to use system capnproto library instead of bundled" ${NOT_UNBUNDLED})

    # FIXME: refactor to use `add_library(… IMPORTED)` if possible.
    if (NOT USE_INTERNAL_CAPNP_LIBRARY)
        find_library (KJ kj)
        find_library (CAPNP capnp)
        find_library (CAPNPC capnpc)

        set (CAPNP_LIBRARIES ${CAPNPC} ${CAPNP} ${KJ})
    else ()
        add_subdirectory(contrib/capnproto-cmake)

        set (CAPNP_LIBRARIES capnpc)
    endif ()

    message (STATUS "Using capnp: ${CAPNP_LIBRARIES}")
endif ()
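The rewritten Cap'n Proto module above replaces the CAPNP_LIBRARY/CAPNP_INCLUDE_DIR pair with a single CAPNP_LIBRARIES list: either the system kj/capnp/capnpc libraries or the bundled capnpc target from contrib/capnproto-cmake. A hedged sketch of a consumer; my_format_lib is a hypothetical target used only for illustration:

    if (USE_CAPNP)
        # The bundled kj/capnp/capnpc targets propagate their include directory
        # as a usage requirement, so no explicit include path is needed here.
        target_link_libraries (my_format_lib PRIVATE ${CAPNP_LIBRARIES})
    endif ()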
@@ -1,13 +1,14 @@
find_program (CCACHE_FOUND ccache)
if (CCACHE_FOUND AND NOT CMAKE_CXX_COMPILER_LAUNCHER MATCHES "ccache" AND NOT CMAKE_CXX_COMPILER MATCHES "ccache")
    execute_process(COMMAND ${CCACHE_FOUND} "-V" OUTPUT_VARIABLE CCACHE_VERSION)
    string(REGEX REPLACE "ccache version ([0-9\\.]+).*" "\\1" CCACHE_VERSION ${CCACHE_VERSION})

    if (CCACHE_VERSION VERSION_GREATER "3.2.0" OR NOT CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
        #message(STATUS "Using ${CCACHE_FOUND} ${CCACHE_VERSION}")
        set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND})
        set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
    else ()
        message(STATUS "Not using ${CCACHE_FOUND} ${CCACHE_VERSION} bug: https://bugzilla.samba.org/show_bug.cgi?id=8118")
    endif ()
if (CCACHE_FOUND AND NOT CMAKE_CXX_COMPILER_LAUNCHER MATCHES "ccache" AND NOT CMAKE_CXX_COMPILER MATCHES "ccache")
    execute_process(COMMAND ${CCACHE_FOUND} "-V" OUTPUT_VARIABLE CCACHE_VERSION)
    string(REGEX REPLACE "ccache version ([0-9\\.]+).*" "\\1" CCACHE_VERSION ${CCACHE_VERSION})

    if (CCACHE_VERSION VERSION_GREATER "3.2.0" OR NOT CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
        #message(STATUS "Using ${CCACHE_FOUND} ${CCACHE_VERSION}")
        set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND})
        set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
    else ()
        message(STATUS "Not using ${CCACHE_FOUND} ${CCACHE_VERSION} bug: https://bugzilla.samba.org/show_bug.cgi?id=8118")
    endif ()
endif ()
@@ -1,26 +1,50 @@
if (NOT APPLE)
if (OS_LINUX AND COMPILER_CLANG)
    option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++" ON)
    option (USE_INTERNAL_LIBCXX_LIBRARY "Set to FALSE to use system libcxx and libcxxabi libraries instead of bundled" ${NOT_UNBUNDLED})
endif()

if (USE_LIBCXX)
    set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_LIBCPP_DEBUG=0") # More checks in debug build.
endif ()

# FIXME: make better check for submodule presence
if (USE_INTERNAL_LIBCXX_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcxx/include/vector")
    message (WARNING "submodule contrib/libcxx is missing. to fix try run: \n git submodule update --init --recursive")
    set (USE_INTERNAL_LIBCXX_LIBRARY 0)
endif ()

# FIXME: make better check for submodule presence
if (USE_INTERNAL_LIBCXX_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcxxabi/src")
    message (WARNING "submodule contrib/libcxxabi is missing. to fix try run: \n git submodule update --init --recursive")
    set (USE_INTERNAL_LIBCXXABI_LIBRARY 0)
    set (USE_INTERNAL_LIBCXX_LIBRARY 0)
endif ()

if (NOT USE_INTERNAL_LIBCXX_LIBRARY)
    find_library (LIBCXX_LIBRARY c++)
    find_library (LIBCXXABI_LIBRARY c++abi)
if (USE_LIBCXX)
    if (NOT USE_INTERNAL_LIBCXX_LIBRARY)
        find_library (LIBCXX_LIBRARY c++)
        find_library (LIBCXXFS_LIBRARY c++fs)
        find_library (LIBCXXABI_LIBRARY c++abi)

        set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")

        target_link_libraries(global-libs INTERFACE ${EXCEPTION_HANDLING_LIBRARY})
    else ()
        set (LIBCXX_LIBRARY cxx)
        set (LIBCXXABI_LIBRARY cxxabi)
        add_subdirectory(contrib/libcxxabi-cmake)
        add_subdirectory(contrib/libcxx-cmake)

        # Exception handling library is embedded into libcxxabi.
    endif ()

    target_link_libraries(global-libs INTERFACE ${LIBCXX_LIBRARY} ${LIBCXXABI_LIBRARY} ${LIBCXXFS_LIBRARY})

    set (HAVE_LIBCXX 1)

    message (STATUS "Using libcxx: ${LIBCXX_LIBRARY}")
    message (STATUS "Using libcxxfs: ${LIBCXXFS_LIBRARY}")
    message (STATUS "Using libcxxabi: ${LIBCXXABI_LIBRARY}")
else ()
    set (LIBCXX_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx/include)
    set (LIBCXXABI_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxxabi/include)
    set (LIBCXX_LIBRARY cxx_static)
    set (LIBCXXABI_LIBRARY cxxabi_static)
    target_link_libraries(global-libs INTERFACE -l:libstdc++.a -l:libstdc++fs.a) # Always link these libraries as static
    target_link_libraries(global-libs INTERFACE ${EXCEPTION_HANDLING_LIBRARY})
endif ()

message (STATUS "Using libcxx: ${LIBCXX_LIBRARY}")
message (STATUS "Using libcxxabi: ${LIBCXXABI_LIBRARY}")
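The reworked module above has three outcomes: system libc++ (found via find_library plus -stdlib=libc++), the bundled cxx/cxxabi targets, or a statically linked libstdc++ fallback; in every case the result is attached to global-libs rather than referenced by individual targets. A small diagnostic sketch, assuming only the variables this module sets:

    # Purely diagnostic; reports which standard-library configuration was chosen.
    if (USE_LIBCXX)
        if (USE_INTERNAL_LIBCXX_LIBRARY)
            message (STATUS "C++ runtime: bundled libc++ (targets cxx, cxxabi)")
        else ()
            message (STATUS "C++ runtime: system libc++ at ${LIBCXX_LIBRARY}")
        endif ()
    else ()
        message (STATUS "C++ runtime: static libstdc++ via global-libs")
    endif ()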
@@ -1,17 +0,0 @@
if (OS_LINUX AND NOT SANITIZE AND NOT ARCH_ARM AND NOT ARCH_32 AND NOT ARCH_PPC64LE)
    option (ENABLE_MIMALLOC "Set to FALSE to disable usage of mimalloc for internal ClickHouse caches" FALSE)
endif ()

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/mimalloc/include/mimalloc.h")
    message (WARNING "submodule contrib/mimalloc is missing. to fix try run: \n git submodule update --init --recursive")
    return()
endif ()

if (ENABLE_MIMALLOC)
    message (FATAL_ERROR "Mimalloc is not production ready. (Disable with cmake -D ENABLE_MIMALLOC=0). If you want to use mimalloc, you must manually remove this message.")

    set (MIMALLOC_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/mimalloc/include)
    set (USE_MIMALLOC 1)
    set (MIMALLOC_LIBRARY mimalloc-static)
    message (STATUS "Using mimalloc: ${MIMALLOC_INCLUDE_DIR} : ${MIMALLOC_LIBRARY}")
endif ()
@@ -1,59 +1,17 @@
include (CMakePushCheckState)
cmake_push_check_state ()
option (USE_UNWIND "Enable libunwind (better stacktraces)" ON)

option (ENABLE_UNWIND "Enable libunwind (better stacktraces)" ON)
if (NOT CMAKE_SYSTEM MATCHES "Linux" OR ARCH_ARM OR ARCH_32)
    set (USE_UNWIND OFF)
endif ()

if (ENABLE_UNWIND)
if (USE_UNWIND)
    add_subdirectory(contrib/libunwind-cmake)
    set (UNWIND_LIBRARIES unwind)
    set (EXCEPTION_HANDLING_LIBRARY ${UNWIND_LIBRARIES})

    if (CMAKE_SYSTEM MATCHES "Linux" AND NOT ARCH_ARM AND NOT ARCH_32)
        option (USE_INTERNAL_UNWIND_LIBRARY "Set to FALSE to use system unwind library instead of bundled" ${NOT_UNBUNDLED})
    message (STATUS "Using libunwind: ${UNWIND_LIBRARIES}")
    else ()
        option (USE_INTERNAL_UNWIND_LIBRARY "Set to FALSE to use system unwind library instead of bundled" OFF)
    set (EXCEPTION_HANDLING_LIBRARY gcc_eh)
    endif ()

    if (NOT USE_INTERNAL_UNWIND_LIBRARY)
        find_library (UNWIND_LIBRARY unwind)
        find_path (UNWIND_INCLUDE_DIR NAMES unwind.h PATHS ${UNWIND_INCLUDE_PATHS})

        include (CheckCXXSourceCompiles)
        set(CMAKE_REQUIRED_INCLUDES ${UNWIND_INCLUDE_DIR})
        set(CMAKE_REQUIRED_LIBRARIES ${UNWIND_LIBRARY})
        check_cxx_source_compiles("
            #include <ucontext.h>
            #define UNW_LOCAL_ONLY
            #include <libunwind.h>
            int main () {
                ucontext_t context;
                unw_cursor_t cursor;
                unw_init_local(&cursor, &context);
                return 0;
            }
        " HAVE_UNW_INIT_LOCAL)
        if (NOT HAVE_UNW_INIT_LOCAL)
            set(UNWIND_LIBRARY "")
            set(UNWIND_INCLUDE_DIR "")
        endif ()

    endif ()

    if (UNWIND_LIBRARY AND UNWIND_INCLUDE_DIR)
        set (USE_UNWIND 1)
    elseif (CMAKE_SYSTEM MATCHES "Linux" AND NOT ARCH_ARM AND NOT ARCH_32 AND NOT UNBUNDLED)
        set (USE_INTERNAL_UNWIND_LIBRARY 1)

        set (PACKAGE_VERSION "9.0.0svn" CACHE STRING "")

        set (UNWIND_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libunwind/include")

        set (LIBUNWIND_ENABLE_SHARED OFF CACHE BOOL "")
        set (LIBUNWIND_ENABLE_STATIC ON CACHE BOOL "")
        set (UNWIND_LIBRARY unwind_static)

        set (USE_UNWIND 1)
    endif ()

endif ()

message (STATUS "Using unwind=${USE_UNWIND}: ${UNWIND_INCLUDE_DIR} : ${UNWIND_LIBRARY}")

cmake_pop_check_state ()
message (STATUS "Using exception handler: ${EXCEPTION_HANDLING_LIBRARY}")
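After this rewrite the unwind module appears to always define EXCEPTION_HANDLING_LIBRARY (the bundled unwind target when USE_UNWIND is on, gcc_eh otherwise) and to drop the old HAVE_UNW_INIT_LOCAL compile probe. Consumers link the variables rather than a hard-coded library name, as the jemalloc and dbms hunks later in this diff do; a minimal hypothetical sketch:

    # Hypothetical consumer target; my_profiler is not part of the tree.
    if (USE_UNWIND)
        target_link_libraries (my_profiler PRIVATE ${UNWIND_LIBRARIES})
    endif ()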
@@ -1,47 +0,0 @@
include (CheckCXXSourceCompiles)
include (CMakePushCheckState)

set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads)

cmake_push_check_state ()

if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
    # clang4 : -no-pie cause error
    # clang6 : -no-pie cause warning

    if (MAKE_STATIC_LIBRARIES)
        set (TEST_FLAG "-Wl,-Bstatic -stdlib=libc++ -lc++ -lc++abi -Wl,-Bdynamic")
    else ()
        set (TEST_FLAG "-stdlib=libc++ -lc++ -lc++abi")
    endif ()

    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG}")
    set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} Threads::Threads)

    check_cxx_source_compiles("
        #include <iostream>
        int main() {
            std::cerr << std::endl;
            return 0;
        }
    " HAVE_LIBCXX)

else ()

    set (TEST_FLAG "-no-pie")
    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG}")

    check_cxx_source_compiles("
        int main() {
            return 0;
        }
    " HAVE_NO_PIE)

    if (HAVE_NO_PIE)
        set (FLAG_NO_PIE ${TEST_FLAG})
    endif ()

endif ()

cmake_pop_check_state ()
contrib/CMakeLists.txt (vendored): 19 lines changed
@@ -23,16 +23,6 @@ if (USE_INTERNAL_ORC_LIBRARY)
    add_subdirectory(orc)
endif()

if (USE_INTERNAL_UNWIND_LIBRARY)
    add_subdirectory (libunwind-cmake)
endif ()

if (USE_LIBCXX AND USE_INTERNAL_LIBCXX_LIBRARY)
    add_subdirectory(libcxx-cmake)
    add_subdirectory(libcxxabi-cmake)
endif()


if (USE_INTERNAL_BOOST_LIBRARY)
    add_subdirectory (boost-cmake)
endif ()
@@ -172,15 +162,6 @@ if (ENABLE_ODBC AND USE_INTERNAL_ODBC_LIBRARY)
    add_library(ODBC::ODBC ALIAS ${ODBC_LIBRARIES})
endif ()

if (ENABLE_CAPNP AND USE_INTERNAL_CAPNP_LIBRARY)
    set (BUILD_TESTING 0 CACHE INTERNAL "")
    set (_save ${CMAKE_CXX_EXTENSIONS})
    set (CMAKE_CXX_EXTENSIONS)
    add_subdirectory (capnproto/c++)
    set (CMAKE_CXX_EXTENSIONS ${_save})
    target_include_directories(${CAPNP_LIBRARY} PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/capnproto/c++/src>)
endif ()

if (USE_INTERNAL_PARQUET_LIBRARY)
if (USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
    # We dont use arrow's cmakefiles because they uses too many depends and download some libs in compile time
@@ -44,7 +44,6 @@ set( thriftcpp_threads_SOURCES
add_library(${THRIFT_LIBRARY} ${thriftcpp_SOURCES} ${thriftcpp_threads_SOURCES})
set_target_properties(${THRIFT_LIBRARY} PROPERTIES CXX_STANDARD 14) # REMOVE after https://github.com/apache/thrift/pull/1641
target_include_directories(${THRIFT_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp/src PRIVATE ${Boost_INCLUDE_DIRS})
target_link_libraries(${THRIFT_LIBRARY} PRIVATE Threads::Threads)


# === orc
@@ -219,7 +218,7 @@ endif()
add_library(${ARROW_LIBRARY} ${ARROW_SRCS})
add_dependencies(${ARROW_LIBRARY} protoc)
target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cpp/src ${Boost_INCLUDE_DIRS})
target_link_libraries(${ARROW_LIBRARY} PRIVATE ${DOUBLE_CONVERSION_LIBRARIES} ${Protobuf_LIBRARY} Threads::Threads)
target_link_libraries(${ARROW_LIBRARY} PRIVATE ${DOUBLE_CONVERSION_LIBRARIES} ${Protobuf_LIBRARY})
if (ARROW_WITH_LZ4)
    target_link_libraries(${ARROW_LIBRARY} PRIVATE ${LZ4_LIBRARY})
endif()
contrib/capnproto-cmake/CMakeLists.txt (new file): 69 lines
@@ -0,0 +1,69 @@
set (CAPNPROTO_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/capnproto/c++/src)

set (KJ_SRCS
    ${CAPNPROTO_SOURCE_DIR}/kj/array.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/common.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/debug.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/exception.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/io.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/memory.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/mutex.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/string.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/hash.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/table.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/thread.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/main.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/arena.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/test-helpers.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/units.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/encoding.c++

    ${CAPNPROTO_SOURCE_DIR}/kj/refcount.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/string-tree.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/time.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/filesystem.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-unix.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-win32.c++
    ${CAPNPROTO_SOURCE_DIR}/kj/parse/char.c++
)

add_library(kj ${KJ_SRCS})
target_include_directories(kj PUBLIC ${CAPNPROTO_SOURCE_DIR})
target_compile_options(kj PUBLIC -Wno-non-virtual-dtor)

set (CAPNP_SRCS
    ${CAPNPROTO_SOURCE_DIR}/capnp/c++.capnp.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/blob.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/arena.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/layout.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/list.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/any.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/message.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/schema.capnp.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/serialize.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/serialize-packed.c++

    ${CAPNPROTO_SOURCE_DIR}/capnp/schema.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/schema-loader.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/dynamic.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/stringify.c++
)

add_library(capnp ${CAPNP_SRCS})
target_link_libraries(capnp PUBLIC kj)

set (CAPNPC_SRCS
    ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/type-id.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/error-reporter.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.capnp.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/grammar.capnp.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/parser.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/node-translator.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/compiler.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/schema-parser.c++
    ${CAPNPROTO_SOURCE_DIR}/capnp/serialize-text.c++
)

add_library(capnpc ${CAPNPC_SRCS})
target_link_libraries(capnpc PUBLIC capnp)
@@ -59,7 +59,6 @@ if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")

    if (USE_UNWIND)
        target_compile_definitions (jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1)
        target_include_directories (jemalloc BEFORE PRIVATE ${UNWIND_INCLUDE_DIR})
        target_link_libraries (jemalloc PRIVATE ${UNWIND_LIBRARY})
        target_link_libraries (jemalloc PRIVATE ${UNWIND_LIBRARIES})
    endif ()
endif ()
@@ -1,5 +1,4 @@
set(LIBCXX_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx)
#set(LIBCXX_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/libcxx)

set(SRCS
${LIBCXX_SOURCE_DIR}/src/optional.cpp
@@ -16,10 +15,6 @@ ${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp
${LIBCXX_SOURCE_DIR}/src/hash.cpp
${LIBCXX_SOURCE_DIR}/src/string.cpp
${LIBCXX_SOURCE_DIR}/src/debug.cpp
#${LIBCXX_SOURCE_DIR}/src/support/win32/support.cpp
#${LIBCXX_SOURCE_DIR}/src/support/win32/locale_win32.cpp
#${LIBCXX_SOURCE_DIR}/src/support/win32/thread_win32.cpp
#${LIBCXX_SOURCE_DIR}/src/support/solaris/xlocale.cpp
${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp
${LIBCXX_SOURCE_DIR}/src/utility.cpp
${LIBCXX_SOURCE_DIR}/src/any.cpp
@@ -43,9 +38,17 @@ ${LIBCXX_SOURCE_DIR}/src/system_error.cpp
${LIBCXX_SOURCE_DIR}/src/random.cpp
)

add_library(cxx_static ${SRCS})
add_library(cxx ${SRCS})

target_include_directories(cxx_static PUBLIC ${LIBCXX_SOURCE_DIR}/include)
target_compile_definitions(cxx_static PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
target_compile_options(cxx_static PRIVATE -nostdinc++)
target_include_directories(cxx SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>)
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
target_compile_options(cxx PUBLIC -nostdinc++ -Wno-reserved-id-macro)
target_link_libraries(cxx PUBLIC cxxabi)

install(
    TARGETS cxx
    EXPORT global
    ARCHIVE DESTINATION lib
    RUNTIME DESTINATION lib
    LIBRARY DESTINATION lib
)
@@ -1,13 +1,10 @@
set(LIBCXXABI_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxxabi)
set(LIBCXX_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx)
#set(LIBCXXABI_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/libcxxabi)

set(SRCS
${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp
${LIBCXXABI_SOURCE_DIR}/src/cxa_virtual.cpp
${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp
${LIBCXXABI_SOURCE_DIR}/src/fallback_malloc.cpp
#${LIBCXXABI_SOURCE_DIR}/src/cxa_noexception.cpp
${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp
${LIBCXXABI_SOURCE_DIR}/src/cxa_default_handlers.cpp
${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp
@@ -25,10 +22,20 @@ ${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp
${LIBCXXABI_SOURCE_DIR}/src/stdlib_new_delete.cpp
)

add_library(cxxabi_static ${SRCS})

target_include_directories(cxxabi_static PUBLIC ${LIBCXXABI_SOURCE_DIR}/include ${LIBCXX_SOURCE_DIR}/include)
target_compile_definitions(cxxabi_static PRIVATE -D_LIBCPP_BUILDING_LIBRARY)
target_compile_options(cxxabi_static PRIVATE -nostdinc++ -fno-sanitize=undefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast.
add_library(cxxabi ${SRCS})

target_include_directories(cxxabi SYSTEM BEFORE
    PUBLIC $<BUILD_INTERFACE:${LIBCXXABI_SOURCE_DIR}/include>
    PRIVATE $<BUILD_INTERFACE:${LIBCXXABI_SOURCE_DIR}/../libcxx/include>
)
target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY)
target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined -Wno-macro-redefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast.
target_link_libraries(cxxabi PUBLIC ${EXCEPTION_HANDLING_LIBRARY})

install(
    TARGETS cxxabi
    EXPORT global
    ARCHIVE DESTINATION lib
    RUNTIME DESTINATION lib
    LIBRARY DESTINATION lib
)
@@ -65,7 +65,7 @@ add_library(rdkafka ${SRCS})
target_include_directories(rdkafka SYSTEM PUBLIC include)
target_include_directories(rdkafka SYSTEM PUBLIC ${RDKAFKA_SOURCE_DIR}) # Because weird logic with "include_next" is used.
target_include_directories(rdkafka SYSTEM PRIVATE ${ZSTD_INCLUDE_DIR}/common) # Because wrong path to "zstd_errors.h" is used.
target_link_libraries(rdkafka PRIVATE ${ZLIB_LIBRARIES} ${ZSTD_LIBRARY} ${LZ4_LIBRARY} ${LIBGSASL_LIBRARY} Threads::Threads)
target_link_libraries(rdkafka PRIVATE ${ZLIB_LIBRARIES} ${ZSTD_LIBRARY} ${LZ4_LIBRARY} ${LIBGSASL_LIBRARY})
if(OPENSSL_SSL_LIBRARY AND OPENSSL_CRYPTO_LIBRARY)
    target_link_libraries(rdkafka PRIVATE ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY})
endif()
@@ -24,9 +24,15 @@ set(LIBUNWIND_SOURCES
    ${LIBUNWIND_C_SOURCES}
    ${LIBUNWIND_ASM_SOURCES})

add_library(unwind_static ${LIBUNWIND_SOURCES})
add_library(unwind ${LIBUNWIND_SOURCES})

target_include_directories(unwind_static SYSTEM BEFORE PUBLIC ${LIBUNWIND_SOURCE_DIR}/include)
target_compile_definitions(unwind_static PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIBUNWIND_IS_NATIVE_ONLY)
target_compile_options(unwind_static PRIVATE -fno-exceptions -funwind-tables -fno-sanitize=all -nostdinc++ -fno-rtti)
target_link_libraries(unwind_static PRIVATE Threads::Threads ${CMAKE_DL_LIBS})
target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIBUNWIND_IS_NATIVE_ONLY)
target_compile_options(unwind PRIVATE -fno-exceptions -funwind-tables -fno-sanitize=all -nostdinc++ -fno-rtti)

install(
    TARGETS unwind
    EXPORT global
    LIBRARY DESTINATION lib
    ARCHIVE DESTINATION lib
)
@@ -62,11 +62,6 @@ endif()

add_library(mysqlclient ${SRCS})

target_link_libraries(mysqlclient PRIVATE ${CMAKE_DL_LIBS} Threads::Threads)
if(M_LIBRARY)
    target_link_libraries(mysqlclient PRIVATE ${M_LIBRARY})
endif()

if(OPENSSL_LIBRARIES)
    target_link_libraries(mysqlclient PRIVATE ${OPENSSL_LIBRARIES})
    target_compile_definitions(mysqlclient PRIVATE -D HAVE_OPENSSL -D HAVE_TLS)
contrib/mimalloc (vendored submodule): 1 line changed
@@ -1 +0,0 @@
Subproject commit a787bdebce94bf3776dc0d1ad597917f479ab8d5
@@ -97,8 +97,6 @@ add_subdirectory (src)
set(dbms_headers)
set(dbms_sources)

include(../cmake/dbms_glob_sources.cmake)

add_headers_and_sources(clickhouse_common_io src/Common)
add_headers_and_sources(clickhouse_common_io src/Common/HashTable)
add_headers_and_sources(clickhouse_common_io src/IO)
@@ -163,9 +161,7 @@ if (OS_FREEBSD)
endif ()

if (USE_UNWIND)
    if (NOT USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING)
        target_link_libraries (clickhouse_common_io PRIVATE ${UNWIND_LIBRARY})
    endif ()
    target_link_libraries (clickhouse_common_io PRIVATE ${UNWIND_LIBRARIES})
endif ()

add_subdirectory(src/Common/ZooKeeper)
@@ -241,15 +237,10 @@ target_link_libraries(clickhouse_common_io
        ${EXECINFO_LIBRARIES}
    PUBLIC
        ${Boost_SYSTEM_LIBRARY}
        ${Boost_PROGRAM_OPTIONS_LIBRARY}
    PRIVATE
        apple_rt
    PUBLIC
        Threads::Threads
    PRIVATE
        ${CMAKE_DL_LIBS}
    PRIVATE
        rt
    PUBLIC
        roaring
)

@@ -265,11 +256,6 @@ if(RE2_INCLUDE_DIR)
    target_include_directories(clickhouse_common_io SYSTEM BEFORE PUBLIC ${RE2_INCLUDE_DIR})
endif()

if (USE_MIMALLOC)
    target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MIMALLOC_INCLUDE_DIR})
    target_link_libraries (clickhouse_common_io PRIVATE ${MIMALLOC_LIBRARY})
endif ()

if(CPUID_LIBRARY)
    target_link_libraries(clickhouse_common_io PRIVATE ${CPUID_LIBRARY})
endif()
@@ -297,7 +283,6 @@ target_link_libraries (dbms
        ${Boost_FILESYSTEM_LIBRARY}
    PUBLIC
        ${Boost_SYSTEM_LIBRARY}
        Threads::Threads
)

target_include_directories(dbms PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include)
@@ -364,10 +349,7 @@ if (USE_ICU)
endif ()

if (USE_CAPNP)
    target_link_libraries (dbms PRIVATE ${CAPNP_LIBRARY})
    if (NOT USE_INTERNAL_CAPNP_LIBRARY)
        target_include_directories (dbms SYSTEM BEFORE PRIVATE ${CAPNP_INCLUDE_DIR})
    endif ()
    target_link_libraries (dbms PRIVATE ${CAPNP_LIBRARIES})
endif ()

if (USE_PARQUET)
@@ -380,7 +362,6 @@ endif ()
if(OPENSSL_CRYPTO_LIBRARY)
    target_link_libraries(dbms PRIVATE ${OPENSSL_CRYPTO_LIBRARY})
endif ()
target_link_libraries(dbms PRIVATE Threads::Threads)

target_include_directories (dbms SYSTEM BEFORE PRIVATE ${DIVIDE_INCLUDE_DIR})
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR})
@@ -32,6 +32,8 @@
#include <Client/Connection.h>
#include <Common/InterruptListener.h>
#include <Common/Config/configReadClient.h>
#include <Common/TerminalSize.h>
#include <Common/StudentTTest.h>


/** A tool for evaluating ClickHouse performance.
@@ -41,6 +43,8 @@
namespace DB
{

using Ports = std::vector<UInt16>;

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
@@ -50,17 +54,34 @@ namespace ErrorCodes
class Benchmark : public Poco::Util::Application
{
public:
    Benchmark(unsigned concurrency_, double delay_,
            const String & host_, UInt16 port_, bool secure_, const String & default_database_,
    Benchmark(unsigned concurrency_, double delay_, Strings && hosts_, Ports && ports_,
            bool cumulative_, bool secure_, const String & default_database_,
            const String & user_, const String & password_, const String & stage,
            bool randomize_, size_t max_iterations_, double max_time_,
            const String & json_path_, const Settings & settings_)
            const String & json_path_, size_t confidence_, const Settings & settings_)
        :
        concurrency(concurrency_), delay(delay_), queue(concurrency),
        connections(concurrency, host_, port_, default_database_, user_, password_, "benchmark", Protocol::Compression::Enable, secure_ ? Protocol::Secure::Enable : Protocol::Secure::Disable),
        randomize(randomize_), max_iterations(max_iterations_), max_time(max_time_),
        json_path(json_path_), settings(settings_), global_context(Context::createGlobal()), pool(concurrency)
        concurrency(concurrency_), delay(delay_), queue(concurrency), randomize(randomize_),
        cumulative(cumulative_), max_iterations(max_iterations_), max_time(max_time_),
        confidence(confidence_), json_path(json_path_), settings(settings_),
        global_context(Context::createGlobal()), pool(concurrency)
    {
        const auto secure = secure_ ? Protocol::Secure::Enable : Protocol::Secure::Disable;
        size_t connections_cnt = std::max(ports_.size(), hosts_.size());

        connections.reserve(connections_cnt);
        comparison_info_total.reserve(connections_cnt);
        comparison_info_per_interval.reserve(connections_cnt);

        for (size_t i = 0; i < connections_cnt; ++i)
        {
            UInt16 cur_port = i >= ports_.size() ? 9000 : ports_[i];
            std::string cur_host = i >= hosts_.size() ? "localhost" : hosts_[i];

            connections.emplace_back(std::make_unique<ConnectionPool>(concurrency, cur_host, cur_port, default_database_, user_, password_, "benchmark", Protocol::Compression::Enable, secure));
            comparison_info_per_interval.emplace_back(std::make_shared<Stats>());
            comparison_info_total.emplace_back(std::make_shared<Stats>());
        }

        global_context.makeGlobalContext();

        std::cerr << std::fixed << std::setprecision(3);
@@ -101,21 +122,29 @@ public:
    }

private:
    using Query = std::string;
    using Entry = ConnectionPool::Entry;
    using EntryPtr = std::shared_ptr<Entry>;
    using EntryPtrs = std::vector<EntryPtr>;

    unsigned concurrency;
    double delay;

    using Query = std::string;
    using Queries = std::vector<Query>;
    Queries queries;

    using Queue = ConcurrentBoundedQueue<Query>;
    Queue queue;

    ConnectionPool connections;
    using ConnectionPoolUniq = std::unique_ptr<ConnectionPool>;
    using ConnectionPoolUniqs = std::vector<ConnectionPoolUniq>;
    ConnectionPoolUniqs connections;

    bool randomize;
    bool cumulative;
    size_t max_iterations;
    double max_time;
    size_t confidence;
    String json_path;
    Settings settings;
    Context global_context;
@@ -128,12 +157,12 @@ private:

    struct Stats
    {
        Stopwatch watch;
        std::atomic<size_t> queries{0};
        size_t read_rows = 0;
        size_t read_bytes = 0;
        size_t result_rows = 0;
        size_t result_bytes = 0;
        double work_time = 0;

        using Sampler = ReservoirSampler<double>;
        Sampler sampler {1 << 16};
@@ -141,6 +170,7 @@ private:
        void add(double seconds, size_t read_rows_inc, size_t read_bytes_inc, size_t result_rows_inc, size_t result_bytes_inc)
        {
            ++queries;
            work_time += seconds;
            read_rows += read_rows_inc;
            read_bytes += read_bytes_inc;
            result_rows += result_rows_inc;
@@ -150,8 +180,8 @@ private:

        void clear()
        {
            watch.restart();
            queries = 0;
            work_time = 0;
            read_rows = 0;
            read_bytes = 0;
            result_rows = 0;
@@ -160,15 +190,18 @@ private:
        }
    };

    Stats info_per_interval;
    Stats info_total;
    using MultiStats = std::vector<std::shared_ptr<Stats>>;
    MultiStats comparison_info_per_interval;
    MultiStats comparison_info_total;
    StudentTTest t_test;

    Stopwatch total_watch;
    Stopwatch delay_watch;

    std::mutex mutex;

    ThreadPool pool;


    void readQueries()
    {
        ReadBufferFromFileDescriptor in(STDIN_FILENO);
@@ -213,7 +246,7 @@ private:
            return false;
        }

        if (max_time > 0 && info_total.watch.elapsedSeconds() >= max_time)
        if (max_time > 0 && total_watch.elapsedSeconds() >= max_time)
        {
            std::cout << "Stopping launch of queries. Requested time limit is exhausted.\n";
            return false;
@@ -227,8 +260,8 @@ private:

        if (delay > 0 && delay_watch.elapsedSeconds() > delay)
        {
            printNumberOfQueriesExecuted(info_total.queries);
            report(info_per_interval);
            printNumberOfQueriesExecuted(queries_executed);
            cumulative ? report(comparison_info_total) : report(comparison_info_per_interval);
            delay_watch.restart();
        }
    }
@@ -242,11 +275,17 @@ private:
        std::uniform_int_distribution<size_t> distribution(0, queries.size() - 1);

        for (size_t i = 0; i < concurrency; ++i)
            pool.schedule(std::bind(&Benchmark::thread, this,
                connections.get(ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings))));
        {
            EntryPtrs connection_entries;
            connection_entries.reserve(connections.size());

            for (const auto & connection : connections)
                connection_entries.emplace_back(std::make_shared<Entry>(connection->get(ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings))));

            pool.schedule(std::bind(&Benchmark::thread, this, connection_entries));
        }

        InterruptListener interrupt_listener;
        info_per_interval.watch.restart();
        delay_watch.restart();

        /// Push queries into queue
@@ -262,20 +301,24 @@ private:
        }

        pool.wait();
        info_total.watch.stop();
        total_watch.stop();

        if (!json_path.empty())
            reportJSON(info_total, json_path);
            reportJSON(comparison_info_total, json_path);

        printNumberOfQueriesExecuted(info_total.queries);
        report(info_total);
        printNumberOfQueriesExecuted(queries_executed);
        report(comparison_info_total);
    }


    void thread(ConnectionPool::Entry connection)
    void thread(EntryPtrs & connection_entries)
    {
        Query query;

        /// Randomly choosing connection index
        pcg64 generator(randomSeed());
        std::uniform_int_distribution<size_t> distribution(0, connection_entries.size() - 1);

        try
        {
            /// In these threads we do not accept INT signal.
@@ -296,8 +339,7 @@ private:
                if (shutdown || (max_iterations && queries_executed == max_iterations))
                    return;
            }

            execute(connection, query);
            execute(connection_entries, query, distribution(generator));
            ++queries_executed;
        }
    }
@@ -309,20 +351,19 @@ private:
        }
    }


    void execute(ConnectionPool::Entry & connection, Query & query)
    void execute(EntryPtrs & connection_entries, Query & query, size_t connection_index)
    {
        Stopwatch watch;
        RemoteBlockInputStream stream(
            *connection,
            *(*connection_entries[connection_index]),
            query, {}, global_context, &settings, nullptr, Tables(), query_processing_stage);

        Progress progress;
        stream.setProgressCallback([&progress](const Progress & value) { progress.incrementPiecewiseAtomically(value); });

        stream.readPrefix();
        while (Block block = stream.read())
            ;
        while (Block block = stream.read());

        stream.readSuffix();

        const BlockStreamProfileInfo & info = stream.getProfileInfo();
@@ -330,33 +371,47 @@ private:
        double seconds = watch.elapsedSeconds();

        std::lock_guard lock(mutex);
        info_per_interval.add(seconds, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
        info_total.add(seconds, progress.read_rows, progress.read_bytes, info.rows, info.bytes);

        comparison_info_per_interval[connection_index]->add(seconds, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
        comparison_info_total[connection_index]->add(seconds, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
        t_test.add(connection_index, seconds);
    }


    void report(Stats & info)
    void report(MultiStats & infos)
    {
        std::lock_guard lock(mutex);

        /// Avoid zeros, nans or exceptions
        if (0 == info.queries)
            return;
        std::cerr << "\n";
        for (size_t i = 0; i < infos.size(); ++i)
        {
            const auto & info = infos[i];

            double seconds = info.watch.elapsedSeconds();
            /// Avoid zeros, nans or exceptions
            if (0 == info->queries)
                return;

            std::cerr
                << "\n"
                << "QPS: " << (info.queries / seconds) << ", "
                << "RPS: " << (info.read_rows / seconds) << ", "
                << "MiB/s: " << (info.read_bytes / seconds / 1048576) << ", "
                << "result RPS: " << (info.result_rows / seconds) << ", "
                << "result MiB/s: " << (info.result_bytes / seconds / 1048576) << "."
                << "\n";
            double seconds = info->work_time / concurrency;

            std::cerr
                << connections[i]->getDescription() << ", "
                << "queries " << info->queries << ", "
                << "QPS: " << (info->queries / seconds) << ", "
                << "RPS: " << (info->read_rows / seconds) << ", "
                << "MiB/s: " << (info->read_bytes / seconds / 1048576) << ", "
                << "result RPS: " << (info->result_rows / seconds) << ", "
                << "result MiB/s: " << (info->result_bytes / seconds / 1048576) << "."
                << "\n";
        }
        std::cerr << "\n";

        auto print_percentile = [&](double percent)
        {
            std::cerr << percent << "%\t" << info.sampler.quantileInterpolated(percent / 100.0) << " sec." << std::endl;
            std::cerr << percent << "%\t\t";
            for (const auto & info : infos)
            {
                std::cerr << info->sampler.quantileInterpolated(percent / 100.0) << " sec." << "\t";
            }
            std::cerr << "\n";
        };

        for (int percent = 0; percent <= 90; percent += 10)
@ -367,10 +422,16 @@ private:
|
||||
print_percentile(99.9);
|
||||
print_percentile(99.99);
|
||||
|
||||
info.clear();
|
||||
std::cerr << "\n" << t_test.compareAndReport(confidence).second << "\n";
|
||||
|
||||
if (!cumulative)
|
||||
{
|
||||
for (auto & info : infos)
|
||||
info->clear();
|
||||
}
|
||||
}
|
||||
|
||||
void reportJSON(Stats & info, const std::string & filename)
|
||||
void reportJSON(MultiStats & infos, const std::string & filename)
|
||||
{
|
||||
WriteBufferFromFile json_out(filename);
|
||||
|
||||
@ -381,36 +442,41 @@ private:
|
||||
json_out << double_quote << key << ": " << value << (with_comma ? ",\n" : "\n");
|
||||
};
|
||||
|
||||
auto print_percentile = [&json_out, &info](auto percent, bool with_comma = true)
|
||||
auto print_percentile = [&json_out](Stats & info, auto percent, bool with_comma = true)
|
||||
{
|
||||
json_out << "\"" << percent << "\"" << ": " << info.sampler.quantileInterpolated(percent / 100.0) << (with_comma ? ",\n" : "\n");
|
||||
};
|
||||
|
||||
json_out << "{\n";
|
||||
|
||||
json_out << double_quote << "statistics" << ": {\n";
|
||||
for (size_t i = 0; i < infos.size(); ++i)
|
||||
{
|
||||
const auto & info = infos[i];
|
||||
|
||||
double seconds = info.watch.elapsedSeconds();
|
||||
print_key_value("QPS", info.queries / seconds);
|
||||
print_key_value("RPS", info.read_rows / seconds);
|
||||
print_key_value("MiBPS", info.read_bytes / seconds);
|
||||
print_key_value("RPS_result", info.result_rows / seconds);
|
||||
print_key_value("MiBPS_result", info.result_bytes / seconds);
|
||||
print_key_value("num_queries", info.queries.load(), false);
|
||||
json_out << double_quote << connections[i]->getDescription() << ": {\n";
|
||||
json_out << double_quote << "statistics" << ": {\n";
|
||||
|
||||
json_out << "},\n";
|
||||
print_key_value("QPS", info->queries / info->work_time);
|
||||
print_key_value("RPS", info->read_rows / info->work_time);
|
||||
print_key_value("MiBPS", info->read_bytes / info->work_time);
|
||||
print_key_value("RPS_result", info->result_rows / info->work_time);
|
||||
print_key_value("MiBPS_result", info->result_bytes / info->work_time);
|
||||
print_key_value("num_queries", info->queries.load(), false);
|
||||
|
||||
json_out << double_quote << "query_time_percentiles" << ": {\n";
|
||||
json_out << "},\n";
|
||||
json_out << double_quote << "query_time_percentiles" << ": {\n";
|
||||
|
||||
for (int percent = 0; percent <= 90; percent += 10)
|
||||
print_percentile(percent);
|
||||
for (int percent = 0; percent <= 90; percent += 10)
|
||||
print_percentile(*info, percent);
|
||||
|
||||
print_percentile(95);
|
||||
print_percentile(99);
|
||||
print_percentile(99.9);
|
||||
print_percentile(99.99, false);
|
||||
print_percentile(*info, 95);
|
||||
print_percentile(*info, 99);
|
||||
print_percentile(*info, 99.9);
|
||||
print_percentile(*info, 99.99, false);
|
||||
|
||||
json_out << "}\n";
|
||||
json_out << "}\n";
|
||||
json_out << (i == infos.size() - 1 ? "}\n" : "},\n");
|
||||
}
|
||||
|
||||
json_out << "}\n";
|
||||
}
|
||||
@ -439,7 +505,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
{
|
||||
using boost::program_options::value;
|
||||
|
||||
boost::program_options::options_description desc("Allowed options");
|
||||
boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
|
||||
desc.add_options()
|
||||
("help", "produce help message")
|
||||
("concurrency,c", value<unsigned>()->default_value(1), "number of parallel queries")
|
||||
@ -449,13 +515,15 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
("timelimit,t", value<double>()->default_value(0.), "stop launch of queries after specified time limit")
|
||||
("randomize,r", value<bool>()->default_value(false), "randomize order of execution")
|
||||
("json", value<std::string>()->default_value(""), "write final report to specified file in JSON format")
|
||||
("host,h", value<std::string>()->default_value("localhost"), "")
|
||||
("port", value<UInt16>()->default_value(9000), "")
|
||||
("host,h", value<Strings>()->multitoken(), "")
|
||||
("port,p", value<Ports>()->multitoken(), "")
|
||||
("cumulative", "prints cumulative data instead of data per interval")
|
||||
("secure,s", "Use TLS connection")
|
||||
("user", value<std::string>()->default_value("default"), "")
|
||||
("password", value<std::string>()->default_value(""), "")
|
||||
("database", value<std::string>()->default_value("default"), "")
|
||||
("stacktrace", "print stack traces of exceptions")
|
||||
("confidence", value<size_t>()->default_value(5), "set the level of confidence for T-test [0=80%, 1=90%, 2=95%, 3=98%, 4=99%, 5=99.5%(default)")
|
||||
;
|
||||
|
||||
Settings settings;
|
||||
@ -475,12 +543,15 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
print_stacktrace = options.count("stacktrace");
|
||||
|
||||
UseSSL use_ssl;
|
||||
Ports ports = options.count("port") ? options["port"].as<Ports>() : Ports({9000});
|
||||
Strings hosts = options.count("host") ? options["host"].as<Strings>() : Strings({"localhost"});
|
||||
|
||||
Benchmark benchmark(
|
||||
options["concurrency"].as<unsigned>(),
|
||||
options["delay"].as<double>(),
|
||||
options["host"].as<std::string>(),
|
||||
options["port"].as<UInt16>(),
|
||||
std::move(hosts),
|
||||
std::move(ports),
|
||||
options.count("cumulative"),
|
||||
options.count("secure"),
|
||||
options["database"].as<std::string>(),
|
||||
options["user"].as<std::string>(),
|
||||
@ -490,6 +561,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
options["iterations"].as<size_t>(),
|
||||
options["timelimit"].as<double>(),
|
||||
options["json"].as<std::string>(),
|
||||
options["confidence"].as<size_t>(),
|
||||
settings);
|
||||
return benchmark.run();
|
||||
}
|
||||
|
@ -67,6 +67,7 @@
|
||||
#include <Common/Config/configReadClient.h>
|
||||
#include <Storages/ColumnsDescription.h>
|
||||
#include <common/argsToConfig.h>
|
||||
#include <Common/TerminalSize.h>
|
||||
|
||||
#if USE_READLINE
|
||||
#include "Suggest.h"
|
||||
@ -130,7 +131,7 @@ private:
|
||||
bool print_time_to_stderr = false; /// Output execution time to stderr in batch mode.
|
||||
bool stdin_is_not_tty = false; /// stdin is not a terminal.
|
||||
|
||||
winsize terminal_size {}; /// Terminal size is needed to render progress bar.
|
||||
uint16_t terminal_width = 0; /// Terminal width is needed to render progress bar.
|
||||
|
||||
std::unique_ptr<Connection> connection; /// Connection to DB.
|
||||
String query_id; /// Current query_id.
|
||||
@ -671,7 +672,7 @@ private:
|
||||
String text;
|
||||
|
||||
if (config().has("query"))
|
||||
text = config().getString("query");
|
||||
text = config().getRawString("query"); /// Poco configuration should not process substitutions in form of ${...} inside query.
|
||||
else
|
||||
{
|
||||
/// If 'query' parameter is not set, read a query from stdin.
|
||||
@ -1465,7 +1466,7 @@ private:
|
||||
|
||||
if (show_progress_bar)
|
||||
{
|
||||
ssize_t width_of_progress_bar = static_cast<ssize_t>(terminal_size.ws_col) - written_progress_chars - strlen(" 99%");
|
||||
ssize_t width_of_progress_bar = static_cast<ssize_t>(terminal_width) - written_progress_chars - strlen(" 99%");
|
||||
if (width_of_progress_bar > 0)
|
||||
{
|
||||
std::string bar = UnicodeBar::render(UnicodeBar::getWidth(progress.read_rows, 0, total_rows_corrected, width_of_progress_bar));
|
||||
@ -1642,22 +1643,13 @@ public:
|
||||
|
||||
stdin_is_not_tty = !isatty(STDIN_FILENO);
|
||||
|
||||
if (!stdin_is_not_tty)
|
||||
terminal_width = getTerminalWidth();
|
||||
|
||||
namespace po = boost::program_options;
|
||||
|
||||
unsigned line_length = po::options_description::m_default_line_length;
|
||||
unsigned min_description_length = line_length / 2;
|
||||
if (!stdin_is_not_tty)
|
||||
{
|
||||
if (ioctl(STDIN_FILENO, TIOCGWINSZ, &terminal_size))
|
||||
throwFromErrno("Cannot obtain terminal window size (ioctl TIOCGWINSZ)", ErrorCodes::SYSTEM_ERROR);
|
||||
line_length = std::max(
|
||||
static_cast<unsigned>(strlen("--http_native_compression_disable_checksumming_on_decompress ")),
|
||||
static_cast<unsigned>(terminal_size.ws_col));
|
||||
min_description_length = std::min(min_description_length, line_length - 2);
|
||||
}
|
||||
|
||||
/// Main commandline options related to client functionality and all parameters from Settings.
|
||||
po::options_description main_description("Main options", line_length, min_description_length);
|
||||
po::options_description main_description = createOptionsDescription("Main options", terminal_width);
|
||||
main_description.add_options()
|
||||
("help", "produce help message")
|
||||
("config-file,C", po::value<std::string>(), "config-file path")
|
||||
@ -1672,7 +1664,7 @@ public:
|
||||
* the "\n" is used to distinguish this case because there is hardly a chance an user would use "\n"
|
||||
* as the password.
|
||||
*/
|
||||
("password", po::value<std::string>()->implicit_value("\n"), "password")
|
||||
("password", po::value<std::string>()->implicit_value("\n", ""), "password")
|
||||
("ask-password", "ask-password")
|
||||
("query_id", po::value<std::string>(), "query_id")
|
||||
("query,q", po::value<std::string>(), "query")
|
||||
@ -1703,7 +1695,7 @@ public:
|
||||
context.getSettingsRef().addProgramOptions(main_description);
|
||||
|
||||
/// Commandline options related to external tables.
|
||||
po::options_description external_description("External tables options");
|
||||
po::options_description external_description = createOptionsDescription("External tables options", terminal_width);
|
||||
external_description.add_options()
|
||||
("file", po::value<std::string>(), "data file or - for stdin")
|
||||
("name", po::value<std::string>()->default_value("_data"), "name of the table")
|
||||
|
@ -12,8 +12,9 @@
|
||||
#include <IO/copyData.h>
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <Parsers/ExpressionElementParsers.h>
|
||||
|
||||
#include <Compression/CompressionFactory.h>
|
||||
#include <Common/TerminalSize.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -59,7 +60,7 @@ void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out)
|
||||
|
||||
int mainEntryClickHouseCompressor(int argc, char ** argv)
|
||||
{
|
||||
boost::program_options::options_description desc("Allowed options");
|
||||
boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
|
||||
desc.add_options()
|
||||
("help,h", "produce help message")
|
||||
("decompress,d", "decompress")
|
||||
|
@ -6,13 +6,13 @@
|
||||
#include <Parsers/ParserQuery.h>
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <Parsers/formatAST.h>
|
||||
|
||||
#include <Common/TerminalSize.h>
|
||||
|
||||
int mainEntryClickHouseFormat(int argc, char ** argv)
|
||||
{
|
||||
using namespace DB;
|
||||
|
||||
boost::program_options::options_description desc("Allowed options");
|
||||
boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
|
||||
desc.add_options()
|
||||
("help,h", "produce help message")
|
||||
("hilite", "add syntax highlight with ANSI terminal escape sequences")
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include <boost/program_options/options_description.hpp>
|
||||
#include <boost/program_options.hpp>
|
||||
#include <common/argsToConfig.h>
|
||||
#include <Common/TerminalSize.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -267,7 +268,7 @@ void LocalServer::attachSystemTables()
|
||||
void LocalServer::processQueries()
|
||||
{
|
||||
String initial_create_query = getInitialCreateTableQuery();
|
||||
String queries_str = initial_create_query + config().getString("query");
|
||||
String queries_str = initial_create_query + config().getRawString("query");
|
||||
|
||||
std::vector<String> queries;
|
||||
auto parse_res = splitMultipartQuery(queries_str, queries);
|
||||
@ -409,17 +410,7 @@ void LocalServer::init(int argc, char ** argv)
|
||||
/// Don't parse options with Poco library, we prefer neat boost::program_options
|
||||
stopOptionsProcessing();
|
||||
|
||||
unsigned line_length = po::options_description::m_default_line_length;
|
||||
unsigned min_description_length = line_length / 2;
|
||||
if (isatty(STDIN_FILENO))
|
||||
{
|
||||
winsize terminal_size{};
|
||||
ioctl(0, TIOCGWINSZ, &terminal_size);
|
||||
line_length = std::max(3U, static_cast<unsigned>(terminal_size.ws_col));
|
||||
min_description_length = std::min(min_description_length, line_length - 2);
|
||||
}
|
||||
|
||||
po::options_description description("Main options", line_length, min_description_length);
|
||||
po::options_description description = createOptionsDescription("Main options", getTerminalWidth());
|
||||
description.add_options()
|
||||
("help", "produce help message")
|
||||
("config-file,c", po::value<std::string>(), "config-file path")
|
||||
|
@ -37,6 +37,7 @@
|
||||
#include <boost/program_options.hpp>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/container/flat_map.hpp>
|
||||
#include <Common/TerminalSize.h>
|
||||
|
||||
|
||||
static const char * documantation = R"(
|
||||
@ -949,7 +950,7 @@ try
|
||||
using namespace DB;
|
||||
namespace po = boost::program_options;
|
||||
|
||||
po::options_description description("Options");
|
||||
po::options_description description = createOptionsDescription("Options", getTerminalWidth());
|
||||
description.add_options()
|
||||
("help", "produce help message")
|
||||
("structure,S", po::value<std::string>(), "structure of the initial table (list of column and type names)")
|
||||
|
@ -97,7 +97,7 @@ void PerformanceTestInfo::applySettings(XMLConfigurationPtr config)
|
||||
}
|
||||
|
||||
extractSettings(config, "settings", config_settings, settings_to_apply);
|
||||
settings.loadFromChanges(settings_to_apply);
|
||||
settings.applyChanges(settings_to_apply);
|
||||
|
||||
if (settings_contain("average_rows_speed_precision"))
|
||||
TestStats::avg_rows_speed_precision =
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <Core/Settings.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/InterruptListener.h>
|
||||
#include <Common/TerminalSize.h>
|
||||
|
||||
#include "TestStopConditions.h"
|
||||
#include "TestStats.h"
|
||||
@ -324,7 +325,7 @@ try
|
||||
using po::value;
|
||||
using Strings = DB::Strings;
|
||||
|
||||
po::options_description desc("Allowed options");
|
||||
po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
|
||||
desc.add_options()
|
||||
("help", "produce help message")
|
||||
("lite", "use lite version of output")
|
||||
|
@ -520,7 +520,18 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
|
||||
/// Init trace collector only after trace_log system table was created
|
||||
/// Disable it if we collect test coverage information, because it will work extremely slow.
|
||||
#if USE_INTERNAL_UNWIND_LIBRARY && !WITH_COVERAGE
|
||||
///
|
||||
/// It also cannot work with sanitizers.
|
||||
/// Sanitizers use quick "frame walking" stack unwinding (this implies -fno-omit-frame-pointer).
/// And they do unwinding frequently (on every malloc/free, thread/mutex operations, etc).
/// They change %rbp during unwinding and it confuses libunwind if a signal comes during sanitizer unwinding
/// and the query profiler decides to unwind the stack with libunwind at this moment.
|
||||
///
|
||||
/// Symptoms: you'll get silent Segmentation Fault - without sanitizer message and without usual ClickHouse diagnostics.
|
||||
///
|
||||
/// Look at compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
|
||||
///
|
||||
#if USE_UNWIND && !WITH_COVERAGE && !defined(SANITIZER)
|
||||
/// QueryProfiler cannot work reliably with any other libunwind or without PHDR cache.
|
||||
if (hasPHDRCache())
|
||||
global_context->initializeTraceCollector();
|
||||
|
@ -90,7 +90,7 @@ public:
|
||||
auto & set = this->data(place).value;
|
||||
size_t size = set.size();
|
||||
writeVarUInt(size, buf);
|
||||
for (auto & elem : set)
|
||||
for (const auto & elem : set)
|
||||
writeIntBinary(elem, buf);
|
||||
}
|
||||
|
||||
|
@ -74,7 +74,7 @@ public:
|
||||
{
|
||||
Entry entry;
|
||||
if (settings)
|
||||
entry = Base::get(settings->queue_max_wait_ms.totalMilliseconds());
|
||||
entry = Base::get(settings->connection_pool_max_wait_ms.totalMilliseconds());
|
||||
else
|
||||
entry = Base::get(-1);
|
||||
|
||||
@ -88,6 +88,10 @@ public:
|
||||
{
|
||||
return host;
|
||||
}
|
||||
std::string getDescription() const
|
||||
{
|
||||
return host + ":" + toString(port);
|
||||
}
|
||||
|
||||
protected:
|
||||
/** Creates a new object to put in the pool. */
|
||||
|
@ -34,7 +34,7 @@ namespace
|
||||
auto & data = res_col->getData();
|
||||
|
||||
data.resize(hash_map.size());
|
||||
for (auto val : hash_map)
|
||||
for (const auto & val : hash_map)
|
||||
data[val.getSecond()] = val.getFirst();
|
||||
|
||||
for (auto & ind : index)
|
||||
|
@ -81,6 +81,16 @@ MutableColumnPtr ColumnTuple::cloneEmpty() const
|
||||
return ColumnTuple::create(std::move(new_columns));
|
||||
}
|
||||
|
||||
MutableColumnPtr ColumnTuple::cloneResized(size_t new_size) const
|
||||
{
|
||||
const size_t tuple_size = columns.size();
|
||||
MutableColumns new_columns(tuple_size);
|
||||
for (size_t i = 0; i < tuple_size; ++i)
|
||||
new_columns[i] = columns[i]->cloneResized(new_size);
|
||||
|
||||
return ColumnTuple::create(std::move(new_columns));
|
||||
}
|
||||
|
||||
Field ColumnTuple::operator[](size_t n) const
|
||||
{
|
||||
return Tuple{ext::map<TupleBackend>(columns, [n] (const auto & column) { return (*column)[n]; })};
|
||||
|
@ -42,6 +42,7 @@ public:
|
||||
const char * getFamilyName() const override { return "Tuple"; }
|
||||
|
||||
MutableColumnPtr cloneEmpty() const override;
|
||||
MutableColumnPtr cloneResized(size_t size) const override;
|
||||
|
||||
size_t size() const override
|
||||
{
|
||||
|
@ -4,5 +4,5 @@ add_headers_and_sources(clickhouse_common_config .)
|
||||
|
||||
add_library(clickhouse_common_config ${clickhouse_common_config_headers} ${clickhouse_common_config_sources})
|
||||
|
||||
target_link_libraries(clickhouse_common_config PUBLIC common PRIVATE clickhouse_common_zookeeper string_utils PUBLIC ${Poco_XML_LIBRARY} ${Poco_Util_LIBRARY} Threads::Threads)
|
||||
target_link_libraries(clickhouse_common_config PUBLIC common PRIVATE clickhouse_common_zookeeper string_utils PUBLIC ${Poco_XML_LIBRARY} ${Poco_Util_LIBRARY})
|
||||
target_include_directories(clickhouse_common_config PUBLIC ${DBMS_INCLUDE_DIR})
|
||||
|
@ -446,7 +446,7 @@ namespace ErrorCodes
|
||||
extern const int VIOLATED_CONSTRAINT = 469;
|
||||
extern const int QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW = 470;
|
||||
extern const int SETTINGS_ARE_NOT_SUPPORTED = 471;
|
||||
extern const int IMMUTABLE_SETTING = 472;
|
||||
extern const int READONLY_SETTING = 472;
|
||||
|
||||
extern const int KEEPER_EXCEPTION = 999;
|
||||
extern const int POCO_EXCEPTION = 1000;
|
||||
|
@ -11,8 +11,8 @@ struct FixedHashMapCell
|
||||
using State = TState;
|
||||
|
||||
using value_type = PairNoInit<Key, Mapped>;
|
||||
bool full;
|
||||
Mapped mapped;
|
||||
bool full;
|
||||
|
||||
FixedHashMapCell() {}
|
||||
FixedHashMapCell(const Key &, const State &) : full(true) {}
|
||||
|
@ -128,14 +128,12 @@ struct HashMapCellWithSavedHash : public HashMapCell<Key, TMapped, Hash, TState>
|
||||
};
|
||||
|
||||
|
||||
template
|
||||
<
|
||||
template <
|
||||
typename Key,
|
||||
typename Cell,
|
||||
typename Hash = DefaultHash<Key>,
|
||||
typename Grower = HashTableGrower<>,
|
||||
typename Allocator = HashTableAllocator
|
||||
>
|
||||
typename Allocator = HashTableAllocator>
|
||||
class HashMapTable : public HashTable<Key, Cell, Hash, Grower, Allocator>
|
||||
{
|
||||
public:
|
||||
@ -173,23 +171,19 @@ public:
|
||||
};
|
||||
|
||||
|
||||
template
|
||||
<
|
||||
template <
|
||||
typename Key,
|
||||
typename Mapped,
|
||||
typename Hash = DefaultHash<Key>,
|
||||
typename Grower = HashTableGrower<>,
|
||||
typename Allocator = HashTableAllocator
|
||||
>
|
||||
typename Allocator = HashTableAllocator>
|
||||
using HashMap = HashMapTable<Key, HashMapCell<Key, Mapped, Hash>, Hash, Grower, Allocator>;
|
||||
|
||||
|
||||
template
|
||||
<
|
||||
template <
|
||||
typename Key,
|
||||
typename Mapped,
|
||||
typename Hash = DefaultHash<Key>,
|
||||
typename Grower = HashTableGrower<>,
|
||||
typename Allocator = HashTableAllocator
|
||||
>
|
||||
typename Allocator = HashTableAllocator>
|
||||
using HashMapWithSavedHash = HashMapTable<Key, HashMapCellWithSavedHash<Key, Mapped, Hash>, Hash, Grower, Allocator>;
|
||||
|
@ -95,7 +95,6 @@ struct HashTableCell
|
||||
|
||||
/// Create a cell with the given key / key and value.
|
||||
HashTableCell(const Key & key_, const State &) : key(key_) {}
|
||||
/// HashTableCell(const value_type & value_, const State & state) : key(value_) {}
|
||||
|
||||
/// Get what the value_type of the container will be.
|
||||
value_type & getValueMutable() { return key; }
|
||||
|
@ -1,70 +0,0 @@
|
||||
#include "MiAllocator.h"
|
||||
|
||||
#if USE_MIMALLOC
|
||||
#include <mimalloc.h>
|
||||
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/formatReadable.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int CANNOT_ALLOCATE_MEMORY;
|
||||
}
|
||||
|
||||
void * MiAllocator::alloc(size_t size, size_t alignment)
|
||||
{
|
||||
void * ptr;
|
||||
if (alignment == 0)
|
||||
{
|
||||
ptr = mi_malloc(size);
|
||||
if (!ptr)
|
||||
DB::throwFromErrno("MiAllocator: Cannot allocate in mimalloc " + formatReadableSizeWithBinarySuffix(size) + ".", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
|
||||
}
|
||||
else
|
||||
{
|
||||
ptr = mi_malloc_aligned(size, alignment);
|
||||
if (!ptr)
|
||||
DB::throwFromErrno("MiAllocator: Cannot allocate in mimalloc (mi_malloc_aligned) " + formatReadableSizeWithBinarySuffix(size) + " with alignment " + toString(alignment) + ".", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void MiAllocator::free(void * buf, size_t)
|
||||
{
|
||||
mi_free(buf);
|
||||
}
|
||||
|
||||
void * MiAllocator::realloc(void * old_ptr, size_t, size_t new_size, size_t alignment)
|
||||
{
|
||||
if (old_ptr == nullptr)
|
||||
return alloc(new_size, alignment);
|
||||
|
||||
if (new_size == 0)
|
||||
{
|
||||
mi_free(old_ptr);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void * ptr;
|
||||
|
||||
if (alignment == 0)
|
||||
{
|
||||
ptr = mi_realloc(old_ptr, alignment);
|
||||
if (!ptr)
|
||||
DB::throwFromErrno("MiAllocator: Cannot reallocate in mimalloc " + formatReadableSizeWithBinarySuffix(size) + ".", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
|
||||
}
|
||||
else
|
||||
{
|
||||
ptr = mi_realloc_aligned(old_ptr, new_size, alignment);
|
||||
if (!ptr)
|
||||
DB::throwFromErrno("MiAllocator: Cannot reallocate in mimalloc (mi_realloc_aligned) " + formatReadableSizeWithBinarySuffix(size) + " with alignment " + toString(alignment) + ".", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY);
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
@ -1,27 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <Common/config.h>
|
||||
|
||||
#if USE_MIMALLOC
|
||||
#include <cstddef>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/*
|
||||
* This is a different allocator that is based on mimalloc (Microsoft malloc).
|
||||
* It can be used separately from main allocator to catch heap corruptions and vulnerabilities (for example, for caches).
|
||||
* We use MI_SECURE mode in mimalloc to achieve such behaviour.
|
||||
*/
|
||||
struct MiAllocator
|
||||
{
|
||||
static void * alloc(size_t size, size_t alignment = 0);
|
||||
|
||||
static void free(void * buf, size_t);
|
||||
|
||||
static void * realloc(void * old_ptr, size_t, size_t new_size, size_t alignment = 0);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
@ -36,8 +36,10 @@
|
||||
M(MarkCacheMisses, "") \
|
||||
M(CreatedReadBufferOrdinary, "") \
|
||||
M(CreatedReadBufferAIO, "") \
|
||||
M(CreatedReadBufferAIOFailed, "") \
|
||||
M(CreatedWriteBufferOrdinary, "") \
|
||||
M(CreatedWriteBufferAIO, "") \
|
||||
M(CreatedWriteBufferAIOFailed, "") \
|
||||
M(DiskReadElapsedMicroseconds, "Total time spent waiting for read syscall. This includes reads from the page cache.") \
M(DiskWriteElapsedMicroseconds, "Total time spent waiting for write syscall. This includes writes to the page cache.") \
|
||||
M(NetworkReceiveElapsedMicroseconds, "") \
|
||||
|
@ -100,7 +100,7 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const Int32 thread_id, const
|
||||
: log(&Logger::get("QueryProfiler"))
|
||||
, pause_signal(pause_signal_)
|
||||
{
|
||||
#if USE_INTERNAL_UNWIND_LIBRARY
|
||||
#if USE_UNWIND
|
||||
/// Sanity check.
|
||||
if (!hasPHDRCache())
|
||||
throw Exception("QueryProfiler cannot be used without PHDR cache, that is not available for TSan build", ErrorCodes::NOT_IMPLEMENTED);
|
||||
@ -173,7 +173,7 @@ QueryProfilerBase<ProfilerImpl>::~QueryProfilerBase()
|
||||
template <typename ProfilerImpl>
|
||||
void QueryProfilerBase<ProfilerImpl>::tryCleanup()
|
||||
{
|
||||
#if USE_INTERNAL_UNWIND_LIBRARY
|
||||
#if USE_UNWIND
|
||||
if (timer_id != nullptr && timer_delete(timer_id))
|
||||
LOG_ERROR(log, "Failed to delete query profiler timer " + errnoToString(ErrorCodes::CANNOT_DELETE_TIMER));
|
||||
|
||||
|
@ -1,6 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <Core/Types.h>
|
||||
#include <Common/config.h>
|
||||
#include <common/config_common.h>
|
||||
#include <signal.h>
|
||||
#include <time.h>
|
||||
@ -43,7 +44,7 @@ private:
|
||||
|
||||
Poco::Logger * log;
|
||||
|
||||
#if USE_INTERNAL_UNWIND_LIBRARY
|
||||
#if USE_UNWIND
|
||||
/// Timer id from timer_create(2)
|
||||
timer_t timer_id = nullptr;
|
||||
#endif
|
||||
|
@ -1,15 +1,20 @@
|
||||
#include <common/SimpleCache.h>
|
||||
#include <common/demangle.h>
|
||||
#include <Common/config.h>
|
||||
#include <Common/StackTrace.h>
|
||||
#include <Common/SymbolIndex.h>
|
||||
|
||||
#include <Common/Dwarf.h>
|
||||
#include <Common/Elf.h>
|
||||
#include <sstream>
|
||||
#include <filesystem>
|
||||
#include <unordered_map>
|
||||
#include <cstring>
|
||||
#include <Common/SymbolIndex.h>
|
||||
#include <Common/config.h>
|
||||
#include <common/SimpleCache.h>
|
||||
#include <common/demangle.h>
|
||||
|
||||
#include <cstring>
|
||||
#include <filesystem>
|
||||
#include <sstream>
|
||||
#include <unordered_map>
|
||||
|
||||
#if USE_UNWIND
|
||||
# include <libunwind.h>
|
||||
#endif
|
||||
|
||||
std::string signalToErrorMessage(int sig, const siginfo_t & info, const ucontext_t & context)
|
||||
{
|
||||
@ -215,12 +220,6 @@ StackTrace::StackTrace(NoCapture)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
#if USE_UNWIND
|
||||
extern "C" int unw_backtrace(void **, int);
|
||||
#endif
|
||||
|
||||
|
||||
void StackTrace::tryCapture()
|
||||
{
|
||||
size = 0;
|
||||
|
169
dbms/src/Common/StudentTTest.cpp
Normal file
@ -0,0 +1,169 @@
|
||||
#include "StudentTTest.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <iostream>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <stdexcept>
|
||||
|
||||
|
||||
namespace
|
||||
{
|
||||
/// First row corresponds to infinity size of distributions case
|
||||
const double students_table[101][6] =
|
||||
{
|
||||
{ 1.282, 1.645, 1.960, 2.326, 2.576, 3.090 },
|
||||
{ 3.078, 6.314, 12.706, 31.821, 63.657, 318.313 },
|
||||
{ 1.886, 2.920, 4.303, 6.965, 9.925, 22.327 },
|
||||
{ 1.638, 2.353, 3.182, 4.541, 5.841, 10.215 },
|
||||
{ 1.533, 2.132, 2.776, 3.747, 4.604, 7.173 },
|
||||
{ 1.476, 2.015, 2.571, 3.365, 4.032, 5.893 },
|
||||
{ 1.440, 1.943, 2.447, 3.143, 3.707, 5.208 },
|
||||
{ 1.415, 1.895, 2.365, 2.998, 3.499, 4.782 },
|
||||
{ 1.397, 1.860, 2.306, 2.896, 3.355, 4.499 },
|
||||
{ 1.383, 1.833, 2.262, 2.821, 3.250, 4.296 },
|
||||
{ 1.372, 1.812, 2.228, 2.764, 3.169, 4.143 },
|
||||
{ 1.363, 1.796, 2.201, 2.718, 3.106, 4.024 },
|
||||
{ 1.356, 1.782, 2.179, 2.681, 3.055, 3.929 },
|
||||
{ 1.350, 1.771, 2.160, 2.650, 3.012, 3.852 },
|
||||
{ 1.345, 1.761, 2.145, 2.624, 2.977, 3.787 },
|
||||
{ 1.341, 1.753, 2.131, 2.602, 2.947, 3.733 },
|
||||
{ 1.337, 1.746, 2.120, 2.583, 2.921, 3.686 },
|
||||
{ 1.333, 1.740, 2.110, 2.567, 2.898, 3.646 },
|
||||
{ 1.330, 1.734, 2.101, 2.552, 2.878, 3.610 },
|
||||
{ 1.328, 1.729, 2.093, 2.539, 2.861, 3.579 },
|
||||
{ 1.325, 1.725, 2.086, 2.528, 2.845, 3.552 },
|
||||
{ 1.323, 1.721, 2.080, 2.518, 2.831, 3.527 },
|
||||
{ 1.321, 1.717, 2.074, 2.508, 2.819, 3.505 },
|
||||
{ 1.319, 1.714, 2.069, 2.500, 2.807, 3.485 },
|
||||
{ 1.318, 1.711, 2.064, 2.492, 2.797, 3.467 },
|
||||
{ 1.316, 1.708, 2.060, 2.485, 2.787, 3.450 },
|
||||
{ 1.315, 1.706, 2.056, 2.479, 2.779, 3.435 },
|
||||
{ 1.314, 1.703, 2.052, 2.473, 2.771, 3.421 },
|
||||
{ 1.313, 1.701, 2.048, 2.467, 2.763, 3.408 },
|
||||
{ 1.311, 1.699, 2.045, 2.462, 2.756, 3.396 },
|
||||
{ 1.310, 1.697, 2.042, 2.457, 2.750, 3.385 },
|
||||
{ 1.309, 1.696, 2.040, 2.453, 2.744, 3.375 },
|
||||
{ 1.309, 1.694, 2.037, 2.449, 2.738, 3.365 },
|
||||
{ 1.308, 1.692, 2.035, 2.445, 2.733, 3.356 },
|
||||
{ 1.307, 1.691, 2.032, 2.441, 2.728, 3.348 },
|
||||
{ 1.306, 1.690, 2.030, 2.438, 2.724, 3.340 },
|
||||
{ 1.306, 1.688, 2.028, 2.434, 2.719, 3.333 },
|
||||
{ 1.305, 1.687, 2.026, 2.431, 2.715, 3.326 },
|
||||
{ 1.304, 1.686, 2.024, 2.429, 2.712, 3.319 },
|
||||
{ 1.304, 1.685, 2.023, 2.426, 2.708, 3.313 },
|
||||
{ 1.303, 1.684, 2.021, 2.423, 2.704, 3.307 },
|
||||
{ 1.303, 1.683, 2.020, 2.421, 2.701, 3.301 },
|
||||
{ 1.302, 1.682, 2.018, 2.418, 2.698, 3.296 },
|
||||
{ 1.302, 1.681, 2.017, 2.416, 2.695, 3.291 },
|
||||
{ 1.301, 1.680, 2.015, 2.414, 2.692, 3.286 },
|
||||
{ 1.301, 1.679, 2.014, 2.412, 2.690, 3.281 },
|
||||
{ 1.300, 1.679, 2.013, 2.410, 2.687, 3.277 },
|
||||
{ 1.300, 1.678, 2.012, 2.408, 2.685, 3.273 },
|
||||
{ 1.299, 1.677, 2.011, 2.407, 2.682, 3.269 },
|
||||
{ 1.299, 1.677, 2.010, 2.405, 2.680, 3.265 },
|
||||
{ 1.299, 1.676, 2.009, 2.403, 2.678, 3.261 },
|
||||
{ 1.298, 1.675, 2.008, 2.402, 2.676, 3.258 },
|
||||
{ 1.298, 1.675, 2.007, 2.400, 2.674, 3.255 },
|
||||
{ 1.298, 1.674, 2.006, 2.399, 2.672, 3.251 },
|
||||
{ 1.297, 1.674, 2.005, 2.397, 2.670, 3.248 },
|
||||
{ 1.297, 1.673, 2.004, 2.396, 2.668, 3.245 },
|
||||
{ 1.297, 1.673, 2.003, 2.395, 2.667, 3.242 },
|
||||
{ 1.297, 1.672, 2.002, 2.394, 2.665, 3.239 },
|
||||
{ 1.296, 1.672, 2.002, 2.392, 2.663, 3.237 },
|
||||
{ 1.296, 1.671, 2.001, 2.391, 2.662, 3.234 },
|
||||
{ 1.296, 1.671, 2.000, 2.390, 2.660, 3.232 },
|
||||
{ 1.296, 1.670, 2.000, 2.389, 2.659, 3.229 },
|
||||
{ 1.295, 1.670, 1.999, 2.388, 2.657, 3.227 },
|
||||
{ 1.295, 1.669, 1.998, 2.387, 2.656, 3.225 },
|
||||
{ 1.295, 1.669, 1.998, 2.386, 2.655, 3.223 },
|
||||
{ 1.295, 1.669, 1.997, 2.385, 2.654, 3.220 },
|
||||
{ 1.295, 1.668, 1.997, 2.384, 2.652, 3.218 },
|
||||
{ 1.294, 1.668, 1.996, 2.383, 2.651, 3.216 },
|
||||
{ 1.294, 1.668, 1.995, 2.382, 2.650, 3.214 },
|
||||
{ 1.294, 1.667, 1.995, 2.382, 2.649, 3.213 },
|
||||
{ 1.294, 1.667, 1.994, 2.381, 2.648, 3.211 },
|
||||
{ 1.294, 1.667, 1.994, 2.380, 2.647, 3.209 },
|
||||
{ 1.293, 1.666, 1.993, 2.379, 2.646, 3.207 },
|
||||
{ 1.293, 1.666, 1.993, 2.379, 2.645, 3.206 },
|
||||
{ 1.293, 1.666, 1.993, 2.378, 2.644, 3.204 },
|
||||
{ 1.293, 1.665, 1.992, 2.377, 2.643, 3.202 },
|
||||
{ 1.293, 1.665, 1.992, 2.376, 2.642, 3.201 },
|
||||
{ 1.293, 1.665, 1.991, 2.376, 2.641, 3.199 },
|
||||
{ 1.292, 1.665, 1.991, 2.375, 2.640, 3.198 },
|
||||
{ 1.292, 1.664, 1.990, 2.374, 2.640, 3.197 },
|
||||
{ 1.292, 1.664, 1.990, 2.374, 2.639, 3.195 },
|
||||
{ 1.292, 1.664, 1.990, 2.373, 2.638, 3.194 },
|
||||
{ 1.292, 1.664, 1.989, 2.373, 2.637, 3.193 },
|
||||
{ 1.292, 1.663, 1.989, 2.372, 2.636, 3.191 },
|
||||
{ 1.292, 1.663, 1.989, 2.372, 2.636, 3.190 },
|
||||
{ 1.292, 1.663, 1.988, 2.371, 2.635, 3.189 },
|
||||
{ 1.291, 1.663, 1.988, 2.370, 2.634, 3.188 },
|
||||
{ 1.291, 1.663, 1.988, 2.370, 2.634, 3.187 },
|
||||
{ 1.291, 1.662, 1.987, 2.369, 2.633, 3.185 },
|
||||
{ 1.291, 1.662, 1.987, 2.369, 2.632, 3.184 },
|
||||
{ 1.291, 1.662, 1.987, 2.368, 2.632, 3.183 },
|
||||
{ 1.291, 1.662, 1.986, 2.368, 2.631, 3.182 },
|
||||
{ 1.291, 1.662, 1.986, 2.368, 2.630, 3.181 },
|
||||
{ 1.291, 1.661, 1.986, 2.367, 2.630, 3.180 },
|
||||
{ 1.291, 1.661, 1.986, 2.367, 2.629, 3.179 },
|
||||
{ 1.291, 1.661, 1.985, 2.366, 2.629, 3.178 },
|
||||
{ 1.290, 1.661, 1.985, 2.366, 2.628, 3.177 },
|
||||
{ 1.290, 1.661, 1.985, 2.365, 2.627, 3.176 },
|
||||
{ 1.290, 1.661, 1.984, 2.365, 2.627, 3.175 },
|
||||
{ 1.290, 1.660, 1.984, 2.365, 2.626, 3.175 },
|
||||
{ 1.290, 1.660, 1.984, 2.364, 2.626, 3.174 },
|
||||
};
|
||||
|
||||
const double confidence_level[6] = { 80, 90, 95, 98, 99, 99.5 };
|
||||
}
|
||||
|
||||
|
||||
void StudentTTest::clear()
|
||||
{
|
||||
data[0].clear();
|
||||
data[1].clear();
|
||||
}
|
||||
|
||||
void StudentTTest::add(size_t distribution, double value)
|
||||
{
|
||||
if (distribution > 1)
|
||||
throw std::logic_error("Distribution number for Student's T-Test must be eigther 0 or 1");
|
||||
data[distribution].add(value);
|
||||
}
|
||||
|
||||
/// Confidence_level_index can be set in range [0, 5]. Corresponding values can be found above.
|
||||
std::pair<bool, std::string> StudentTTest::compareAndReport(size_t confidence_level_index) const
|
||||
{
|
||||
if (confidence_level_index > 5)
|
||||
confidence_level_index = 5;
|
||||
|
||||
if (data[0].size == 0 || data[1].size == 0)
|
||||
return {true, ""};
|
||||
|
||||
size_t degrees_of_freedom = (data[0].size - 1) + (data[1].size - 1);
|
||||
|
||||
double table_value = students_table[degrees_of_freedom > 100 ? 0 : degrees_of_freedom][confidence_level_index];
|
||||
|
||||
double pooled_standard_deviation = sqrt(((data[0].size - 1) * data[0].var() + (data[1].size - 1) * data[1].var()) / degrees_of_freedom);
|
||||
|
||||
double t_statistic = pooled_standard_deviation * sqrt(1.0 / data[0].size + 1.0 / data[1].size);
|
||||
|
||||
double mean_difference = fabs(data[0].avg() - data[1].avg());
|
||||
|
||||
double mean_confidence_interval = table_value * t_statistic;
|
||||
|
||||
std::stringstream ss;
|
||||
if (mean_difference > mean_confidence_interval && (mean_difference - mean_confidence_interval > 0.0001)) /// difference must be more than 0.0001, to take into account connection latency.
|
||||
{
|
||||
ss << "Difference at " << confidence_level[confidence_level_index] << "% confidence : ";
|
||||
ss << std::fixed << std::setprecision(8) << "mean difference is " << mean_difference << ", but confidence interval is " << mean_confidence_interval;
|
||||
return {false, ss.str()};
|
||||
}
|
||||
else
|
||||
{
|
||||
ss << "No difference proven at " << confidence_level[confidence_level_index] << "% confidence";
|
||||
return {true, ss.str()};
|
||||
}
|
||||
}
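
For reference, the mean_confidence_interval computed above is the half-width of the confidence interval for the difference of the two means under the pooled-variance two-sample t-test. In the notation of the code (sample sizes n_0, n_1, sample variances s_0^2, s_1^2, and tabulated value t):

    s_p = \sqrt{\frac{(n_0 - 1)\, s_0^2 + (n_1 - 1)\, s_1^2}{n_0 + n_1 - 2}},
    \qquad
    \Delta = t_{\alpha,\, n_0 + n_1 - 2} \cdot s_p \sqrt{\frac{1}{n_0} + \frac{1}{n_1}}

A difference of the means larger than \Delta (plus the 0.0001 allowance for connection latency) is reported as significant at the chosen confidence level.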
|
||||
|
59
dbms/src/Common/StudentTTest.h
Normal file
@ -0,0 +1,59 @@
#pragma once

#include <array>
#include <string>
#include <map>

/**
 * About:
 * This is an implementation of the independent two-sample t-test.
 * Read about it on https://en.wikipedia.org/wiki/Student%27s_t-test (Equal or unequal sample sizes, equal variance)
 *
 * Usage:
 * It is used to check, with some level of confidence, whether two distributions differ.
 * Values can be added with t_test.add(0/1, value) and then compared and reported with compareAndReport().
 */
class StudentTTest
{
private:
    struct DistributionData
    {
        size_t size = 0;
        double sum = 0;
        double squares_sum = 0;

        void add(double value)
        {
            ++size;
            sum += value;
            squares_sum += value * value;
        }

        double avg() const
        {
            return sum / size;
        }

        double var() const
        {
            return (squares_sum - (sum * sum / size)) / static_cast<double>(size - 1);
        }

        void clear()
        {
            size = 0;
            sum = 0;
            squares_sum = 0;
        }
    };

    std::array<DistributionData, 2> data {};

public:
    void clear();

    void add(size_t distribution, double value);

    /// Confidence_level_index can be set in range [0, 5]. Corresponding values can be found above. TODO: Trash - no separation of concepts in code.
    std::pair<bool, std::string> compareAndReport(size_t confidence_level_index = 5) const;
};
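
To make the intended workflow concrete, here is a minimal sketch (not part of the commit) of how this class might be driven. The include path, the timing values, and the choice of confidence index are illustrative; index 2 corresponds to the 95% row of the table above.

#include <iostream>
#include <Common/StudentTTest.h>

int main()
{
    StudentTTest t_test;

    /// Hypothetical timings in seconds for the same query against two servers.
    for (double value : {0.52, 0.49, 0.55, 0.51, 0.50})
        t_test.add(0, value);   /// distribution 0 - first server
    for (double value : {0.61, 0.66, 0.58, 0.63, 0.60})
        t_test.add(1, value);   /// distribution 1 - second server

    /// Index 2 corresponds to the 95% confidence level.
    auto [indistinguishable, message] = t_test.compareAndReport(2);
    std::cout << message << "\n";
    return indistinguishable ? 0 : 1;
}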
37
dbms/src/Common/TerminalSize.cpp
Normal file
@ -0,0 +1,37 @@
#include <unistd.h>
#include <sys/ioctl.h>
#include <Common/Exception.h>
#include <Common/TerminalSize.h>
#include <boost/program_options.hpp>


namespace DB::ErrorCodes
{
    extern const int SYSTEM_ERROR;
}

uint16_t getTerminalWidth()
{
    if (isatty(STDIN_FILENO))
    {
        winsize terminal_size {};

        if (ioctl(STDIN_FILENO, TIOCGWINSZ, &terminal_size))
            DB::throwFromErrno("Cannot obtain terminal window size (ioctl TIOCGWINSZ)", DB::ErrorCodes::SYSTEM_ERROR);

        return terminal_size.ws_col;
    }
    return 0;
}

po::options_description createOptionsDescription(const std::string & caption, uint16_t terminal_width)
{
    unsigned line_length = po::options_description::m_default_line_length;
    unsigned min_description_length = line_length / 2;
    std::string longest_option_desc = "--http_native_compression_disable_checksumming_on_decompress";

    line_length = std::max(static_cast<uint16_t>(longest_option_desc.size()), terminal_width);
    min_description_length = std::min(min_description_length, line_length - 2);

    return po::options_description(caption, line_length, min_description_length);
}
16
dbms/src/Common/TerminalSize.h
Normal file
@ -0,0 +1,16 @@
#pragma once

#include <string>
#include <boost/program_options.hpp>


namespace po = boost::program_options;


uint16_t getTerminalWidth();

/** Creates po::options_description with the given caption and a size appropriate for displaying options
  * when the program is called with the --help option.
  */
po::options_description createOptionsDescription(const std::string & caption, unsigned short terminal_width);
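
As used throughout this commit, the helper replaces the hand-rolled ioctl/line-length logic previously duplicated in each tool. A minimal, self-contained sketch of the pattern follows (not part of the commit; the iterations option is illustrative):

#include <iostream>
#include <boost/program_options.hpp>
#include <Common/TerminalSize.h>

int main(int argc, char ** argv)
{
    namespace po = boost::program_options;

    /// The description wraps its --help output to the current terminal width.
    po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
    desc.add_options()
        ("help,h", "produce help message")
        ("iterations,i", po::value<size_t>()->default_value(0), "amount of queries to be executed");

    po::variables_map options;
    po::store(po::parse_command_line(argc, argv, desc), options);
    po::notify(options);

    if (options.count("help"))
        std::cout << desc << "\n";

    return 0;
}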
@ -61,6 +61,7 @@ public:
|
||||
InternalTextLogsQueueWeakPtr logs_queue_ptr;
|
||||
|
||||
std::vector<UInt32> thread_numbers;
|
||||
std::vector<UInt32> os_thread_ids;
|
||||
|
||||
/// The first thread created this thread group
|
||||
UInt32 master_thread_number = 0;
|
||||
|
@ -4,7 +4,7 @@ add_headers_and_sources(clickhouse_common_zookeeper .)
|
||||
|
||||
add_library(clickhouse_common_zookeeper ${clickhouse_common_zookeeper_headers} ${clickhouse_common_zookeeper_sources})
|
||||
|
||||
target_link_libraries (clickhouse_common_zookeeper PUBLIC clickhouse_common_io common PRIVATE string_utils PUBLIC ${Poco_Util_LIBRARY} Threads::Threads)
|
||||
target_link_libraries (clickhouse_common_zookeeper PUBLIC clickhouse_common_io common PRIVATE string_utils PUBLIC ${Poco_Util_LIBRARY})
|
||||
target_include_directories(clickhouse_common_zookeeper PUBLIC ${DBMS_INCLUDE_DIR})
|
||||
|
||||
if (ENABLE_TESTS)
|
||||
|
@ -8,6 +8,5 @@
|
||||
#cmakedefine01 USE_CPUID
|
||||
#cmakedefine01 USE_CPUINFO
|
||||
#cmakedefine01 USE_BROTLI
|
||||
#cmakedefine01 USE_MIMALLOC
|
||||
#cmakedefine01 USE_UNWIND
|
||||
#cmakedefine01 CLICKHOUSE_SPLIT_BINARY
|
||||
|
@ -76,8 +76,5 @@ target_link_libraries (cow_compositions PRIVATE clickhouse_common_io)
|
||||
add_executable (stopwatch stopwatch.cpp)
|
||||
target_link_libraries (stopwatch PRIVATE clickhouse_common_io)
|
||||
|
||||
add_executable (mi_malloc_test mi_malloc_test.cpp)
|
||||
target_link_libraries (mi_malloc_test PRIVATE clickhouse_common_io)
|
||||
|
||||
add_executable (symbol_index symbol_index.cpp)
|
||||
target_link_libraries (symbol_index PRIVATE clickhouse_common_io)
|
||||
|
@ -1,118 +0,0 @@
|
||||
/** In addition to ClickHouse (Apache 2) license, this file can be also used under MIT license:
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2019 Yandex LLC, Alexey Milovidov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include <cstdint>
|
||||
#include <random>
|
||||
#include <stdexcept>
|
||||
#include <iostream>
|
||||
|
||||
#include <Common/config.h>
|
||||
|
||||
//#undef USE_MIMALLOC
|
||||
//#define USE_MIMALLOC 0
|
||||
|
||||
#if USE_MIMALLOC
|
||||
|
||||
#include <mimalloc.h>
|
||||
#define malloc mi_malloc
|
||||
#define free mi_free
|
||||
|
||||
#else
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
size_t total_size{0};
|
||||
|
||||
struct Allocation
|
||||
{
|
||||
void * ptr = nullptr;
|
||||
size_t size = 0;
|
||||
|
||||
Allocation() {}
|
||||
|
||||
Allocation(size_t size_)
|
||||
: size(size_)
|
||||
{
|
||||
ptr = malloc(size);
|
||||
if (!ptr)
|
||||
throw std::runtime_error("Cannot allocate memory");
|
||||
total_size += size;
|
||||
}
|
||||
|
||||
~Allocation()
|
||||
{
|
||||
if (ptr)
|
||||
{
|
||||
free(ptr);
|
||||
total_size -= size;
|
||||
}
|
||||
ptr = nullptr;
|
||||
}
|
||||
|
||||
Allocation(const Allocation &) = delete;
|
||||
|
||||
Allocation(Allocation && rhs)
|
||||
{
|
||||
ptr = rhs.ptr;
|
||||
size = rhs.size;
|
||||
rhs.ptr = nullptr;
|
||||
rhs.size = 0;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
int main(int, char **)
|
||||
{
|
||||
std::vector<Allocation> allocations;
|
||||
|
||||
constexpr size_t limit = 100000000;
|
||||
constexpr size_t min_alloc_size = 65536;
|
||||
constexpr size_t max_alloc_size = 10000000;
|
||||
|
||||
std::mt19937 rng;
|
||||
auto distribution = std::uniform_int_distribution(min_alloc_size, max_alloc_size);
|
||||
|
||||
size_t total_allocations = 0;
|
||||
|
||||
while (true)
|
||||
{
|
||||
size_t size = distribution(rng);
|
||||
|
||||
while (total_size + size > limit)
|
||||
allocations.pop_back();
|
||||
|
||||
allocations.emplace_back(size);
|
||||
|
||||
++total_allocations;
|
||||
if (total_allocations % (1ULL << 20) == 0)
|
||||
std::cerr << "Total allocations: " << total_allocations << "\n";
|
||||
}
|
||||
}
|
@ -124,6 +124,8 @@
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/// TODO Strangely enough, there is no way to detect UB sanitizer.
|
||||
|
||||
/// Explicitly allow undefined behaviour for certain functions. Use it as a function attribute.
|
||||
/// It is useful in case when compiler cannot see (and exploit) it, but UBSan can.
|
||||
/// Example: multiplication of signed integers with possibility of overflow when both sides are from user input.
|
||||
|
@ -42,8 +42,7 @@ struct Settings : public SettingsCollection<Settings>
|
||||
* but we are not going to do it, because settings is used everywhere as static struct fields.
|
||||
*/
|
||||
|
||||
/// M (mutable) for normal settings, IM (immutable) for not updateable settings.
|
||||
#define LIST_OF_SETTINGS(M, IM) \
|
||||
#define LIST_OF_SETTINGS(M) \
|
||||
M(SettingUInt64, min_compress_block_size, 65536, "The actual size of the block to compress, if the uncompressed data less than max_compress_block_size is no less than this value and no less than the volume of data for one mark.") \
|
||||
M(SettingUInt64, max_compress_block_size, 1048576, "The maximum size of blocks of uncompressed data before compressing for writing to a table.") \
|
||||
M(SettingUInt64, max_block_size, DEFAULT_BLOCK_SIZE, "Maximum block size for reading") \
|
||||
@ -61,7 +60,10 @@ struct Settings : public SettingsCollection<Settings>
|
||||
M(SettingSeconds, receive_timeout, DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC, "") \
|
||||
M(SettingSeconds, send_timeout, DBMS_DEFAULT_SEND_TIMEOUT_SEC, "") \
|
||||
M(SettingSeconds, tcp_keep_alive_timeout, 0, "") \
|
||||
M(SettingMilliseconds, queue_max_wait_ms, 5000, "The wait time in the request queue, if the number of concurrent requests exceeds the maximum.") \
|
||||
M(SettingMilliseconds, queue_max_wait_ms, 0, "The wait time in the request queue, if the number of concurrent requests exceeds the maximum.") \
|
||||
M(SettingMilliseconds, connection_pool_max_wait_ms, 0, "The wait time when connection pool is full.") \
|
||||
M(SettingMilliseconds, replace_running_query_max_wait_ms, 5000, "The wait time for running query with the same query_id to finish when setting 'replace_running_query' is active.") \
|
||||
M(SettingMilliseconds, kafka_max_wait_ms, 5000, "The wait time for reading from Kafka before retry.") \
|
||||
M(SettingUInt64, poll_interval, DBMS_DEFAULT_POLL_INTERVAL, "Block at the query wait loop on the server for the specified number of seconds.") \
|
||||
M(SettingUInt64, idle_connection_timeout, 3600, "Close idle TCP connections after specified number of seconds.") \
|
||||
M(SettingUInt64, distributed_connections_pool_size, DBMS_DEFAULT_DISTRIBUTED_CONNECTIONS_POOL_SIZE, "Maximum number of connections with one remote server in the pool.") \
|
||||
@ -302,7 +304,7 @@ struct Settings : public SettingsCollection<Settings>
|
||||
M(SettingChar, format_csv_delimiter, ',', "The character to be considered as a delimiter in CSV data. If setting with a string, a string has to have a length of 1.") \
|
||||
M(SettingBool, format_csv_allow_single_quotes, 1, "If it is set to true, allow strings in single quotes.") \
|
||||
M(SettingBool, format_csv_allow_double_quotes, 1, "If it is set to true, allow strings in double quotes.") \
|
||||
M(SettingBool, input_format_csv_unquoted_null_literal_as_null, false, "Consider unquoted NULL literal as \N") \
|
||||
M(SettingBool, input_format_csv_unquoted_null_literal_as_null, false, "Consider unquoted NULL literal as \\N") \
|
||||
\
|
||||
M(SettingDateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic' and 'best_effort'.") \
|
||||
M(SettingBool, log_profile_events, true, "Log query performance statistics into the query_log and query_thread_log.") \
|
||||
@ -347,6 +349,7 @@ struct Settings : public SettingsCollection<Settings>
|
||||
M(SettingSeconds, live_view_heartbeat_interval, DEFAULT_LIVE_VIEW_HEARTBEAT_INTERVAL_SEC, "The heartbeat interval in seconds to indicate live query is alive.") \
|
||||
M(SettingSeconds, temporary_live_view_timeout, DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC, "Timeout after which temporary live view is deleted.") \
|
||||
M(SettingUInt64, max_live_view_insert_blocks_before_refresh, 64, "Limit maximum number of inserted blocks after which mergeable blocks are dropped and query is re-executed.") \
|
||||
M(SettingUInt64, min_free_disk_space_for_temporary_data, 0, "The minimum disk space to keep while writing temporary data used in external sorting and aggregation.") \
|
||||
\
|
||||
/** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
|
||||
\
|
||||
|
@ -17,11 +17,6 @@ class Field;
|
||||
class ReadBuffer;
|
||||
class WriteBuffer;
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int IMMUTABLE_SETTING;
|
||||
}
|
||||
|
||||
/** One setting for any type.
|
||||
* Stores a value within itself, as well as a flag - whether the value was changed.
|
||||
* This is done so that you can send to the remote servers only changed settings (or explicitly specified in the config) values.
|
||||
@ -317,15 +312,12 @@ private:
|
||||
using DeserializeFunction = void (*)(Derived &, ReadBuffer & buf);
|
||||
using CastValueWithoutApplyingFunction = Field (*)(const Field &);
|
||||
|
||||
|
||||
struct MemberInfo
|
||||
{
|
||||
IsChangedFunction is_changed;
|
||||
StringRef name;
|
||||
StringRef description;
|
||||
/// Can be updated after first load for config/definition.
|
||||
/// Non updatable settings can be `changed`,
|
||||
/// if they were overwritten in config/definition.
|
||||
const bool updateable;
|
||||
GetStringFunction get_string;
|
||||
GetFieldFunction get_field;
|
||||
SetStringFunction set_string;
|
||||
@ -405,7 +397,6 @@ public:
|
||||
const_reference(const const_reference & src) = default;
|
||||
const StringRef & getName() const { return member->name; }
|
||||
const StringRef & getDescription() const { return member->description; }
|
||||
bool isUpdateable() const { return member->updateable; }
|
||||
bool isChanged() const { return member->isChanged(*collection); }
|
||||
Field getValue() const { return member->get_field(*collection); }
|
||||
String getValueAsString() const { return member->get_string(*collection); }
|
||||
@ -425,18 +416,6 @@ public:
|
||||
reference(const const_reference & src) : const_reference(src) {}
|
||||
void setValue(const Field & value) { this->member->set_field(*const_cast<Derived *>(this->collection), value); }
|
||||
void setValue(const String & value) { this->member->set_string(*const_cast<Derived *>(this->collection), value); }
|
||||
void updateValue(const Field & value)
|
||||
{
|
||||
if (!this->member->updateable)
|
||||
throw Exception("Setting '" + this->member->name.toString() + "' is restricted for updates.", ErrorCodes::IMMUTABLE_SETTING);
|
||||
setValue(value);
|
||||
}
|
||||
void updateValue(const String & value)
|
||||
{
|
||||
if (!this->member->updateable)
|
||||
throw Exception("Setting '" + this->member->name.toString() + "' is restricted for updates.", ErrorCodes::IMMUTABLE_SETTING);
|
||||
setValue(value);
|
||||
}
|
||||
};
|
||||
|
||||
/// Iterator to iterating through all the settings.
|
||||
@ -519,15 +498,6 @@ public:
|
||||
void set(size_t index, const String & value) { (*this)[index].setValue(value); }
|
||||
void set(const String & name, const String & value) { (*this)[name].setValue(value); }
|
||||
|
||||
/// Updates setting's value. Checks it' mutability.
|
||||
void update(size_t index, const Field & value) { (*this)[index].updateValue(value); }
|
||||
|
||||
void update(const String & name, const Field & value) { (*this)[name].updateValue(value); }
|
||||
|
||||
void update(size_t index, const String & value) { (*this)[index].updateValue(value); }
|
||||
|
||||
void update(const String & name, const String & value) { (*this)[name].updateValue(value); }
|
||||
|
||||
/// Returns value of a setting.
|
||||
Field get(size_t index) const { return (*this)[index].getValue(); }
|
||||
Field get(const String & name) const { return (*this)[name].getValue(); }
|
||||
@ -591,35 +561,19 @@ public:
|
||||
return found_changes;
|
||||
}
|
||||
|
||||
/// Applies change to the settings. Doesn't check settings mutability.
|
||||
void loadFromChange(const SettingChange & change)
|
||||
/// Applies change to concrete setting.
|
||||
void applyChange(const SettingChange & change)
|
||||
{
|
||||
set(change.name, change.value);
|
||||
}
|
||||
|
||||
/// Applies changes to the settings. Should be used in initial settings loading.
|
||||
/// (on table creation or loading from config)
|
||||
void loadFromChanges(const SettingsChanges & changes)
|
||||
/// Applies changes to the settings.
|
||||
void applyChanges(const SettingsChanges & changes)
|
||||
{
|
||||
for (const SettingChange & change : changes)
|
||||
loadFromChange(change);
|
||||
applyChange(change);
|
||||
}
|
||||
|
||||
/// Applies change to the settings, checks settings mutability.
|
||||
void updateFromChange(const SettingChange & change)
|
||||
{
|
||||
update(change.name, change.value);
|
||||
}
|
||||
|
||||
/// Applies changes to the settings. Should be used for settigns update.
|
||||
/// (ALTER MODIFY SETTINGS)
|
||||
void updateFromChanges(const SettingsChanges & changes)
|
||||
{
|
||||
for (const SettingChange & change : changes)
|
||||
updateFromChange(change);
|
||||
}
|
||||
|
||||
|
||||
void copyChangesFrom(const Derived & src)
|
||||
{
|
||||
for (const auto & member : members())
|
||||
@ -663,7 +617,7 @@ public:
|
||||
};
|
||||
|
||||
#define DECLARE_SETTINGS_COLLECTION(LIST_OF_SETTINGS_MACRO) \
|
||||
LIST_OF_SETTINGS_MACRO(DECLARE_SETTINGS_COLLECTION_DECLARE_VARIABLES_HELPER_, DECLARE_SETTINGS_COLLECTION_DECLARE_VARIABLES_HELPER_)
|
||||
LIST_OF_SETTINGS_MACRO(DECLARE_SETTINGS_COLLECTION_DECLARE_VARIABLES_HELPER_)
|
||||
|
||||
|
||||
#define IMPLEMENT_SETTINGS_COLLECTION(DERIVED_CLASS_NAME, LIST_OF_SETTINGS_MACRO) \
|
||||
@ -673,9 +627,9 @@ public:
|
||||
using Derived = DERIVED_CLASS_NAME; \
|
||||
struct Functions \
|
||||
{ \
|
||||
LIST_OF_SETTINGS_MACRO(IMPLEMENT_SETTINGS_COLLECTION_DEFINE_FUNCTIONS_HELPER_, IMPLEMENT_SETTINGS_COLLECTION_DEFINE_FUNCTIONS_HELPER_) \
|
||||
LIST_OF_SETTINGS_MACRO(IMPLEMENT_SETTINGS_COLLECTION_DEFINE_FUNCTIONS_HELPER_) \
|
||||
}; \
|
||||
LIST_OF_SETTINGS_MACRO(IMPLEMENT_SETTINGS_COLLECTION_ADD_MUTABLE_MEMBER_INFO_HELPER_, IMPLEMENT_SETTINGS_COLLECTION_ADD_IMMUTABLE_MEMBER_INFO_HELPER_) \
|
||||
LIST_OF_SETTINGS_MACRO(IMPLEMENT_SETTINGS_COLLECTION_ADD_MEMBER_INFO_HELPER_) \
|
||||
}
|
||||
|
||||
|
||||
@ -690,22 +644,14 @@ public:
|
||||
static void NAME##_setField(Derived & collection, const Field & value) { collection.NAME.set(value); } \
|
||||
static void NAME##_serialize(const Derived & collection, WriteBuffer & buf) { collection.NAME.serialize(buf); } \
|
||||
static void NAME##_deserialize(Derived & collection, ReadBuffer & buf) { collection.NAME.deserialize(buf); } \
|
||||
static Field NAME##_castValueWithoutApplying(const Field & value) { TYPE temp{DEFAULT}; temp.set(value); return temp.toField(); }
|
||||
static Field NAME##_castValueWithoutApplying(const Field & value) { TYPE temp{DEFAULT}; temp.set(value); return temp.toField(); } \
|
||||
|
||||
|
||||
#define IMPLEMENT_SETTINGS_COLLECTION_ADD_MUTABLE_MEMBER_INFO_HELPER_(TYPE, NAME, DEFAULT, DESCRIPTION) \
|
||||
#define IMPLEMENT_SETTINGS_COLLECTION_ADD_MEMBER_INFO_HELPER_(TYPE, NAME, DEFAULT, DESCRIPTION) \
|
||||
add({[](const Derived & d) { return d.NAME.changed; }, \
|
||||
StringRef(#NAME, strlen(#NAME)), StringRef(#DESCRIPTION, strlen(#DESCRIPTION)), true, \
|
||||
StringRef(#NAME, strlen(#NAME)), StringRef(DESCRIPTION, strlen(DESCRIPTION)), \
|
||||
&Functions::NAME##_getString, &Functions::NAME##_getField, \
|
||||
&Functions::NAME##_setString, &Functions::NAME##_setField, \
|
||||
&Functions::NAME##_serialize, &Functions::NAME##_deserialize, \
|
||||
&Functions::NAME##_castValueWithoutApplying });
|
||||
|
||||
#define IMPLEMENT_SETTINGS_COLLECTION_ADD_IMMUTABLE_MEMBER_INFO_HELPER_(TYPE, NAME, DEFAULT, DESCRIPTION) \
|
||||
add({[](const Derived & d) { return d.NAME.changed; }, \
|
||||
StringRef(#NAME, strlen(#NAME)), StringRef(#DESCRIPTION, strlen(#DESCRIPTION)), false, \
|
||||
&Functions::NAME##_getString, &Functions::NAME##_getField, \
|
||||
&Functions::NAME##_setString, &Functions::NAME##_setField, \
|
||||
&Functions::NAME##_serialize, &Functions::NAME##_deserialize, \
|
||||
&Functions::NAME##_castValueWithoutApplying });
|
||||
}
|
||||
|
@ -314,7 +314,11 @@ private:
|
||||
/// NOTE: Acquires a read lock, therefore f() should be thread-safe
|
||||
std::shared_lock lock(children_mutex);
|
||||
|
||||
for (auto & child : children)
|
||||
// Reduce lock scope and avoid recursive locking since that is undefined for shared_mutex.
|
||||
const auto children_copy = children;
|
||||
lock.unlock();
|
||||
|
||||
for (auto & child : children_copy)
|
||||
if (f(*child))
|
||||
return;
|
||||
}
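The same copy-then-unlock idiom, shown standalone as a minimal sketch (standard library only; the container and callback are illustrative):

#include <memory>
#include <shared_mutex>
#include <vector>

struct Node { /* ... */ };
using NodePtr = std::shared_ptr<Node>;

std::shared_mutex children_mutex;
std::vector<NodePtr> children;

template <typename F>
void forEachChild(F && f)
{
    /// Copy the container under a short read lock, then release the lock,
    /// so f() may safely take the same (non-recursive) shared_mutex again.
    std::shared_lock lock(children_mutex);
    const auto children_copy = children;
    lock.unlock();

    for (const auto & child : children_copy)
        if (f(*child))
            return;
}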
|
||||
|
@ -6,10 +6,6 @@
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <Common/PODArray.h>
|
||||
|
||||
#include <Common/config.h>
|
||||
#if USE_MIMALLOC
|
||||
#include <Common/MiAllocator.h>
|
||||
#endif
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -43,9 +39,7 @@ struct MarkInCompressedFile
|
||||
}
|
||||
|
||||
};
|
||||
#if USE_MIMALLOC
|
||||
using MarksInCompressedFile = PODArray<MarkInCompressedFile, 4096, MiAllocator>;
|
||||
#else
|
||||
|
||||
using MarksInCompressedFile = PODArray<MarkInCompressedFile>;
|
||||
#endif
|
||||
|
||||
}
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <DataStreams/copyData.h>
|
||||
#include <DataStreams/processConstants.h>
|
||||
#include <Common/formatReadable.h>
|
||||
#include <common/config_common.h>
|
||||
#include <IO/WriteBufferFromFile.h>
|
||||
#include <Compression/CompressedWriteBuffer.h>
|
||||
#include <Interpreters/sortBlock.h>
|
||||
@ -21,10 +22,11 @@ namespace DB
|
||||
MergeSortingBlockInputStream::MergeSortingBlockInputStream(
|
||||
const BlockInputStreamPtr & input, SortDescription & description_,
|
||||
size_t max_merged_block_size_, UInt64 limit_, size_t max_bytes_before_remerge_,
|
||||
size_t max_bytes_before_external_sort_, const std::string & tmp_path_)
|
||||
size_t max_bytes_before_external_sort_, const std::string & tmp_path_, size_t min_free_disk_space_)
|
||||
: description(description_), max_merged_block_size(max_merged_block_size_), limit(limit_),
|
||||
max_bytes_before_remerge(max_bytes_before_remerge_),
|
||||
max_bytes_before_external_sort(max_bytes_before_external_sort_), tmp_path(tmp_path_)
|
||||
max_bytes_before_external_sort(max_bytes_before_external_sort_), tmp_path(tmp_path_),
|
||||
min_free_disk_space(min_free_disk_space_)
|
||||
{
|
||||
children.push_back(input);
|
||||
header = children.at(0)->getHeader();
|
||||
@ -77,6 +79,12 @@ Block MergeSortingBlockInputStream::readImpl()
|
||||
*/
|
||||
if (max_bytes_before_external_sort && sum_bytes_in_blocks > max_bytes_before_external_sort)
|
||||
{
|
||||
#if !UNBUNDLED
|
||||
auto free_space = Poco::File(tmp_path).freeSpace();
|
||||
if (sum_bytes_in_blocks + min_free_disk_space > free_space)
|
||||
throw Exception("Not enough space for external sort in " + tmp_path, ErrorCodes::NOT_ENOUGH_SPACE);
|
||||
#endif
|
||||
|
||||
Poco::File(tmp_path).createDirectories();
|
||||
temporary_files.emplace_back(std::make_unique<Poco::TemporaryFile>(tmp_path));
|
||||
const std::string & path = temporary_files.back()->path();
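The free-space check added above amounts to a small guard before spilling; a minimal sketch, assuming Poco::File::freeSpace() and the NOT_ENOUGH_SPACE error code as used in this hunk (the helper name is illustrative):

/// Illustrative helper, not part of this change.
static void checkFreeSpaceForExternalSort(const std::string & tmp_path, size_t bytes_to_spill, size_t min_free_disk_space)
{
    auto free_space = Poco::File(tmp_path).freeSpace();
    /// Refuse to start an external sort that would leave less than min_free_disk_space on the volume.
    if (bytes_to_spill + min_free_disk_space > free_space)
        throw Exception("Not enough space for external sort in " + tmp_path, ErrorCodes::NOT_ENOUGH_SPACE);
}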
|
||||
|
@ -18,6 +18,10 @@
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int NOT_ENOUGH_SPACE;
|
||||
}
|
||||
/** Merges stream of sorted each-separately blocks to sorted as-a-whole stream of blocks.
|
||||
* If data to sort is too much, could use external sorting, with temporary files.
|
||||
*/
|
||||
@ -73,7 +77,8 @@ public:
|
||||
MergeSortingBlockInputStream(const BlockInputStreamPtr & input, SortDescription & description_,
|
||||
size_t max_merged_block_size_, UInt64 limit_,
|
||||
size_t max_bytes_before_remerge_,
|
||||
size_t max_bytes_before_external_sort_, const std::string & tmp_path_);
|
||||
size_t max_bytes_before_external_sort_, const std::string & tmp_path_,
|
||||
size_t min_free_disk_space_);
|
||||
|
||||
String getName() const override { return "MergeSorting"; }
|
||||
|
||||
@ -93,6 +98,7 @@ private:
|
||||
size_t max_bytes_before_remerge;
|
||||
size_t max_bytes_before_external_sort;
|
||||
const std::string tmp_path;
|
||||
size_t min_free_disk_space;
|
||||
|
||||
Logger * log = &Logger::get("MergeSortingBlockInputStream");
|
||||
|
||||
|
@ -26,7 +26,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
|
||||
* Although now any insertion into the table is done via PushingToViewsBlockOutputStream,
|
||||
* but it's clear that here is not the best place for this functionality.
|
||||
*/
|
||||
addTableLock(storage->lockStructureForShare(true, context.getCurrentQueryId()));
|
||||
addTableLock(storage->lockStructureForShare(true, context.getInitialQueryId()));
|
||||
|
||||
/// If the "root" table deduplicates blocks, there is no need to make deduplication for children
|
||||
/// Moreover, deduplication for AggregatingMergeTree children could produce false positives due to the small size of inserted blocks
|
||||
|
@ -69,22 +69,19 @@ bool TTLBlockInputStream::isTTLExpired(time_t ttl)
|
||||
|
||||
Block TTLBlockInputStream::readImpl()
|
||||
{
|
||||
/// Skip all data if table ttl is expired for part
|
||||
if (storage.hasTableTTL() && isTTLExpired(old_ttl_infos.table_ttl.max))
|
||||
{
|
||||
rows_removed = data_part->rows_count;
|
||||
return {};
|
||||
}
|
||||
|
||||
Block block = children.at(0)->read();
|
||||
if (!block)
|
||||
return block;
|
||||
|
||||
if (storage.hasTableTTL())
|
||||
{
|
||||
/// Skip all data if table ttl is expired for part
|
||||
if (isTTLExpired(old_ttl_infos.table_ttl.max))
|
||||
{
|
||||
rows_removed = data_part->rows_count;
|
||||
return {};
|
||||
}
|
||||
|
||||
if (force || isTTLExpired(old_ttl_infos.table_ttl.min))
|
||||
removeRowsWithExpiredTableTTL(block);
|
||||
}
|
||||
if (storage.hasTableTTL() && (force || isTTLExpired(old_ttl_infos.table_ttl.min)))
|
||||
removeRowsWithExpiredTableTTL(block);
|
||||
|
||||
removeValuesWithExpiredColumnTTL(block);
|
||||
|
||||
@ -94,9 +91,9 @@ Block TTLBlockInputStream::readImpl()
|
||||
void TTLBlockInputStream::readSuffixImpl()
|
||||
{
|
||||
for (const auto & elem : new_ttl_infos.columns_ttl)
|
||||
new_ttl_infos.updatePartMinTTL(elem.second.min);
|
||||
new_ttl_infos.updatePartMinMaxTTL(elem.second.min, elem.second.max);
|
||||
|
||||
new_ttl_infos.updatePartMinTTL(new_ttl_infos.table_ttl.min);
|
||||
new_ttl_infos.updatePartMinMaxTTL(new_ttl_infos.table_ttl.min, new_ttl_infos.table_ttl.max);
|
||||
|
||||
data_part->ttl_infos = std::move(new_ttl_infos);
|
||||
data_part->empty_columns = std::move(empty_columns);
|
||||
|
@ -115,26 +115,6 @@ void DatabaseDictionary::removeTable(
|
||||
throw Exception("DatabaseDictionary: removeTable() is not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
void DatabaseDictionary::renameTable(
|
||||
const Context &,
|
||||
const String &,
|
||||
IDatabase &,
|
||||
const String &)
|
||||
{
|
||||
throw Exception("DatabaseDictionary: renameTable() is not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
void DatabaseDictionary::alterTable(
|
||||
const Context &,
|
||||
const String &,
|
||||
const ColumnsDescription &,
|
||||
const IndicesDescription &,
|
||||
const ConstraintsDescription &,
|
||||
const ASTModifier &)
|
||||
{
|
||||
throw Exception("DatabaseDictionary: alterTable() is not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
time_t DatabaseDictionary::getTableMetadataModificationTime(
|
||||
const Context &,
|
||||
const String &)
|
||||
|
@ -60,20 +60,6 @@ public:
|
||||
void attachTable(const String & table_name, const StoragePtr & table) override;
|
||||
StoragePtr detachTable(const String & table_name) override;
|
||||
|
||||
void renameTable(
|
||||
const Context & context,
|
||||
const String & table_name,
|
||||
IDatabase & to_database,
|
||||
const String & to_table_name) override;
|
||||
|
||||
void alterTable(
|
||||
const Context & context,
|
||||
const String & name,
|
||||
const ColumnsDescription & columns,
|
||||
const IndicesDescription & indices,
|
||||
const ConstraintsDescription & constraints,
|
||||
const ASTModifier & engine_modifier) override;
|
||||
|
||||
time_t getTableMetadataModificationTime(
|
||||
const Context & context,
|
||||
const String & table_name) override;
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/formatAST.h>
|
||||
#include <Parsers/ASTCreateQuery.h>
|
||||
#include <Parsers/ASTFunction.h>
|
||||
#include <Common/parseAddress.h>
|
||||
#include "config_core.h"
|
||||
#if USE_MYSQL
|
||||
|
@ -39,26 +39,6 @@ void DatabaseMemory::removeTable(
|
||||
detachTable(table_name);
|
||||
}
|
||||
|
||||
void DatabaseMemory::renameTable(
|
||||
const Context &,
|
||||
const String &,
|
||||
IDatabase &,
|
||||
const String &)
|
||||
{
|
||||
throw Exception("DatabaseMemory: renameTable() is not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
void DatabaseMemory::alterTable(
|
||||
const Context &,
|
||||
const String &,
|
||||
const ColumnsDescription &,
|
||||
const IndicesDescription &,
|
||||
const ConstraintsDescription &,
|
||||
const ASTModifier &)
|
||||
{
|
||||
throw Exception("DatabaseMemory: alterTable() is not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
time_t DatabaseMemory::getTableMetadataModificationTime(
|
||||
const Context &,
|
||||
const String &)
|
||||
|
@ -37,20 +37,6 @@ public:
|
||||
const Context & context,
|
||||
const String & table_name) override;
|
||||
|
||||
void renameTable(
|
||||
const Context & context,
|
||||
const String & table_name,
|
||||
IDatabase & to_database,
|
||||
const String & to_table_name) override;
|
||||
|
||||
void alterTable(
|
||||
const Context & context,
|
||||
const String & name,
|
||||
const ColumnsDescription & columns,
|
||||
const IndicesDescription & indices,
|
||||
const ConstraintsDescription & constraints,
|
||||
const ASTModifier & engine_modifier) override;
|
||||
|
||||
time_t getTableMetadataModificationTime(
|
||||
const Context & context,
|
||||
const String & table_name) override;
|
||||
|
@ -5,6 +5,8 @@
|
||||
|
||||
#include <mysqlxx/Pool.h>
|
||||
#include <Databases/DatabasesCommon.h>
|
||||
#include <Interpreters/Context.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -61,21 +63,11 @@ public:
|
||||
throw Exception("MySQL database engine does not support attach table.", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
void renameTable(const Context &, const String &, IDatabase &, const String &) override
|
||||
{
|
||||
throw Exception("MySQL database engine does not support rename table.", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
void createTable(const Context &, const String &, const StoragePtr &, const ASTPtr &) override
|
||||
{
|
||||
throw Exception("MySQL database engine does not support create table.", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
void alterTable(const Context &, const String &, const ColumnsDescription &, const IndicesDescription &, const ConstraintsDescription &, const ASTModifier &) override
|
||||
{
|
||||
throw Exception("MySQL database engine does not support alter table.", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
private:
|
||||
struct MySQLStorageInfo
|
||||
{
|
||||
|
@ -135,7 +135,25 @@ void DatabaseOrdinary::loadTables(
|
||||
if (endsWith(dir_it.name(), ".sql.bak"))
|
||||
continue;
|
||||
|
||||
/// There are files .sql.tmp - delete.
|
||||
// There are files that we tried to delete previously
|
||||
static const char * tmp_drop_ext = ".sql.tmp_drop";
|
||||
if (endsWith(dir_it.name(), tmp_drop_ext))
|
||||
{
|
||||
const std::string table_name = dir_it.name().substr(0, dir_it.name().size() - strlen(tmp_drop_ext));
|
||||
if (Poco::File(data_path + '/' + table_name).exists())
|
||||
{
|
||||
Poco::File(dir_it->path()).renameTo(table_name + ".sql");
|
||||
LOG_WARNING(log, "Table " << backQuote(table_name) << " was not dropped previously");
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_INFO(log, "Removing file " << dir_it->path());
|
||||
Poco::File(dir_it->path()).remove();
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
/// There are files .sql.tmp - delete
|
||||
if (endsWith(dir_it.name(), ".sql.tmp"))
|
||||
{
|
||||
LOG_INFO(log, "Removing file " << dir_it->path());
|
||||
@ -302,6 +320,15 @@ void DatabaseOrdinary::removeTable(
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
try
|
||||
{
|
||||
Poco::File(table_metadata_path + ".tmp_drop").remove();
|
||||
return;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
LOG_WARNING(log, getCurrentExceptionMessage(__PRETTY_FUNCTION__));
|
||||
}
|
||||
attachTable(table_name, res);
|
||||
throw;
|
||||
}
|
||||
@ -355,7 +382,8 @@ void DatabaseOrdinary::renameTable(
|
||||
const Context & context,
|
||||
const String & table_name,
|
||||
IDatabase & to_database,
|
||||
const String & to_table_name)
|
||||
const String & to_table_name,
|
||||
TableStructureWriteLockHolder & lock)
|
||||
{
|
||||
DatabaseOrdinary * to_database_concrete = typeid_cast<DatabaseOrdinary *>(&to_database);
|
||||
|
||||
@ -372,7 +400,7 @@ void DatabaseOrdinary::renameTable(
|
||||
{
|
||||
table->rename(context.getPath() + "/data/" + escapeForFileName(to_database_concrete->name) + "/",
|
||||
to_database_concrete->name,
|
||||
to_table_name);
|
||||
to_table_name, lock);
|
||||
}
|
||||
catch (const Exception &)
|
||||
{
|
||||
|
@ -1,6 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <Databases/DatabasesCommon.h>
|
||||
#include <Common/ThreadPool.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -35,7 +36,8 @@ public:
|
||||
const Context & context,
|
||||
const String & table_name,
|
||||
IDatabase & to_database,
|
||||
const String & to_table_name) override;
|
||||
const String & to_table_name,
|
||||
TableStructureWriteLockHolder &) override;
|
||||
|
||||
void alterTable(
|
||||
const Context & context,
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <Parsers/IAST.h>
|
||||
#include <Storages/IStorage_fwd.h>
|
||||
#include <Databases/IDatabase.h>
|
||||
#include <mutex>
|
||||
|
||||
|
||||
/// General functionality for several different database engines.
|
||||
|
@ -1,16 +1,9 @@
|
||||
#pragma once
|
||||
|
||||
#include <Core/NamesAndTypes.h>
|
||||
#include <Core/Types.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Parsers/IAST_fwd.h>
|
||||
#include <Storages/ColumnsDescription.h>
|
||||
#include <Storages/IndicesDescription.h>
|
||||
#include <Storages/ConstraintsDescription.h>
|
||||
#include <Storages/IStorage_fwd.h>
|
||||
#include <Poco/File.h>
|
||||
#include <Common/ThreadPool.h>
|
||||
#include <Common/escapeForFileName.h>
|
||||
#include <Common/Exception.h>
|
||||
|
||||
#include <ctime>
|
||||
#include <functional>
|
||||
@ -21,8 +14,16 @@ namespace DB
|
||||
{
|
||||
|
||||
class Context;
|
||||
|
||||
struct Settings;
|
||||
struct ConstraintsDescription;
|
||||
class ColumnsDescription;
|
||||
struct IndicesDescription;
|
||||
struct TableStructureWriteLockHolder;
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int NOT_IMPLEMENTED;
|
||||
}
|
||||
|
||||
|
||||
/** Allows to iterate over tables.
|
||||
@ -102,22 +103,29 @@ public:
|
||||
|
||||
/// Rename the table and possibly move the table to another database.
|
||||
virtual void renameTable(
|
||||
const Context & context,
|
||||
const String & name,
|
||||
IDatabase & to_database,
|
||||
const String & to_name) = 0;
|
||||
const Context & /*context*/,
|
||||
const String & /*name*/,
|
||||
IDatabase & /*to_database*/,
|
||||
const String & /*to_name*/,
|
||||
TableStructureWriteLockHolder &)
|
||||
{
|
||||
throw Exception(getEngineName() + ": renameTable() is not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
using ASTModifier = std::function<void(IAST &)>;
|
||||
|
||||
/// Change the table structure in metadata.
|
||||
/// You must call it under the TableStructureLock of the corresponding table. If engine_modifier is empty, then the engine does not change.
|
||||
virtual void alterTable(
|
||||
const Context & context,
|
||||
const String & name,
|
||||
const ColumnsDescription & columns,
|
||||
const IndicesDescription & indices,
|
||||
const ConstraintsDescription & constraints,
|
||||
const ASTModifier & engine_modifier) = 0;
|
||||
const Context & /*context*/,
|
||||
const String & /*name*/,
|
||||
const ColumnsDescription & /*columns*/,
|
||||
const IndicesDescription & /*indices*/,
|
||||
const ConstraintsDescription & /*constraints*/,
|
||||
const ASTModifier & /*engine_modifier*/)
|
||||
{
|
||||
throw Exception(getEngineName() + ": alterTable() is not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
/// Returns time of table's metadata change, 0 if there is no corresponding metadata file.
|
||||
virtual time_t getTableMetadataModificationTime(
|
||||
|
@ -15,7 +15,7 @@ list(REMOVE_ITEM clickhouse_dictionaries_sources DictionaryFactory.cpp Dictionar
|
||||
list(REMOVE_ITEM clickhouse_dictionaries_headers DictionaryFactory.h DictionarySourceFactory.h DictionaryStructure.h)
|
||||
|
||||
add_library(clickhouse_dictionaries ${clickhouse_dictionaries_sources})
|
||||
target_link_libraries(clickhouse_dictionaries PRIVATE dbms clickhouse_common_io ${BTRIE_LIBRARIES} PUBLIC Threads::Threads)
|
||||
target_link_libraries(clickhouse_dictionaries PRIVATE dbms clickhouse_common_io ${BTRIE_LIBRARIES})
|
||||
|
||||
if(Poco_SQL_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY)
|
||||
target_include_directories(clickhouse_dictionaries SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR})
|
||||
|
@ -70,6 +70,7 @@ CacheDictionary::CacheDictionary(
|
||||
, dict_struct(dict_struct_)
|
||||
, source_ptr{std::move(source_ptr_)}
|
||||
, dict_lifetime(dict_lifetime_)
|
||||
, log(&Logger::get("ExternalDictionaries"))
|
||||
, size{roundUpToPowerOfTwoOrZero(std::max(size_, size_t(max_collision_length)))}
|
||||
, size_overlap_mask{this->size - 1}
|
||||
, cells{this->size}
|
||||
@ -575,6 +576,12 @@ BlockInputStreamPtr CacheDictionary::getBlockInputStream(const Names & column_na
|
||||
return std::make_shared<BlockInputStreamType>(shared_from_this(), max_block_size, getCachedIds(), column_names);
|
||||
}
|
||||
|
||||
std::exception_ptr CacheDictionary::getLastException() const
|
||||
{
|
||||
const ProfilingScopedReadRWLock read_lock{rw_lock, ProfileEvents::DictCacheLockReadNs};
|
||||
return last_exception;
|
||||
}
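A caller that wants to surface the most recent failed cache update can poll this accessor between lookups; a hedged sketch (the dictionary pointer and logger are assumed to exist in the calling code):

/// Illustrative only: report a pending update error of a cache dictionary, if any.
if (auto exception = dictionary->getLastException())
{
    try
    {
        std::rethrow_exception(exception);
    }
    catch (const Exception & e)
    {
        LOG_WARNING(log, "Cache dictionary '" << dictionary->getName() << "' failed to update: " << e.displayText());
    }
}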
|
||||
|
||||
void registerDictionaryCache(DictionaryFactory & factory)
|
||||
{
|
||||
auto create_layout = [=](const std::string & name,
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <shared_mutex>
|
||||
#include <variant>
|
||||
#include <vector>
|
||||
#include <common/logger_useful.h>
|
||||
#include <Columns/ColumnDecimal.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <pcg_random.hpp>
|
||||
@ -74,6 +75,8 @@ public:
|
||||
void isInVectorConstant(const PaddedPODArray<Key> & child_ids, const Key ancestor_id, PaddedPODArray<UInt8> & out) const override;
|
||||
void isInConstantVector(const Key child_id, const PaddedPODArray<Key> & ancestor_ids, PaddedPODArray<UInt8> & out) const override;
|
||||
|
||||
std::exception_ptr getLastException() const override;
|
||||
|
||||
template <typename T>
|
||||
using ResultArrayType = std::conditional_t<IsDecimalNumber<T>, DecimalPaddedPODArray<T>, PaddedPODArray<T>>;
|
||||
|
||||
@ -253,8 +256,9 @@ private:
|
||||
|
||||
const std::string name;
|
||||
const DictionaryStructure dict_struct;
|
||||
const DictionarySourcePtr source_ptr;
|
||||
mutable DictionarySourcePtr source_ptr;
|
||||
const DictionaryLifetime dict_lifetime;
|
||||
Logger * const log;
|
||||
|
||||
mutable std::shared_mutex rw_lock;
|
||||
|
||||
@ -274,6 +278,10 @@ private:
|
||||
Attribute * hierarchical_attribute = nullptr;
|
||||
std::unique_ptr<ArenaWithFreeLists> string_arena;
|
||||
|
||||
mutable std::exception_ptr last_exception;
|
||||
mutable size_t error_count = 0;
|
||||
mutable std::chrono::system_clock::time_point backoff_end_time;
|
||||
|
||||
mutable pcg64 rnd_engine;
|
||||
|
||||
mutable size_t bytes_allocated = 0;
|
||||
|
@ -3,6 +3,7 @@
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <Common/ProfilingScopedRWLock.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <common/DateLUT.h>
|
||||
#include <DataStreams/IBlockInputStream.h>
|
||||
#include <ext/map.h>
|
||||
#include <ext/range.h>
|
||||
@ -243,77 +244,102 @@ template <typename PresentIdHandler, typename AbsentIdHandler>
|
||||
void CacheDictionary::update(
|
||||
const std::vector<Key> & requested_ids, PresentIdHandler && on_cell_updated, AbsentIdHandler && on_id_not_found) const
|
||||
{
|
||||
CurrentMetrics::Increment metric_increment{CurrentMetrics::DictCacheRequests};
|
||||
ProfileEvents::increment(ProfileEvents::DictCacheKeysRequested, requested_ids.size());
|
||||
|
||||
std::unordered_map<Key, UInt8> remaining_ids{requested_ids.size()};
|
||||
for (const auto id : requested_ids)
|
||||
remaining_ids.insert({id, 0});
|
||||
|
||||
std::uniform_int_distribution<UInt64> distribution{dict_lifetime.min_sec, dict_lifetime.max_sec};
|
||||
const auto now = std::chrono::system_clock::now();
|
||||
|
||||
const ProfilingScopedWriteRWLock write_lock{rw_lock, ProfileEvents::DictCacheLockWriteNs};
|
||||
|
||||
if (now > backoff_end_time)
|
||||
{
|
||||
CurrentMetrics::Increment metric_increment{CurrentMetrics::DictCacheRequests};
|
||||
Stopwatch watch;
|
||||
auto stream = source_ptr->loadIds(requested_ids);
|
||||
stream->readPrefix();
|
||||
|
||||
const auto now = std::chrono::system_clock::now();
|
||||
|
||||
while (const auto block = stream->read())
|
||||
try
|
||||
{
|
||||
const auto id_column = typeid_cast<const ColumnUInt64 *>(block.safeGetByPosition(0).column.get());
|
||||
if (!id_column)
|
||||
throw Exception{name + ": id column has type different from UInt64.", ErrorCodes::TYPE_MISMATCH};
|
||||
|
||||
const auto & ids = id_column->getData();
|
||||
|
||||
/// cache column pointers
|
||||
const auto column_ptrs = ext::map<std::vector>(
|
||||
ext::range(0, attributes.size()), [&block](size_t i) { return block.safeGetByPosition(i + 1).column.get(); });
|
||||
|
||||
for (const auto i : ext::range(0, ids.size()))
|
||||
if (error_count)
|
||||
{
|
||||
const auto id = ids[i];
|
||||
|
||||
const auto find_result = findCellIdx(id, now);
|
||||
const auto & cell_idx = find_result.cell_idx;
|
||||
|
||||
auto & cell = cells[cell_idx];
|
||||
|
||||
for (const auto attribute_idx : ext::range(0, attributes.size()))
|
||||
{
|
||||
const auto & attribute_column = *column_ptrs[attribute_idx];
|
||||
auto & attribute = attributes[attribute_idx];
|
||||
|
||||
setAttributeValue(attribute, cell_idx, attribute_column[i]);
|
||||
}
|
||||
|
||||
/// if cell id is zero and zero does not map to this cell, then the cell is unused
|
||||
if (cell.id == 0 && cell_idx != zero_cell_idx)
|
||||
element_count.fetch_add(1, std::memory_order_relaxed);
|
||||
|
||||
cell.id = id;
|
||||
if (dict_lifetime.min_sec != 0 && dict_lifetime.max_sec != 0)
|
||||
cell.setExpiresAt(std::chrono::system_clock::now() + std::chrono::seconds{distribution(rnd_engine)});
|
||||
else
|
||||
cell.setExpiresAt(std::chrono::time_point<std::chrono::system_clock>::max());
|
||||
|
||||
/// inform caller
|
||||
on_cell_updated(id, cell_idx);
|
||||
/// mark corresponding id as found
|
||||
remaining_ids[id] = 1;
|
||||
/// Recover after error: we have to clone the source here because
|
||||
/// it could keep connections which should be reset after error.
|
||||
source_ptr = source_ptr->clone();
|
||||
}
|
||||
|
||||
Stopwatch watch;
|
||||
auto stream = source_ptr->loadIds(requested_ids);
|
||||
stream->readPrefix();
|
||||
|
||||
while (const auto block = stream->read())
|
||||
{
|
||||
const auto id_column = typeid_cast<const ColumnUInt64 *>(block.safeGetByPosition(0).column.get());
|
||||
if (!id_column)
|
||||
throw Exception{name + ": id column has type different from UInt64.", ErrorCodes::TYPE_MISMATCH};
|
||||
|
||||
const auto & ids = id_column->getData();
|
||||
|
||||
/// cache column pointers
|
||||
const auto column_ptrs = ext::map<std::vector>(
|
||||
ext::range(0, attributes.size()), [&block](size_t i) { return block.safeGetByPosition(i + 1).column.get(); });
|
||||
|
||||
for (const auto i : ext::range(0, ids.size()))
|
||||
{
|
||||
const auto id = ids[i];
|
||||
|
||||
const auto find_result = findCellIdx(id, now);
|
||||
const auto & cell_idx = find_result.cell_idx;
|
||||
|
||||
auto & cell = cells[cell_idx];
|
||||
|
||||
for (const auto attribute_idx : ext::range(0, attributes.size()))
|
||||
{
|
||||
const auto & attribute_column = *column_ptrs[attribute_idx];
|
||||
auto & attribute = attributes[attribute_idx];
|
||||
|
||||
setAttributeValue(attribute, cell_idx, attribute_column[i]);
|
||||
}
|
||||
|
||||
/// if cell id is zero and zero does not map to this cell, then the cell is unused
|
||||
if (cell.id == 0 && cell_idx != zero_cell_idx)
|
||||
element_count.fetch_add(1, std::memory_order_relaxed);
|
||||
|
||||
cell.id = id;
|
||||
if (dict_lifetime.min_sec != 0 && dict_lifetime.max_sec != 0)
|
||||
{
|
||||
std::uniform_int_distribution<UInt64> distribution{dict_lifetime.min_sec, dict_lifetime.max_sec};
|
||||
cell.setExpiresAt(now + std::chrono::seconds{distribution(rnd_engine)});
|
||||
}
|
||||
else
|
||||
cell.setExpiresAt(std::chrono::time_point<std::chrono::system_clock>::max());
|
||||
|
||||
/// inform caller
|
||||
on_cell_updated(id, cell_idx);
|
||||
/// mark corresponding id as found
|
||||
remaining_ids[id] = 1;
|
||||
}
|
||||
}
|
||||
|
||||
stream->readSuffix();
|
||||
|
||||
error_count = 0;
|
||||
last_exception = std::exception_ptr{};
|
||||
backoff_end_time = std::chrono::system_clock::time_point{};
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::DictCacheRequestTimeNs, watch.elapsed());
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
++error_count;
|
||||
last_exception = std::current_exception();
|
||||
backoff_end_time = now + std::chrono::seconds(ExternalLoadableBackoff{}.calculateDuration(rnd_engine, error_count));
|
||||
|
||||
stream->readSuffix();
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::DictCacheKeysRequested, requested_ids.size());
|
||||
ProfileEvents::increment(ProfileEvents::DictCacheRequestTimeNs, watch.elapsed());
|
||||
tryLogException(last_exception, log, "Could not update cache dictionary '" + getName() +
|
||||
"', next update is scheduled at " + DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(backoff_end_time)));
|
||||
}
|
||||
}
|
||||
|
||||
size_t not_found_num = 0, found_num = 0;
|
||||
|
||||
const auto now = std::chrono::system_clock::now();
|
||||
/// Check which ids have not been found and require setting null_value
|
||||
for (const auto & id_found_pair : remaining_ids)
|
||||
{
|
||||
@ -328,24 +354,45 @@ void CacheDictionary::update(
|
||||
|
||||
const auto find_result = findCellIdx(id, now);
|
||||
const auto & cell_idx = find_result.cell_idx;
|
||||
|
||||
auto & cell = cells[cell_idx];
|
||||
|
||||
/// Set null_value for each attribute
|
||||
for (auto & attribute : attributes)
|
||||
setDefaultAttributeValue(attribute, cell_idx);
|
||||
if (error_count)
|
||||
{
|
||||
if (find_result.outdated)
|
||||
{
|
||||
/// We have expired data for that `id` so we can continue using it.
|
||||
bool was_default = cell.isDefault();
|
||||
cell.setExpiresAt(backoff_end_time);
|
||||
if (was_default)
|
||||
cell.setDefault();
|
||||
if (was_default)
|
||||
on_id_not_found(id, cell_idx);
|
||||
else
|
||||
on_cell_updated(id, cell_idx);
|
||||
continue;
|
||||
}
|
||||
/// We don't have expired data for that `id` so all we can do is to rethrow `last_exception`.
|
||||
std::rethrow_exception(last_exception);
|
||||
}
|
||||
|
||||
/// Check if cell had not been occupied before and increment element counter if it hadn't
|
||||
if (cell.id == 0 && cell_idx != zero_cell_idx)
|
||||
element_count.fetch_add(1, std::memory_order_relaxed);
|
||||
|
||||
cell.id = id;
|
||||
|
||||
if (dict_lifetime.min_sec != 0 && dict_lifetime.max_sec != 0)
|
||||
cell.setExpiresAt(std::chrono::system_clock::now() + std::chrono::seconds{distribution(rnd_engine)});
|
||||
{
|
||||
std::uniform_int_distribution<UInt64> distribution{dict_lifetime.min_sec, dict_lifetime.max_sec};
|
||||
cell.setExpiresAt(now + std::chrono::seconds{distribution(rnd_engine)});
|
||||
}
|
||||
else
|
||||
cell.setExpiresAt(std::chrono::time_point<std::chrono::system_clock>::max());
|
||||
|
||||
/// Set null_value for each attribute
|
||||
cell.setDefault();
|
||||
for (auto & attribute : attributes)
|
||||
setDefaultAttributeValue(attribute, cell_idx);
|
||||
|
||||
/// inform caller that the cell has not been found
|
||||
on_id_not_found(id, cell_idx);
|
||||
|
@ -56,6 +56,8 @@ struct IDictionaryBase : public IExternalLoadable
|
||||
return source && source->isModified();
|
||||
}
|
||||
|
||||
virtual std::exception_ptr getLastException() const { return {}; }
|
||||
|
||||
std::shared_ptr<IDictionaryBase> shared_from_this()
|
||||
{
|
||||
return std::static_pointer_cast<IDictionaryBase>(IExternalLoadable::shared_from_this());
|
||||
|
@ -65,7 +65,7 @@ FunctionBasePtr FunctionBuilderJoinGet::buildImpl(const ColumnsWithTypeAndName &
|
||||
auto join = storage_join->getJoin();
|
||||
DataTypes data_types(arguments.size());
|
||||
|
||||
auto table_lock = storage_join->lockStructureForShare(false, context.getCurrentQueryId());
|
||||
auto table_lock = storage_join->lockStructureForShare(false, context.getInitialQueryId());
|
||||
for (size_t i = 0; i < arguments.size(); ++i)
|
||||
data_types[i] = arguments[i].type;
|
||||
|
||||
|
@ -733,16 +733,15 @@ struct JSONExtractTree
|
||||
if (!JSONParser::firstArrayElement(it2))
|
||||
return false;
|
||||
|
||||
size_t index = 0;
|
||||
do
|
||||
for (size_t index = 0; index != nested.size(); ++index)
|
||||
{
|
||||
if (nested[index]->addValueToColumn(tuple.getColumn(index), it2))
|
||||
were_valid_elements = true;
|
||||
else
|
||||
tuple.getColumn(index).insertDefault();
|
||||
++index;
|
||||
if (!JSONParser::nextArrayElement(it2))
|
||||
break;
|
||||
}
|
||||
while (JSONParser::nextArrayElement(it2));
|
||||
|
||||
set_size(old_size + static_cast<size_t>(were_valid_elements));
|
||||
return were_valid_elements;
|
||||
@ -756,16 +755,15 @@ struct JSONExtractTree
|
||||
if (!JSONParser::firstObjectMember(it2))
|
||||
return false;
|
||||
|
||||
size_t index = 0;
|
||||
do
|
||||
for (size_t index = 0; index != nested.size(); ++index)
|
||||
{
|
||||
if (nested[index]->addValueToColumn(tuple.getColumn(index), it2))
|
||||
were_valid_elements = true;
|
||||
else
|
||||
tuple.getColumn(index).insertDefault();
|
||||
++index;
|
||||
if (!JSONParser::nextObjectMember(it2))
|
||||
break;
|
||||
}
|
||||
while (JSONParser::nextObjectMember(it2));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -6,12 +6,14 @@ class FunctionFactory;
|
||||
void registerFunctionAddressToSymbol(FunctionFactory & factory);
|
||||
void registerFunctionDemangle(FunctionFactory & factory);
|
||||
void registerFunctionAddressToLine(FunctionFactory & factory);
|
||||
void registerFunctionTrap(FunctionFactory & factory);
|
||||
|
||||
void registerFunctionsIntrospection(FunctionFactory & factory)
|
||||
{
|
||||
registerFunctionAddressToSymbol(factory);
|
||||
registerFunctionDemangle(factory);
|
||||
registerFunctionAddressToLine(factory);
|
||||
registerFunctionTrap(factory);
|
||||
}
|
||||
|
||||
}
|
||||
|
143
dbms/src/Functions/trap.cpp
Normal file
@ -0,0 +1,143 @@
|
||||
#if 0
|
||||
|
||||
#include <Functions/IFunction.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionHelpers.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
|
||||
#include <thread>
|
||||
#include <memory>
|
||||
#include <cstdlib>
|
||||
#include <unistd.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_COLUMN;
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
|
||||
|
||||
/// Various illegal actions to test diagnostic features of ClickHouse itself. Should not be enabled in production builds.
|
||||
class FunctionTrap : public IFunction
|
||||
{
|
||||
public:
|
||||
static constexpr auto name = "trap";
|
||||
static FunctionPtr create(const Context &)
|
||||
{
|
||||
return std::make_shared<FunctionTrap>();
|
||||
}
|
||||
|
||||
String getName() const override
|
||||
{
|
||||
return name;
|
||||
}
|
||||
|
||||
size_t getNumberOfArguments() const override
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
|
||||
{
|
||||
if (!isString(arguments[0]))
|
||||
throw Exception("The only argument for function " + getName() + " must be constant String", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
|
||||
return std::make_shared<DataTypeUInt8>();
|
||||
}
|
||||
|
||||
void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) override
|
||||
{
|
||||
if (const ColumnConst * column = checkAndGetColumnConst<ColumnString>(block.getByPosition(arguments[0]).column.get()))
|
||||
{
|
||||
String mode = column->getValue<String>();
|
||||
|
||||
if (mode == "read nullptr c++")
|
||||
{
|
||||
volatile int x = *reinterpret_cast<const volatile int *>(0);
|
||||
(void)x;
|
||||
}
|
||||
else if (mode == "read nullptr asm")
|
||||
{
|
||||
__asm__ volatile ("movq $0, %rax");
|
||||
__asm__ volatile ("movq (%rax), %rax");
|
||||
}
|
||||
else if (mode == "illegal instruction")
|
||||
{
|
||||
__asm__ volatile ("ud2a");
|
||||
}
|
||||
else if (mode == "abort")
|
||||
{
|
||||
abort();
|
||||
}
|
||||
else if (mode == "use after free")
|
||||
{
|
||||
int * x_ptr;
|
||||
{
|
||||
auto x = std::make_unique<int>();
|
||||
x_ptr = x.get();
|
||||
}
|
||||
*x_ptr = 1;
|
||||
(void)x_ptr;
|
||||
}
|
||||
else if (mode == "use after scope")
|
||||
{
|
||||
volatile int * x_ptr;
|
||||
[&]{
|
||||
volatile int x = 0;
|
||||
x_ptr = &x;
|
||||
(void)x;
|
||||
}();
|
||||
[&]{
|
||||
volatile int y = 1;
|
||||
*x_ptr = 2;
|
||||
(void)y;
|
||||
}();
|
||||
(void)x_ptr;
|
||||
}
|
||||
else if (mode == "uninitialized memory")
|
||||
{
|
||||
int x;
|
||||
(void)write(2, &x, sizeof(x));
|
||||
}
|
||||
else if (mode == "data race")
|
||||
{
|
||||
int x = 0;
|
||||
std::thread t1([&]{ ++x; });
|
||||
std::thread t2([&]{ ++x; });
|
||||
t1.join();
|
||||
t2.join();
|
||||
}
|
||||
else
|
||||
throw Exception("Unknown trap mode", ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
else
|
||||
throw Exception("The only argument for function " + getName() + " must be constant String", ErrorCodes::ILLEGAL_COLUMN);
|
||||
|
||||
block.getByPosition(result).column = block.getByPosition(result).type->createColumnConst(input_rows_count, 0ULL);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
void registerFunctionTrap(FunctionFactory & factory)
|
||||
{
|
||||
factory.registerFunction<FunctionTrap>();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
namespace DB
|
||||
{
|
||||
class FunctionFactory;
|
||||
void registerFunctionTrap(FunctionFactory &) {}
|
||||
}
|
||||
|
||||
#endif
|
@ -6,11 +6,6 @@
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <IO/BufferWithOwnMemory.h>
|
||||
|
||||
#include <Common/config.h>
|
||||
#if USE_MIMALLOC
|
||||
#include <Common/MiAllocator.h>
|
||||
#endif
|
||||
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
@ -25,11 +20,7 @@ namespace DB
|
||||
|
||||
struct UncompressedCacheCell
|
||||
{
|
||||
#if USE_MIMALLOC
|
||||
Memory<MiAllocator> data;
|
||||
#else
|
||||
Memory<> data;
|
||||
#endif
|
||||
size_t compressed_size;
|
||||
UInt32 additional_bytes;
|
||||
};
|
||||
|
@ -10,34 +10,38 @@ namespace ProfileEvents
|
||||
{
|
||||
extern const Event CreatedReadBufferOrdinary;
|
||||
extern const Event CreatedReadBufferAIO;
|
||||
extern const Event CreatedReadBufferAIOFailed;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
#if !defined(__linux__)
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int NOT_IMPLEMENTED;
|
||||
}
|
||||
#endif
|
||||
|
||||
std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(const std::string & filename_, size_t estimated_size,
|
||||
size_t aio_threshold, size_t buffer_size_, int flags_, char * existing_memory_, size_t alignment)
|
||||
{
|
||||
if ((aio_threshold == 0) || (estimated_size < aio_threshold))
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::CreatedReadBufferOrdinary);
|
||||
return std::make_unique<ReadBufferFromFile>(filename_, buffer_size_, flags_, existing_memory_, alignment);
|
||||
}
|
||||
else
|
||||
{
|
||||
#if defined(__linux__) || defined(__FreeBSD__)
|
||||
ProfileEvents::increment(ProfileEvents::CreatedReadBufferAIO);
|
||||
return std::make_unique<ReadBufferAIO>(filename_, buffer_size_, flags_, existing_memory_);
|
||||
#else
|
||||
throw Exception("AIO is implemented only on Linux and FreeBSD", ErrorCodes::NOT_IMPLEMENTED);
|
||||
#endif
|
||||
if (aio_threshold && estimated_size >= aio_threshold)
|
||||
{
|
||||
/// Attempt to open a file with O_DIRECT
|
||||
try
|
||||
{
|
||||
auto res = std::make_unique<ReadBufferAIO>(filename_, buffer_size_, flags_, existing_memory_);
|
||||
ProfileEvents::increment(ProfileEvents::CreatedReadBufferAIO);
|
||||
return res;
|
||||
}
|
||||
catch (const ErrnoException &)
|
||||
{
|
||||
/// Fall back to cached IO if O_DIRECT is not supported.
|
||||
ProfileEvents::increment(ProfileEvents::CreatedReadBufferAIOFailed);
|
||||
}
|
||||
}
|
||||
#else
|
||||
(void)aio_threshold;
|
||||
(void)estimated_size;
|
||||
#endif
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::CreatedReadBufferOrdinary);
|
||||
return std::make_unique<ReadBufferFromFile>(filename_, buffer_size_, flags_, existing_memory_, alignment);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -10,36 +10,39 @@ namespace ProfileEvents
|
||||
{
|
||||
extern const Event CreatedWriteBufferOrdinary;
|
||||
extern const Event CreatedWriteBufferAIO;
|
||||
extern const Event CreatedWriteBufferAIOFailed;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
#if !defined(__linux__)
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int NOT_IMPLEMENTED;
|
||||
}
|
||||
#endif
|
||||
|
||||
std::unique_ptr<WriteBufferFromFileBase> createWriteBufferFromFileBase(const std::string & filename_, size_t estimated_size,
|
||||
size_t aio_threshold, size_t buffer_size_, int flags_, mode_t mode, char * existing_memory_,
|
||||
size_t alignment)
|
||||
{
|
||||
if ((aio_threshold == 0) || (estimated_size < aio_threshold))
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::CreatedWriteBufferOrdinary);
|
||||
return std::make_unique<WriteBufferFromFile>(filename_, buffer_size_, flags_, mode, existing_memory_, alignment);
|
||||
}
|
||||
else
|
||||
{
|
||||
#if defined(__linux__) || defined(__FreeBSD__)
|
||||
ProfileEvents::increment(ProfileEvents::CreatedWriteBufferAIO);
|
||||
return std::make_unique<WriteBufferAIO>(filename_, buffer_size_, flags_, mode, existing_memory_);
|
||||
#else
|
||||
throw Exception("AIO is implemented only on Linux and FreeBSD", ErrorCodes::NOT_IMPLEMENTED);
|
||||
#endif
|
||||
if (aio_threshold && estimated_size >= aio_threshold)
|
||||
{
|
||||
/// Attempt to open a file with O_DIRECT
|
||||
try
|
||||
{
|
||||
auto res = std::make_unique<WriteBufferAIO>(filename_, buffer_size_, flags_, mode, existing_memory_);
|
||||
ProfileEvents::increment(ProfileEvents::CreatedWriteBufferAIO);
|
||||
return res;
|
||||
}
|
||||
catch (const ErrnoException &)
|
||||
{
|
||||
/// Fall back to cached IO if O_DIRECT is not supported.
|
||||
ProfileEvents::increment(ProfileEvents::CreatedWriteBufferAIOFailed);
|
||||
}
|
||||
}
|
||||
#else
|
||||
(void)aio_threshold;
|
||||
(void)estimated_size;
|
||||
#endif
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::CreatedWriteBufferOrdinary);
|
||||
return std::make_unique<WriteBufferFromFile>(filename_, buffer_size_, flags_, mode, existing_memory_, alignment);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/assert_cast.h>
|
||||
#include <common/demangle.h>
|
||||
#include <common/config_common.h>
|
||||
|
||||
|
||||
namespace ProfileEvents
|
||||
@ -639,6 +640,12 @@ bool Aggregator::executeOnBlock(const Block & block, AggregatedDataVariants & re
|
||||
&& current_memory_usage > static_cast<Int64>(params.max_bytes_before_external_group_by)
|
||||
&& worth_convert_to_two_level)
|
||||
{
|
||||
#if !UNBUNDLED
|
||||
auto free_space = Poco::File(params.tmp_path).freeSpace();
|
||||
if (current_memory_usage + params.min_free_disk_space > free_space)
|
||||
throw Exception("Not enough space for external aggregation in " + params.tmp_path, ErrorCodes::NOT_ENOUGH_SPACE);
|
||||
#endif
|
||||
|
||||
writeToTemporaryFile(result);
|
||||
}
|
||||
|
||||
|
@ -39,6 +39,7 @@ namespace DB
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNKNOWN_AGGREGATED_DATA_VARIANT;
|
||||
extern const int NOT_ENOUGH_SPACE;
|
||||
}
|
||||
|
||||
class IBlockOutputStream;
|
||||
@ -195,8 +196,6 @@ struct AggregationMethodString
|
||||
using Data = TData;
|
||||
using Key = typename Data::key_type;
|
||||
using Mapped = typename Data::mapped_type;
|
||||
using iterator = typename Data::iterator;
|
||||
using const_iterator = typename Data::const_iterator;
|
||||
|
||||
Data data;
|
||||
|
||||
@ -223,8 +222,6 @@ struct AggregationMethodFixedString
|
||||
using Data = TData;
|
||||
using Key = typename Data::key_type;
|
||||
using Mapped = typename Data::mapped_type;
|
||||
using iterator = typename Data::iterator;
|
||||
using const_iterator = typename Data::const_iterator;
|
||||
|
||||
Data data;
|
||||
|
||||
@ -253,8 +250,6 @@ struct AggregationMethodSingleLowCardinalityColumn : public SingleColumnMethod
|
||||
using Data = typename Base::Data;
|
||||
using Key = typename Base::Key;
|
||||
using Mapped = typename Base::Mapped;
|
||||
using iterator = typename Base::iterator;
|
||||
using const_iterator = typename Base::const_iterator;
|
||||
|
||||
using Base::data;
|
||||
|
||||
@ -364,8 +359,6 @@ struct AggregationMethodSerialized
|
||||
using Data = TData;
|
||||
using Key = typename Data::key_type;
|
||||
using Mapped = typename Data::mapped_type;
|
||||
using iterator = typename Data::iterator;
|
||||
using const_iterator = typename Data::const_iterator;
|
||||
|
||||
Data data;
|
||||
|
||||
@ -459,8 +452,8 @@ struct AggregatedDataVariants : private boost::noncopyable
|
||||
std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys256TwoLevel, true>> nullable_keys256_two_level;
|
||||
|
||||
/// Support for low cardinality.
|
||||
std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt8, AggregatedDataWithNullableUInt8Key>>> low_cardinality_key8;
|
||||
std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt16, AggregatedDataWithNullableUInt16Key>>> low_cardinality_key16;
|
||||
std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt8, AggregatedDataWithNullableUInt8Key, false>>> low_cardinality_key8;
|
||||
std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt16, AggregatedDataWithNullableUInt16Key, false>>> low_cardinality_key16;
|
||||
std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt64Key>>> low_cardinality_key32;
|
||||
std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64Key>>> low_cardinality_key64;
|
||||
std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodString<AggregatedDataWithNullableStringKey>>> low_cardinality_key_string;
|
||||
@ -796,6 +789,7 @@ public:
|
||||
/// Settings is used to determine cache size. No threads are created.
|
||||
size_t max_threads;
|
||||
|
||||
const size_t min_free_disk_space;
|
||||
Params(
|
||||
const Block & src_header_,
|
||||
const ColumnNumbers & keys_, const AggregateDescriptions & aggregates_,
|
||||
@ -803,21 +797,23 @@ public:
|
||||
size_t group_by_two_level_threshold_, size_t group_by_two_level_threshold_bytes_,
|
||||
size_t max_bytes_before_external_group_by_,
|
||||
bool empty_result_for_aggregation_by_empty_set_,
|
||||
const std::string & tmp_path_, size_t max_threads_)
|
||||
const std::string & tmp_path_, size_t max_threads_,
|
||||
size_t min_free_disk_space_)
|
||||
: src_header(src_header_),
|
||||
keys(keys_), aggregates(aggregates_), keys_size(keys.size()), aggregates_size(aggregates.size()),
|
||||
overflow_row(overflow_row_), max_rows_to_group_by(max_rows_to_group_by_), group_by_overflow_mode(group_by_overflow_mode_),
|
||||
group_by_two_level_threshold(group_by_two_level_threshold_), group_by_two_level_threshold_bytes(group_by_two_level_threshold_bytes_),
|
||||
max_bytes_before_external_group_by(max_bytes_before_external_group_by_),
|
||||
empty_result_for_aggregation_by_empty_set(empty_result_for_aggregation_by_empty_set_),
|
||||
tmp_path(tmp_path_), max_threads(max_threads_)
|
||||
tmp_path(tmp_path_), max_threads(max_threads_),
|
||||
min_free_disk_space(min_free_disk_space_)
|
||||
{
|
||||
}
|
||||
|
||||
/// Only parameters that matter during merge.
|
||||
Params(const Block & intermediate_header_,
|
||||
const ColumnNumbers & keys_, const AggregateDescriptions & aggregates_, bool overflow_row_, size_t max_threads_)
|
||||
: Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, "", max_threads_)
|
||||
: Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, "", max_threads_, 0)
|
||||
{
|
||||
intermediate_header = intermediate_header_;
|
||||
}
|
||||
|
@ -7,7 +7,6 @@
|
||||
#include <Parsers/ASTSelectQuery.h>
|
||||
|
||||
#include <Storages/IStorage.h>
|
||||
#include <DataTypes/DataTypeNullable.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -102,22 +101,6 @@ std::unordered_map<String, String> AnalyzedJoin::getOriginalColumnsMap(const Nam
|
||||
return out;
|
||||
}
|
||||
|
||||
void AnalyzedJoin::calculateAvailableJoinedColumns(bool make_nullable)
|
||||
{
|
||||
if (!make_nullable)
|
||||
{
|
||||
available_joined_columns = columns_from_joined_table;
|
||||
return;
|
||||
}
|
||||
|
||||
for (auto & column : columns_from_joined_table)
|
||||
{
|
||||
auto type = column.type->canBeInsideNullable() ? makeNullable(column.type) : column.type;
|
||||
available_joined_columns.emplace_back(NameAndTypePair(column.name, std::move(type)));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
NamesAndTypesList getNamesAndTypeListFromTableExpression(const ASTTableExpression & table_expression, const Context & context)
|
||||
{
|
||||
NamesAndTypesList names_and_type_list;
|
||||
|
@ -42,8 +42,6 @@ private:
|
||||
|
||||
/// All columns which can be read from joined table. Duplicating names are qualified.
|
||||
NamesAndTypesList columns_from_joined_table;
|
||||
/// Columns from joined table which may be added to block. It's columns_from_joined_table with possibly modified types.
|
||||
NamesAndTypesList available_joined_columns;
|
||||
/// Name -> original name. Names are the same as in columns_from_joined_table list.
|
||||
std::unordered_map<String, String> original_names;
|
||||
/// Original name -> name. Only renamed columns.
|
||||
@ -61,7 +59,6 @@ public:
|
||||
std::unordered_map<String, String> getOriginalColumnsMap(const NameSet & required_columns) const;
|
||||
|
||||
void deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix);
|
||||
void calculateAvailableJoinedColumns(bool make_nullable);
|
||||
size_t rightKeyInclusion(const String & name) const;
|
||||
};
|
||||
|
||||
|
@ -18,6 +18,8 @@ Context removeUserRestrictionsFromSettings(const Context & context, const Settin
|
||||
{
|
||||
Settings new_settings = settings;
|
||||
new_settings.queue_max_wait_ms = Cluster::saturate(new_settings.queue_max_wait_ms, settings.max_execution_time);
|
||||
new_settings.connection_pool_max_wait_ms = Cluster::saturate(new_settings.connection_pool_max_wait_ms, settings.max_execution_time);
|
||||
new_settings.replace_running_query_max_wait_ms = Cluster::saturate(new_settings.replace_running_query_max_wait_ms, settings.max_execution_time);
|
||||
|
||||
/// Does not matter on remote servers, because queries are sent under different user.
|
||||
new_settings.max_concurrent_queries_for_user = 0;
|
||||
|
@ -1132,7 +1132,7 @@ void Context::updateSettingsChanges(const SettingsChanges & changes)
|
||||
if (change.name == "profile")
|
||||
setProfile(change.value.safeGet<String>());
|
||||
else
|
||||
settings.updateFromChange(change);
|
||||
settings.applyChange(change);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1162,6 +1162,12 @@ String Context::getCurrentQueryId() const
|
||||
}
|
||||
|
||||
|
||||
String Context::getInitialQueryId() const
|
||||
{
|
||||
return client_info.initial_query_id;
|
||||
}
|
||||
|
||||
|
||||
void Context::setCurrentDatabase(const String & name)
|
||||
{
|
||||
auto lock = getLock();
|
||||
|
@ -264,6 +264,10 @@ public:
|
||||
|
||||
String getCurrentDatabase() const;
|
||||
String getCurrentQueryId() const;
|
||||
|
||||
/// Id of initiating query for distributed queries; or current query id if it's not a distributed query.
|
||||
String getInitialQueryId() const;
|
||||
|
||||
void setCurrentDatabase(const String & name);
|
||||
void setCurrentQueryId(const String & query_id);
|
||||
|
||||
|
@ -278,8 +278,8 @@ void ExpressionAction::prepare(Block & sample_block, const Settings & settings,
|
||||
case JOIN:
|
||||
{
|
||||
bool is_null_used_as_default = settings.join_use_nulls;
|
||||
bool right_or_full_join = join_kind == ASTTableJoin::Kind::Right || join_kind == ASTTableJoin::Kind::Full;
|
||||
bool left_or_full_join = join_kind == ASTTableJoin::Kind::Left || join_kind == ASTTableJoin::Kind::Full;
|
||||
bool right_or_full_join = isRightOrFull(join_kind);
|
||||
bool left_or_full_join = isLeftOrFull(join_kind);
|
||||
|
||||
for (auto & col : sample_block)
|
||||
{
|
||||
@ -291,8 +291,8 @@ void ExpressionAction::prepare(Block & sample_block, const Settings & settings,
|
||||
|
||||
bool make_nullable = is_null_used_as_default && right_or_full_join;
|
||||
|
||||
if (make_nullable && !col.type->isNullable())
|
||||
col.type = std::make_shared<DataTypeNullable>(col.type);
|
||||
if (make_nullable && col.type->canBeInsideNullable())
|
||||
col.type = makeNullable(col.type);
|
||||
}
|
||||
|
||||
for (const auto & col : columns_added_by_join)
|
||||
@ -316,8 +316,8 @@ void ExpressionAction::prepare(Block & sample_block, const Settings & settings,
|
||||
}
|
||||
}
|
||||
|
||||
if (make_nullable && !res_type->isNullable())
|
||||
res_type = std::make_shared<DataTypeNullable>(res_type);
|
||||
if (make_nullable && res_type->canBeInsideNullable())
|
||||
res_type = makeNullable(res_type);
|
||||
|
||||
sample_block.insert(ColumnWithTypeAndName(nullptr, res_type, col.name));
|
||||
}
|
||||
@ -726,7 +726,7 @@ void ExpressionActions::addImpl(ExpressionAction action, Names & new_names)
|
||||
new_names.push_back(action.result_name);
|
||||
new_names.insert(new_names.end(), action.array_joined_columns.begin(), action.array_joined_columns.end());
|
||||
|
||||
/// Compiled functions are custom functions and them don't need building
|
||||
/// Compiled functions are custom functions and they don't need building
|
||||
if (action.type == ExpressionAction::APPLY_FUNCTION && !action.is_function_compiled)
|
||||
{
|
||||
if (sample_block.has(action.result_name))
|
||||
|
@ -1,6 +1,5 @@
|
||||
#include "ExternalLoader.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <mutex>
|
||||
#include <pcg_random.hpp>
|
||||
#include <common/DateLUT.h>
|
||||
@ -933,6 +932,8 @@ private:
|
||||
class ExternalLoader::PeriodicUpdater : private boost::noncopyable
|
||||
{
|
||||
public:
|
||||
static constexpr UInt64 check_period_sec = 5;
|
||||
|
||||
PeriodicUpdater(ConfigFilesReader & config_files_reader_, LoadingDispatcher & loading_dispatcher_)
|
||||
: config_files_reader(config_files_reader_), loading_dispatcher(loading_dispatcher_)
|
||||
{
|
||||
@ -940,11 +941,10 @@ public:
|
||||
|
||||
~PeriodicUpdater() { enable(false); }
|
||||
|
||||
void enable(bool enable_, const ExternalLoaderUpdateSettings & settings_ = {})
|
||||
void enable(bool enable_)
|
||||
{
|
||||
std::unique_lock lock{mutex};
|
||||
enabled = enable_;
|
||||
settings = settings_;
|
||||
|
||||
if (enable_)
|
||||
{
|
||||
@ -985,9 +985,7 @@ public:
|
||||
return std::chrono::system_clock::now() + std::chrono::seconds{distribution(rnd_engine)};
|
||||
}
|
||||
|
||||
std::uniform_int_distribution<UInt64> distribution(0, static_cast<UInt64>(std::exp2(error_count - 1)));
|
||||
std::chrono::seconds delay(std::min<UInt64>(settings.backoff_max_sec, settings.backoff_initial_sec + distribution(rnd_engine)));
|
||||
return std::chrono::system_clock::now() + delay;
|
||||
return std::chrono::system_clock::now() + std::chrono::seconds(ExternalLoadableBackoff{}.calculateDuration(rnd_engine, error_count));
|
||||
}
|
||||
|
||||
private:
|
||||
@ -996,9 +994,8 @@ private:
|
||||
setThreadName("ExterLdrReload");
|
||||
|
||||
std::unique_lock lock{mutex};
|
||||
auto timeout = [this] { return std::chrono::seconds(settings.check_period_sec); };
|
||||
auto pred = [this] { return !enabled; };
|
||||
while (!event.wait_for(lock, timeout(), pred))
|
||||
while (!event.wait_for(lock, std::chrono::seconds(check_period_sec), pred))
|
||||
{
|
||||
lock.unlock();
|
||||
loading_dispatcher.setConfiguration(config_files_reader.read());
|
||||
@ -1012,7 +1009,6 @@ private:
|
||||
|
||||
mutable std::mutex mutex;
|
||||
bool enabled = false;
|
||||
ExternalLoaderUpdateSettings settings;
|
||||
ThreadFromGlobalPool thread;
|
||||
std::condition_variable event;
|
||||
mutable pcg64 rnd_engine{randomSeed()};
|
||||
@ -1051,9 +1047,9 @@ void ExternalLoader::enableAsyncLoading(bool enable)
|
||||
loading_dispatcher->enableAsyncLoading(enable);
|
||||
}
|
||||
|
||||
void ExternalLoader::enablePeriodicUpdates(bool enable_, const ExternalLoaderUpdateSettings & settings_)
|
||||
void ExternalLoader::enablePeriodicUpdates(bool enable_)
|
||||
{
|
||||
periodic_updater->enable(enable_, settings_);
|
||||
periodic_updater->enable(enable_);
|
||||
}
|
||||
|
||||
bool ExternalLoader::hasCurrentlyLoadedObjects() const
|
||||
|
@ -11,19 +11,6 @@
|
||||
|
||||
namespace DB
|
||||
{
|
||||
struct ExternalLoaderUpdateSettings
|
||||
{
|
||||
UInt64 check_period_sec = 5;
|
||||
UInt64 backoff_initial_sec = 5;
|
||||
/// 10 minutes
|
||||
UInt64 backoff_max_sec = 10 * 60;
|
||||
|
||||
ExternalLoaderUpdateSettings() = default;
|
||||
ExternalLoaderUpdateSettings(UInt64 check_period_sec_, UInt64 backoff_initial_sec_, UInt64 backoff_max_sec_)
|
||||
: check_period_sec(check_period_sec_), backoff_initial_sec(backoff_initial_sec_), backoff_max_sec(backoff_max_sec_) {}
|
||||
};
|
||||
|
||||
|
||||
/* External configuration structure.
|
||||
*
|
||||
* <external_group>
|
||||
@ -105,7 +92,7 @@ public:
|
||||
void enableAsyncLoading(bool enable);
|
||||
|
||||
/// Sets settings for periodic updates.
|
||||
void enablePeriodicUpdates(bool enable, const ExternalLoaderUpdateSettings & settings = {});
|
||||
void enablePeriodicUpdates(bool enable);
|
||||
|
||||
/// Returns the status of the object.
|
||||
/// If the object has not been loaded yet then the function returns Status::NOT_LOADED.
|
||||
|
@ -1,7 +1,7 @@
|
||||
#include <Interpreters/IExternalLoadable.h>
|
||||
|
||||
#include <Poco/Util/AbstractConfiguration.h>
|
||||
|
||||
#include <cmath>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -16,4 +16,13 @@ ExternalLoadableLifetime::ExternalLoadableLifetime(const Poco::Util::AbstractCon
|
||||
max_sec = has_min ? config.getUInt64(config_prefix + ".max") : min_sec;
|
||||
}
|
||||
|
||||
|
||||
UInt64 ExternalLoadableBackoff::calculateDuration(pcg64 & rnd_engine, size_t error_count) const
|
||||
{
|
||||
if (error_count < 1)
|
||||
error_count = 1;
|
||||
std::uniform_int_distribution<UInt64> distribution(0, static_cast<UInt64>(std::exp2(error_count - 1)));
|
||||
return std::min<UInt64>(backoff_max_sec, backoff_initial_sec + distribution(rnd_engine));
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -3,6 +3,7 @@
 #include <string>
 #include <memory>
 #include <boost/noncopyable.hpp>
+#include <pcg_random.hpp>
 #include <Core/Types.h>


@@ -25,6 +26,17 @@ struct ExternalLoadableLifetime
 };


+/// Delay before trying to load again after error.
+struct ExternalLoadableBackoff
+{
+    UInt64 backoff_initial_sec = 5;
+    UInt64 backoff_max_sec = 10 * 60; /// 10 minutes
+
+    /// Calculates time to try loading again after error.
+    UInt64 calculateDuration(pcg64 & rnd_engine, size_t error_count = 1) const;
+};
+
+
 /// Basic interface for external loadable objects. Is used in ExternalLoader.
 class IExternalLoadable : public std::enable_shared_from_this<IExternalLoadable>, private boost::noncopyable
 {
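Judging by the signature, error_count is the number of consecutive failed load attempts for an object. A hedged sketch of how a loader might consult such a backoff when scheduling retries follows; BackoffSketch, tryLoad and the loop are illustrative, not ClickHouse code.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <random>

// Illustrative stand-in for ExternalLoadableBackoff: same fields, same formula.
struct BackoffSketch
{
    uint64_t backoff_initial_sec = 5;
    uint64_t backoff_max_sec = 10 * 60;

    uint64_t calculateDuration(std::mt19937_64 & rnd, size_t error_count) const
    {
        if (error_count < 1)
            error_count = 1;
        std::uniform_int_distribution<uint64_t> dist(0, static_cast<uint64_t>(std::exp2(error_count - 1)));
        return std::min<uint64_t>(backoff_max_sec, backoff_initial_sec + dist(rnd));
    }
};

// Hypothetical load attempt: fails twice, then succeeds.
bool tryLoad(size_t attempt) { return attempt >= 3; }

int main()
{
    BackoffSketch backoff;
    std::mt19937_64 rnd{42};
    size_t error_count = 0;

    for (size_t attempt = 1; attempt <= 5; ++attempt)
    {
        if (tryLoad(attempt))
        {
            std::cout << "loaded on attempt " << attempt << "\n";
            break;
        }
        ++error_count;   // consecutive failures drive the delay up
        std::cout << "attempt " << attempt << " failed, next try in "
                  << backoff.calculateDuration(rnd, error_count) << " s\n";
        // A real loader would sleep or schedule the next attempt here; the sketch only prints.
    }
}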
@@ -92,7 +92,7 @@ BlockInputStreamPtr InterpreterDescribeQuery::executeImpl()
         table = context.getTable(database_name, table_name);
     }

-    auto table_lock = table->lockStructureForShare(false, context.getCurrentQueryId());
+    auto table_lock = table->lockStructureForShare(false, context.getInitialQueryId());
     columns = table->getColumns();
 }

@@ -80,7 +80,7 @@ BlockIO InterpreterDropQuery::executeToTable(String & database_name_, String & t
             /// If table was already dropped by anyone, an exception will be thrown
             auto table_lock = database_and_table.second->lockExclusively(context.getCurrentQueryId());
             /// Drop table data, don't touch metadata
-            database_and_table.second->truncate(query_ptr, context);
+            database_and_table.second->truncate(query_ptr, context, table_lock);
         }
         else if (kind == ASTDropQuery::Kind::Drop)
         {
@@ -90,11 +90,32 @@ BlockIO InterpreterDropQuery::executeToTable(String & database_name_, String & t
             /// If table was already dropped by anyone, an exception will be thrown

             auto table_lock = database_and_table.second->lockExclusively(context.getCurrentQueryId());
-            /// Delete table metadata and table itself from memory
+
+            const std::string metadata_file_without_extension =
+                database_and_table.first->getMetadataPath()
+                + escapeForFileName(database_and_table.second->getTableName());
+
+            const auto prev_metadata_name = metadata_file_without_extension + ".sql";
+            const auto drop_metadata_name = metadata_file_without_extension + ".sql.tmp_drop";
+
+            /// Try to rename metadata file and delete the data
+            try
+            {
+                /// There some kind of tables that have no metadata - ignore renaming
+                if (Poco::File(prev_metadata_name).exists())
+                    Poco::File(prev_metadata_name).renameTo(drop_metadata_name);
+                /// Delete table data
+                database_and_table.second->drop(table_lock);
+            }
+            catch (...)
+            {
+                if (Poco::File(drop_metadata_name).exists())
+                    Poco::File(drop_metadata_name).renameTo(prev_metadata_name);
+                throw;
+            }
+
+            /// Delete table metadata and table itself from memory
             database_and_table.first->removeTable(context, database_and_table.second->getTableName());
-            /// Delete table data
-            database_and_table.second->drop();
             database_and_table.second->is_dropped = true;

             String database_data_path = database_and_table.first->getDataPath();
@@ -128,7 +149,7 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(String & table_name, ASTDr
                 /// If table was already dropped by anyone, an exception will be thrown
                 auto table_lock = table->lockExclusively(context.getCurrentQueryId());
                 /// Drop table data, don't touch metadata
-                table->truncate(query_ptr, context);
+                table->truncate(query_ptr, context, table_lock);
             }
             else if (kind == ASTDropQuery::Kind::Drop)
             {
@@ -137,7 +158,7 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(String & table_name, ASTDr
                 /// If table was already dropped by anyone, an exception will be thrown
                 auto table_lock = table->lockExclusively(context.getCurrentQueryId());
                 /// Delete table data
-                table->drop();
+                table->drop(table_lock);
                 table->is_dropped = true;
             }
         }
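The enlarged Drop branch renames the table's .sql metadata to .sql.tmp_drop before touching the data and renames it back if the drop throws, so a failed DROP leaves the table attachable. Below is a self-contained sketch of that rename-with-rollback pattern using std::filesystem; dropTable, dropData and the paths are illustrative, not the ClickHouse implementation.

#include <filesystem>
#include <fstream>
#include <iostream>
#include <stdexcept>

namespace fs = std::filesystem;

// Illustrative stand-in for "delete table data"; flip `fail` to exercise the rollback path.
void dropData(bool fail)
{
    if (fail)
        throw std::runtime_error("simulated failure while dropping data");
}

void dropTable(const fs::path & metadata_dir, const std::string & table_name, bool fail)
{
    const fs::path prev_metadata_name = metadata_dir / (table_name + ".sql");
    const fs::path drop_metadata_name = metadata_dir / (table_name + ".sql.tmp_drop");

    try
    {
        // Some tables have no metadata file - ignore renaming in that case, as the real code does.
        if (fs::exists(prev_metadata_name))
            fs::rename(prev_metadata_name, drop_metadata_name);
        dropData(fail);
    }
    catch (...)
    {
        // Roll the rename back so the table is still attachable after a failed drop.
        if (fs::exists(drop_metadata_name))
            fs::rename(drop_metadata_name, prev_metadata_name);
        throw;
    }

    // Only once the data is gone is the metadata removed for good (sketch-level cleanup).
    fs::remove(drop_metadata_name);
}

int main()
{
    const fs::path dir = fs::temp_directory_path() / "drop_table_sketch";
    fs::create_directories(dir);
    std::ofstream(dir / "t.sql") << "ATTACH TABLE t (x UInt64) ENGINE = Log\n";

    try
    {
        dropTable(dir, "t", /*fail=*/true);
    }
    catch (const std::exception & e)
    {
        std::cout << "drop failed: " << e.what()
                  << ", metadata restored: " << fs::exists(dir / "t.sql") << "\n";
    }

    dropTable(dir, "t", /*fail=*/false);
    std::cout << "metadata still present after successful drop: " << fs::exists(dir / "t.sql") << "\n";
}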
@@ -38,8 +38,8 @@ namespace ErrorCodes


 InterpreterInsertQuery::InterpreterInsertQuery(
-    const ASTPtr & query_ptr_, const Context & context_, bool allow_materialized_)
-    : query_ptr(query_ptr_), context(context_), allow_materialized(allow_materialized_)
+    const ASTPtr & query_ptr_, const Context & context_, bool allow_materialized_, bool no_squash_)
+    : query_ptr(query_ptr_), context(context_), allow_materialized(allow_materialized_), no_squash(no_squash_)
 {
     checkStackSize();
 }
@@ -100,7 +100,7 @@ BlockIO InterpreterInsertQuery::execute()
     checkAccess(query);
     StoragePtr table = getTable(query);

-    auto table_lock = table->lockStructureForShare(true, context.getCurrentQueryId());
+    auto table_lock = table->lockStructureForShare(true, context.getInitialQueryId());

     /// We create a pipeline of several streams, into which we will write data.
     BlockOutputStreamPtr out;
@@ -109,7 +109,7 @@ BlockIO InterpreterInsertQuery::execute()

     /// Do not squash blocks if it is a sync INSERT into Distributed, since it lead to double bufferization on client and server side.
     /// Client-side bufferization might cause excessive timeouts (especially in case of big blocks).
-    if (!(context.getSettingsRef().insert_distributed_sync && table->isRemote()))
+    if (!(context.getSettingsRef().insert_distributed_sync && table->isRemote()) && !no_squash)
     {
         out = std::make_shared<SquashingBlockOutputStream>(
             out, out->getHeader(), context.getSettingsRef().min_insert_block_size_rows, context.getSettingsRef().min_insert_block_size_bytes);
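For context on the new !no_squash condition: the squashing stream merges small incoming blocks until the min_insert_block_size_rows / min_insert_block_size_bytes thresholds are reached, which the new flag lets callers bypass entirely. Here is a toy sketch of that accumulate-and-flush behaviour; the structs and thresholds are illustrative, not the actual SquashingBlockOutputStream interface.

#include <cstddef>
#include <iostream>

// Toy stand-in for a block: just a row count and a byte size.
struct Block
{
    size_t rows = 0;
    size_t bytes = 0;
};

// Accumulates blocks until min_rows or min_bytes is reached, then emits one merged block,
// mirroring the role SquashingBlockOutputStream plays in the hunk above.
class SquashingOutput
{
public:
    SquashingOutput(size_t min_rows_, size_t min_bytes_) : min_rows(min_rows_), min_bytes(min_bytes_) {}

    void write(const Block & block)
    {
        pending.rows += block.rows;
        pending.bytes += block.bytes;
        if (pending.rows >= min_rows || pending.bytes >= min_bytes)
            flush();
    }

    void finish() { if (pending.rows) flush(); }   // emit whatever is left at end of insert

private:
    void flush()
    {
        std::cout << "emit merged block: " << pending.rows << " rows, " << pending.bytes << " bytes\n";
        pending = {};
    }

    size_t min_rows;
    size_t min_bytes;
    Block pending;
};

int main()
{
    // Thresholds are illustrative; ClickHouse takes them from min_insert_block_size_rows/bytes settings.
    SquashingOutput out(/*min_rows=*/1000, /*min_bytes=*/1 << 20);

    for (int i = 0; i < 10; ++i)
        out.write(Block{150, 10'000});   // ten small inserts become two merged blocks

    out.finish();
}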
Some files were not shown because too many files have changed in this diff.