Merge branch 'master' into Support_parameterized_view_with_analyzer
Commit 76ba60aa24
9  .gitmodules  vendored

@@ -245,6 +245,12 @@
 [submodule "contrib/idxd-config"]
     path = contrib/idxd-config
     url = https://github.com/intel/idxd-config
+[submodule "contrib/QAT-ZSTD-Plugin"]
+    path = contrib/QAT-ZSTD-Plugin
+    url = https://github.com/intel/QAT-ZSTD-Plugin
+[submodule "contrib/qatlib"]
+    path = contrib/qatlib
+    url = https://github.com/intel/qatlib
 [submodule "contrib/wyhash"]
     path = contrib/wyhash
     url = https://github.com/wangyi-fudan/wyhash
@@ -360,3 +366,6 @@
 [submodule "contrib/sqids-cpp"]
     path = contrib/sqids-cpp
     url = https://github.com/sqids/sqids-cpp.git
+[submodule "contrib/idna"]
+    path = contrib/idna
+    url = https://github.com/ada-url/idna.git
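A fresh checkout needs these new vendored dependencies before the QAT and IDNA code can build; they can be fetched like any other submodule (paths taken from the entries above):

```bash
git submodule update --init --recursive \
    contrib/QAT-ZSTD-Plugin \
    contrib/qatlib \
    contrib/idna
```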
@@ -33,7 +33,7 @@ curl https://clickhouse.com/ | sh

 ## Upcoming Events

-Keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
+Keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com.

 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
@@ -99,7 +99,7 @@ public:
 };
 }

-constexpr DB::UInt64 max_uint_mask = std::numeric_limits<DB::UInt64>::max();
+constexpr UInt64 max_uint_mask = std::numeric_limits<UInt64>::max();

 namespace std
 {
@@ -114,8 +114,8 @@ namespace std
 {
     size_t operator()(const DB::Decimal128 & x) const
     {
-        return std::hash<DB::Int64>()(x.value >> 64)
-            ^ std::hash<DB::Int64>()(x.value & max_uint_mask);
+        return std::hash<Int64>()(x.value >> 64)
+            ^ std::hash<Int64>()(x.value & max_uint_mask);
     }
 };
@@ -134,8 +134,8 @@ namespace std
     size_t operator()(const DB::Decimal256 & x) const
     {
         // FIXME temp solution
-        return std::hash<DB::Int64>()(static_cast<DB::Int64>(x.value >> 64 & max_uint_mask))
-            ^ std::hash<DB::Int64>()(static_cast<DB::Int64>(x.value & max_uint_mask));
+        return std::hash<Int64>()(static_cast<Int64>(x.value >> 64 & max_uint_mask))
+            ^ std::hash<Int64>()(static_cast<Int64>(x.value & max_uint_mask));
     }
 };
 }
@@ -3,15 +3,6 @@
 #include <cstdint>
 #include <string>

-using Int8 = int8_t;
-using Int16 = int16_t;
-using Int32 = int32_t;
-using Int64 = int64_t;
-
-#ifndef __cpp_char8_t
-using char8_t = unsigned char;
-#endif
-
 /// This is needed for more strict aliasing. https://godbolt.org/z/xpJBSb https://stackoverflow.com/a/57453713
 using UInt8 = char8_t;

@@ -19,24 +10,12 @@ using UInt16 = uint16_t;
 using UInt32 = uint32_t;
 using UInt64 = uint64_t;

-using String = std::string;
-
 namespace DB
 {

-using UInt8 = ::UInt8;
-using UInt16 = ::UInt16;
-using UInt32 = ::UInt32;
-using UInt64 = ::UInt64;
-
-using Int8 = ::Int8;
-using Int16 = ::Int16;
-using Int32 = ::Int32;
-using Int64 = ::Int64;
+using Int8 = int8_t;
+using Int16 = int16_t;
+using Int32 = int32_t;
+using Int64 = int64_t;

 using Float32 = float;
 using Float64 = double;

 using String = std::string;

 }
@@ -82,3 +82,4 @@ if (SANITIZE_COVERAGE)
 endif()

 set (WITHOUT_COVERAGE_FLAGS "-fno-profile-instr-generate -fno-coverage-mapping -fno-sanitize-coverage=trace-pc-guard,pc-table")
+set (WITHOUT_COVERAGE_FLAGS_LIST -fno-profile-instr-generate -fno-coverage-mapping -fno-sanitize-coverage=trace-pc-guard,pc-table)
27  contrib/CMakeLists.txt  vendored

@@ -154,6 +154,7 @@ add_contrib (libpqxx-cmake libpqxx)
 add_contrib (libpq-cmake libpq)
 add_contrib (nuraft-cmake NuRaft)
 add_contrib (fast_float-cmake fast_float)
+add_contrib (idna-cmake idna)
 add_contrib (datasketches-cpp-cmake datasketches-cpp)
 add_contrib (incbin-cmake incbin)
 add_contrib (sqids-cpp-cmake sqids-cpp)
@@ -171,9 +172,9 @@ add_contrib (s2geometry-cmake s2geometry)
 add_contrib (c-ares-cmake c-ares)

 if (OS_LINUX AND ARCH_AMD64 AND ENABLE_SSE42)
-    option (ENABLE_QPL "Enable Intel® Query Processing Library" ${ENABLE_LIBRARIES})
+    option (ENABLE_QPL "Enable Intel® Query Processing Library (QPL)" ${ENABLE_LIBRARIES})
 elseif(ENABLE_QPL)
-    message (${RECONFIGURE_MESSAGE_LEVEL} "QPL library is only supported on x86_64 arch with SSE 4.2 or higher")
+    message (${RECONFIGURE_MESSAGE_LEVEL} "QPL library is only supported on x86_64 with SSE 4.2 or higher")
 endif()
 if (ENABLE_QPL)
     add_contrib (idxd-config-cmake idxd-config)
@@ -182,6 +183,28 @@ else()
     message(STATUS "Not using QPL")
 endif ()

+if (OS_LINUX AND ARCH_AMD64)
+    option (ENABLE_QATLIB "Enable Intel® QuickAssist Technology Library (QATlib)" ${ENABLE_LIBRARIES})
+elseif(ENABLE_QATLIB)
+    message (${RECONFIGURE_MESSAGE_LEVEL} "QATLib is only supported on x86_64")
+endif()
+if (ENABLE_QATLIB)
+    option (ENABLE_QAT_USDM_DRIVER "A User Space DMA-able Memory (USDM) component which allocates/frees DMA-able memory" OFF)
+    option (ENABLE_QAT_OUT_OF_TREE_BUILD "Using out-of-tree driver, user needs to customize ICP_ROOT variable" OFF)
+    set(ICP_ROOT "" CACHE STRING "ICP_ROOT variable to define the path of out-of-tree driver package")
+    if (ENABLE_QAT_OUT_OF_TREE_BUILD)
+        if (ICP_ROOT STREQUAL "")
+            message(FATAL_ERROR "Please define the path of out-of-tree driver package with -DICP_ROOT=xxx or disable out-of-tree build with -DENABLE_QAT_OUT_OF_TREE_BUILD=OFF; \
+            If you want out-of-tree build but have no package available, please download and build ICP package from: https://www.intel.com/content/www/us/en/download/765501.html")
+        endif ()
+    else()
+        add_contrib (qatlib-cmake qatlib) # requires: isa-l
+    endif ()
+    add_contrib (QAT-ZSTD-Plugin-cmake QAT-ZSTD-Plugin)
+else()
+    message(STATUS "Not using QATLib")
+endif ()
+
 add_contrib (morton-nd-cmake morton-nd)
 if (ARCH_S390X)
     add_contrib(crc32-s390x-cmake crc32-s390x)
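These options live in the ordinary CMake cache, so they can be toggled at configure time. Two hypothetical invocations (the build directory and driver path are placeholders):

```bash
# In-tree build (default): qatlib is built from the bundled submodule
cmake -S . -B build -DENABLE_QATLIB=ON

# Out-of-tree build: ICP_ROOT must point at a locally built driver package
cmake -S . -B build \
    -DENABLE_QATLIB=ON \
    -DENABLE_QAT_OUT_OF_TREE_BUILD=ON \
    -DICP_ROOT=/opt/intel/qat-driver
```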
2  contrib/NuRaft  vendored

@@ -1 +1 @@
-Subproject commit 2f5f52c4d8c87c2a3a3d101ca3a0194c9b77526f
+Subproject commit 1278e32bb0d5dc489f947e002bdf8c71b0ddaa63
1  contrib/QAT-ZSTD-Plugin  vendored  Submodule

@@ -0,0 +1 @@
+Subproject commit e5a134e12d2ea8a5b0f3b83c5b1c325fda4eb0a8
85  contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt  Normal file

@@ -0,0 +1,85 @@
# Intel® QuickAssist Technology ZSTD Plugin (QAT ZSTD Plugin) is a plugin to Zstandard* (ZSTD*) that accelerates compression with QAT.
# ENABLE_QAT_OUT_OF_TREE_BUILD = 1 means the kernel has no native support; the user builds and installs the driver from the external package
# (https://www.intel.com/content/www/us/en/download/765501.html) and must set the ICP_ROOT environment variable to point
# at the root directory of the QAT driver source tree.
# ENABLE_QAT_OUT_OF_TREE_BUILD = 0 means the kernel has a built-in QAT driver, and QAT-ZSTD-Plugin only depends on qatlib.

if (ENABLE_QAT_OUT_OF_TREE_BUILD)
    message(STATUS "Intel QATZSTD out-of-tree build, ICP_ROOT: ${ICP_ROOT}")

    set(QATZSTD_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/QAT-ZSTD-Plugin/src")
    set(QATZSTD_SRC "${QATZSTD_SRC_DIR}/qatseqprod.c")
    set(ZSTD_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/zstd/lib")
    set(QAT_INCLUDE_DIR "${ICP_ROOT}/quickassist/include")
    set(QAT_DC_INCLUDE_DIR "${ICP_ROOT}/quickassist/include/dc")
    set(QAT_AL_INCLUDE_DIR "${ICP_ROOT}/quickassist/lookaside/access_layer/include")
    set(QAT_USDM_INCLUDE_DIR "${ICP_ROOT}/quickassist/utilities/libusdm_drv")
    set(USDM_LIBRARY "${ICP_ROOT}/build/libusdm_drv_s.so")
    set(QAT_S_LIBRARY "${ICP_ROOT}/build/libqat_s.so")
    if (ENABLE_QAT_USDM_DRIVER)
        add_definitions(-DENABLE_USDM_DRV)
    endif()
    add_library(_qatzstd_plugin ${QATZSTD_SRC})
    target_link_libraries (_qatzstd_plugin PUBLIC ${USDM_LIBRARY} ${QAT_S_LIBRARY})
    target_include_directories(_qatzstd_plugin
        SYSTEM PUBLIC "${QATZSTD_SRC_DIR}"
        PRIVATE ${QAT_INCLUDE_DIR}
            ${QAT_DC_INCLUDE_DIR}
            ${QAT_AL_INCLUDE_DIR}
            ${QAT_USDM_INCLUDE_DIR}
            ${ZSTD_LIBRARY_DIR})
    target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC)
    add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
else () # In-tree build
    message(STATUS "Intel QATZSTD in-tree build")
    set(QATZSTD_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/QAT-ZSTD-Plugin/src")
    set(QATZSTD_SRC "${QATZSTD_SRC_DIR}/qatseqprod.c")
    set(ZSTD_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/zstd/lib")

    # Please download & build the ICP package from: https://www.intel.com/content/www/us/en/download/765501.html
    set(ICP_ROOT "${ClickHouse_SOURCE_DIR}/contrib/qatlib")
    set(QAT_INCLUDE_DIR "${ICP_ROOT}/quickassist/include")
    set(QAT_DC_INCLUDE_DIR "${ICP_ROOT}/quickassist/include/dc")
    set(QAT_AL_INCLUDE_DIR "${ICP_ROOT}/quickassist/lookaside/access_layer/include")
    set(QAT_USDM_INCLUDE_DIR "${ICP_ROOT}/quickassist/utilities/libusdm_drv")
    set(USDM_LIBRARY "${ICP_ROOT}/build/libusdm_drv_s.so")
    set(QAT_S_LIBRARY "${ICP_ROOT}/build/libqat_s.so")
    set(LIBQAT_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qatlib")
    set(LIBQAT_HEADER_DIR "${CMAKE_CURRENT_BINARY_DIR}/include")

    file(MAKE_DIRECTORY
        "${LIBQAT_HEADER_DIR}/qat"
    )
    file(COPY "${LIBQAT_ROOT_DIR}/quickassist/include/cpa.h"
        DESTINATION "${LIBQAT_HEADER_DIR}/qat/"
    )
    file(COPY "${LIBQAT_ROOT_DIR}/quickassist/include/dc/cpa_dc.h"
        DESTINATION "${LIBQAT_HEADER_DIR}/qat/"
    )
    file(COPY "${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/include/icp_sal_poll.h"
        DESTINATION "${LIBQAT_HEADER_DIR}/qat/"
    )
    file(COPY "${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/include/icp_sal_user.h"
        DESTINATION "${LIBQAT_HEADER_DIR}/qat/"
    )
    file(COPY "${LIBQAT_ROOT_DIR}/quickassist/utilities/libusdm_drv/qae_mem.h"
        DESTINATION "${LIBQAT_HEADER_DIR}/qat/"
    )

    if (ENABLE_QAT_USDM_DRIVER)
        add_definitions(-DENABLE_USDM_DRV)
    endif()

    add_library(_qatzstd_plugin ${QATZSTD_SRC})
    target_link_libraries (_qatzstd_plugin PUBLIC ch_contrib::qatlib ch_contrib::usdm)
    target_include_directories(_qatzstd_plugin PRIVATE
        ${QAT_INCLUDE_DIR}
        ${QAT_DC_INCLUDE_DIR}
        ${QAT_AL_INCLUDE_DIR}
        ${QAT_USDM_INCLUDE_DIR}
        ${ZSTD_LIBRARY_DIR}
        ${LIBQAT_HEADER_DIR})
    target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC -DINTREE)
    target_include_directories(_qatzstd_plugin SYSTEM PUBLIC $<BUILD_INTERFACE:${QATZSTD_SRC_DIR}> $<INSTALL_INTERFACE:include>)
    add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
endif ()
2  contrib/azure  vendored

@@ -1 +1 @@
-Subproject commit 060c54dfb0abe869c065143303a9d3e9c54c29e3
+Subproject commit e71395e44f309f97b5a486f5c2c59b82f85dd2d2
1  contrib/idna  vendored  Submodule

@@ -0,0 +1 @@
+Subproject commit 3c8be01d42b75649f1ac9b697d0ef757eebfe667
24  contrib/idna-cmake/CMakeLists.txt  Normal file

@@ -0,0 +1,24 @@
option(ENABLE_IDNA "Enable idna support" ${ENABLE_LIBRARIES})
if (NOT ENABLE_IDNA)
    message (STATUS "Not using idna")
    return()
endif()
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/idna")

set (SRCS
    "${LIBRARY_DIR}/src/idna.cpp"
    "${LIBRARY_DIR}/src/mapping.cpp"
    "${LIBRARY_DIR}/src/mapping_tables.cpp"
    "${LIBRARY_DIR}/src/normalization.cpp"
    "${LIBRARY_DIR}/src/normalization_tables.cpp"
    "${LIBRARY_DIR}/src/punycode.cpp"
    "${LIBRARY_DIR}/src/to_ascii.cpp"
    "${LIBRARY_DIR}/src/to_unicode.cpp"
    "${LIBRARY_DIR}/src/unicode_transcoding.cpp"
    "${LIBRARY_DIR}/src/validity.cpp"
)

add_library (_idna ${SRCS})
target_include_directories(_idna PUBLIC "${LIBRARY_DIR}/include")

add_library (ch_contrib::idna ALIAS _idna)
@@ -161,6 +161,9 @@ target_include_directories(_jemalloc SYSTEM PRIVATE

 target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_NO_PRIVATE_NAMESPACE)

+# Because our coverage callbacks call malloc, a recursive call of malloc would not work.
+target_compile_options(_jemalloc PRIVATE ${WITHOUT_COVERAGE_FLAGS_LIST})
+
 if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
     target_compile_definitions(_jemalloc PRIVATE
         -DJEMALLOC_DEBUG=1
@@ -33,7 +33,6 @@ set(SRCS
 "${LIBCXX_SOURCE_DIR}/src/optional.cpp"
 "${LIBCXX_SOURCE_DIR}/src/random.cpp"
-"${LIBCXX_SOURCE_DIR}/src/random_shuffle.cpp"
 "${LIBCXX_SOURCE_DIR}/src/regex.cpp"
 "${LIBCXX_SOURCE_DIR}/src/ryu/d2fixed.cpp"
 "${LIBCXX_SOURCE_DIR}/src/ryu/d2s.cpp"
 "${LIBCXX_SOURCE_DIR}/src/ryu/f2s.cpp"
2  contrib/llvm-project  vendored

@@ -1 +1 @@
-Subproject commit 1834e42289c58402c804a87be4d489892b88f3ec
+Subproject commit 2568a7cd1297c7c3044b0f3cc0c23a6f6444d856
1  contrib/qatlib  vendored  Submodule

@@ -0,0 +1 @@
+Subproject commit abe15d7bfc083117bfbb4baee0b49ffcd1c03c5c
213  contrib/qatlib-cmake/CMakeLists.txt  Normal file

@@ -0,0 +1,213 @@
# Intel® QuickAssist Technology Library (QATlib).

message(STATUS "Intel QATlib ON")
set(LIBQAT_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qatlib")
set(LIBQAT_DIR "${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src")
set(LIBOSAL_DIR "${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src")
set(OPENSSL_DIR "${ClickHouse_SOURCE_DIR}/contrib/openssl")

# Build 3 libraries: _qatmgr, _osal, _qatlib
# Produce ch_contrib::qatlib by linking these libraries.

# _qatmgr

SET(LIBQATMGR_sources ${LIBQAT_DIR}/qat_direct/vfio/qat_mgr_client.c
    ${LIBQAT_DIR}/qat_direct/vfio/qat_mgr_lib.c
    ${LIBQAT_DIR}/qat_direct/vfio/qat_log.c
    ${LIBQAT_DIR}/qat_direct/vfio/vfio_lib.c
    ${LIBQAT_DIR}/qat_direct/vfio/adf_pfvf_proto.c
    ${LIBQAT_DIR}/qat_direct/vfio/adf_pfvf_vf_msg.c
    ${LIBQAT_DIR}/qat_direct/vfio/adf_vfio_pf.c)

add_library(_qatmgr ${LIBQATMGR_sources})

target_include_directories(_qatmgr PRIVATE
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/vfio
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/include
    ${LIBQAT_ROOT_DIR}/quickassist/include
    ${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/include
    ${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src/linux/user_space/include
    ${LIBQAT_ROOT_DIR}/quickassist/qat/drivers/crypto/qat/qat_common
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/include
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/common/include
    ${ClickHouse_SOURCE_DIR}/contrib/sysroot/linux-x86_64-musl/include)

target_compile_definitions(_qatmgr PRIVATE -DUSER_SPACE)
target_compile_options(_qatmgr PRIVATE -Wno-error=int-conversion)

# _osal

SET(LIBOSAL_sources
    ${LIBOSAL_DIR}/linux/user_space/OsalSemaphore.c
    ${LIBOSAL_DIR}/linux/user_space/OsalThread.c
    ${LIBOSAL_DIR}/linux/user_space/OsalMutex.c
    ${LIBOSAL_DIR}/linux/user_space/OsalSpinLock.c
    ${LIBOSAL_DIR}/linux/user_space/OsalAtomic.c
    ${LIBOSAL_DIR}/linux/user_space/OsalServices.c
    ${LIBOSAL_DIR}/linux/user_space/OsalUsrKrnProxy.c
    ${LIBOSAL_DIR}/linux/user_space/OsalCryptoInterface.c)

add_library(_osal ${LIBOSAL_sources})

target_include_directories(_osal PRIVATE
    ${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src/linux/user_space
    ${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/include
    ${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src/linux/user_space/include
    ${OPENSSL_DIR}/include
    ${ClickHouse_SOURCE_DIR}/contrib/openssl-cmake/linux_x86_64/include
    ${ClickHouse_SOURCE_DIR}/contrib/qatlib-cmake/include)

target_compile_definitions(_osal PRIVATE -DOSAL_ENSURE_ON -DUSE_OPENSSL)

# _qatlib
SET(LIBQAT_sources
    ${LIBQAT_DIR}/common/compression/dc_buffers.c
    ${LIBQAT_DIR}/common/compression/dc_chain.c
    ${LIBQAT_DIR}/common/compression/dc_datapath.c
    ${LIBQAT_DIR}/common/compression/dc_dp.c
    ${LIBQAT_DIR}/common/compression/dc_header_footer.c
    ${LIBQAT_DIR}/common/compression/dc_header_footer_lz4.c
    ${LIBQAT_DIR}/common/compression/dc_session.c
    ${LIBQAT_DIR}/common/compression/dc_stats.c
    ${LIBQAT_DIR}/common/compression/dc_err_sim.c
    ${LIBQAT_DIR}/common/compression/dc_ns_datapath.c
    ${LIBQAT_DIR}/common/compression/dc_ns_header_footer.c
    ${LIBQAT_DIR}/common/compression/dc_crc32.c
    ${LIBQAT_DIR}/common/compression/dc_crc64.c
    ${LIBQAT_DIR}/common/compression/dc_xxhash32.c
    ${LIBQAT_DIR}/common/compression/icp_sal_dc_err_sim.c
    ${LIBQAT_DIR}/common/crypto/asym/diffie_hellman/lac_dh_control_path.c
    ${LIBQAT_DIR}/common/crypto/asym/diffie_hellman/lac_dh_data_path.c
    ${LIBQAT_DIR}/common/crypto/asym/diffie_hellman/lac_dh_interface_check.c
    ${LIBQAT_DIR}/common/crypto/asym/diffie_hellman/lac_dh_stats.c
    ${LIBQAT_DIR}/common/crypto/asym/dsa/lac_dsa.c
    ${LIBQAT_DIR}/common/crypto/asym/dsa/lac_dsa_interface_check.c
    ${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ec.c
    ${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ec_common.c
    ${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ec_montedwds.c
    ${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ec_nist_curves.c
    ${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ecdh.c
    ${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ecdsa.c
    ${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ecsm2.c
    ${LIBQAT_DIR}/common/crypto/asym/ecc/lac_kpt_ecdsa.c
    ${LIBQAT_DIR}/common/crypto/asym/large_number/lac_ln.c
    ${LIBQAT_DIR}/common/crypto/asym/large_number/lac_ln_interface_check.c
    ${LIBQAT_DIR}/common/crypto/asym/pke_common/lac_pke_mmp.c
    ${LIBQAT_DIR}/common/crypto/asym/pke_common/lac_pke_qat_comms.c
    ${LIBQAT_DIR}/common/crypto/asym/pke_common/lac_pke_utils.c
    ${LIBQAT_DIR}/common/crypto/asym/prime/lac_prime.c
    ${LIBQAT_DIR}/common/crypto/asym/prime/lac_prime_interface_check.c
    ${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa.c
    ${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_control_path.c
    ${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_decrypt.c
    ${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_encrypt.c
    ${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_interface_check.c
    ${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_keygen.c
    ${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_stats.c
    ${LIBQAT_DIR}/common/crypto/asym/rsa/lac_kpt_rsa_decrypt.c
    ${LIBQAT_DIR}/common/crypto/sym/drbg/lac_sym_drbg_api.c
    ${LIBQAT_DIR}/common/crypto/sym/key/lac_sym_key.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_alg_chain.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_api.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_auth_enc.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_cb.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_cipher.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_compile_check.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_dp.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_hash.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_partial.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_queue.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_stats.c
    ${LIBQAT_DIR}/common/crypto/sym/nrbg/lac_sym_nrbg_api.c
    ${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat.c
    ${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat_cipher.c
    ${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat_constants_table.c
    ${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat_hash.c
    ${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c
    ${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat_key.c
    ${LIBQAT_DIR}/common/crypto/sym/lac_sym_hash_sw_precomputes.c
    ${LIBQAT_DIR}/common/crypto/kpt/provision/lac_kpt_provision.c
    ${LIBQAT_DIR}/common/ctrl/sal_compression.c
    ${LIBQAT_DIR}/common/ctrl/sal_create_services.c
    ${LIBQAT_DIR}/common/ctrl/sal_ctrl_services.c
    ${LIBQAT_DIR}/common/ctrl/sal_list.c
    ${LIBQAT_DIR}/common/ctrl/sal_crypto.c
    ${LIBQAT_DIR}/common/ctrl/sal_dc_chain.c
    ${LIBQAT_DIR}/common/ctrl/sal_instances.c
    ${LIBQAT_DIR}/common/qat_comms/sal_qat_cmn_msg.c
    ${LIBQAT_DIR}/common/utils/lac_buffer_desc.c
    ${LIBQAT_DIR}/common/utils/lac_log_message.c
    ${LIBQAT_DIR}/common/utils/lac_mem.c
    ${LIBQAT_DIR}/common/utils/lac_mem_pools.c
    ${LIBQAT_DIR}/common/utils/lac_sw_responses.c
    ${LIBQAT_DIR}/common/utils/lac_sync.c
    ${LIBQAT_DIR}/common/utils/sal_service_state.c
    ${LIBQAT_DIR}/common/utils/sal_statistics.c
    ${LIBQAT_DIR}/common/utils/sal_misc_error_stats.c
    ${LIBQAT_DIR}/common/utils/sal_string_parse.c
    ${LIBQAT_DIR}/common/utils/sal_user_process.c
    ${LIBQAT_DIR}/common/utils/sal_versions.c
    ${LIBQAT_DIR}/common/device/sal_dev_info.c
    ${LIBQAT_DIR}/user/sal_user.c
    ${LIBQAT_DIR}/user/sal_user_dyn_instance.c
    ${LIBQAT_DIR}/qat_direct/common/adf_process_proxy.c
    ${LIBQAT_DIR}/qat_direct/common/adf_user_cfg.c
    ${LIBQAT_DIR}/qat_direct/common/adf_user_device.c
    ${LIBQAT_DIR}/qat_direct/common/adf_user_dyn.c
    ${LIBQAT_DIR}/qat_direct/common/adf_user_ETring_mgr_dp.c
    ${LIBQAT_DIR}/qat_direct/common/adf_user_init.c
    ${LIBQAT_DIR}/qat_direct/common/adf_user_ring.c
    ${LIBQAT_DIR}/qat_direct/common/adf_user_transport_ctrl.c
    ${LIBQAT_DIR}/qat_direct/vfio/adf_vfio_cfg.c
    ${LIBQAT_DIR}/qat_direct/vfio/adf_vfio_ring.c
    ${LIBQAT_DIR}/qat_direct/vfio/adf_vfio_user_bundles.c
    ${LIBQAT_DIR}/qat_direct/vfio/adf_vfio_user_proxy.c
    ${LIBQAT_DIR}/common/compression/dc_crc_base.c)

add_library(_qatlib ${LIBQAT_sources})

target_include_directories(_qatlib PRIVATE
    ${CMAKE_SYSROOT}/usr/include
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/include
    ${LIBQAT_ROOT_DIR}/quickassist/utilities/libusdm_drv
    ${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/include
    ${LIBOSAL_DIR}/linux/user_space/include
    ${LIBQAT_ROOT_DIR}/quickassist/include
    ${LIBQAT_ROOT_DIR}/quickassist/include/lac
    ${LIBQAT_ROOT_DIR}/quickassist/include/dc
    ${LIBQAT_ROOT_DIR}/quickassist/qat/drivers/crypto/qat/qat_common
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/common/compression/include
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/common/crypto/sym/include
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/common/crypto/asym/include
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/firmware/include
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/common/include
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/include
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/common/include
    ${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/vfio
    ${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src/linux/user_space
    ${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src/linux/user_space/include
    ${ClickHouse_SOURCE_DIR}/contrib/sysroot/linux-x86_64-musl/include)

target_link_libraries(_qatlib PRIVATE _qatmgr _osal OpenSSL::SSL ch_contrib::isal)
target_compile_definitions(_qatlib PRIVATE -DUSER_SPACE -DLAC_BYTE_ORDER=__LITTLE_ENDIAN -DOSAL_ENSURE_ON)
target_link_options(_qatlib PRIVATE -pie -z relro -z now -z noexecstack)
target_compile_options(_qatlib PRIVATE -march=native)
add_library (ch_contrib::qatlib ALIAS _qatlib)

# _usdm

set(LIBUSDM_DIR "${ClickHouse_SOURCE_DIR}/contrib/qatlib/quickassist/utilities/libusdm_drv")
set(LIBUSDM_sources
    ${LIBUSDM_DIR}/user_space/vfio/qae_mem_utils_vfio.c
    ${LIBUSDM_DIR}/user_space/qae_mem_utils_common.c
    ${LIBUSDM_DIR}/user_space/vfio/qae_mem_hugepage_utils_vfio.c)

add_library(_usdm ${LIBUSDM_sources})

target_include_directories(_usdm PRIVATE
    ${ClickHouse_SOURCE_DIR}/contrib/sysroot/linux-x86_64-musl/include
    ${LIBUSDM_DIR}
    ${LIBUSDM_DIR}/include
    ${LIBUSDM_DIR}/user_space)

add_library (ch_contrib::usdm ALIAS _usdm)
14  contrib/qatlib-cmake/include/mqueue.h  Normal file

@@ -0,0 +1,14 @@
/* This is a workaround for a build conflict:
   1. __GLIBC_PREREQ (referenced in OsalServices.c) is only defined in './sysroot/linux-x86_64/include/features.h'.
   2. mqueue.h only exists under './sysroot/linux-x86_64-musl/'.
   This makes the target_include_directories for _osal conflict between './sysroot/linux-x86_64/include' and './sysroot/linux-x86_64-musl/',
   hence this separate mqueue.h under ./qatlib-cmake/include as an alternative.
*/

/* Major and minor version number of the GNU C library package. Use
   these macros to test for features in specific releases. */
#define __GLIBC__ 2
#define __GLIBC_MINOR__ 27

#define __GLIBC_PREREQ(maj, min) \
    ((__GLIBC__ << 16) + __GLIBC_MINOR__ >= ((maj) << 16) + (min))
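The macro packs the major and minor version into a single integer so one comparison orders version pairs. The same arithmetic can be checked in the shell; with the values pinned above, a `__GLIBC_PREREQ(2, 17)` test comes out true:

```bash
# ((2 << 16) + 27) >= ((2 << 16) + 17)  — i.e. version 2.27 satisfies "at least 2.17"
echo $(( ((2 << 16) + 27) >= ((2 << 16) + 17) ))   # prints 1
```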
2  contrib/rocksdb  vendored

@@ -1 +1 @@
-Subproject commit 66e3cbec31400ed3a23deb878c5d7f56f990f0ae
+Subproject commit dead55e60b873d5f70f0e9458fbbba2b2180f430
2  contrib/sqids-cpp  vendored

@@ -1 +1 @@
-Subproject commit 3756e537d4d48cc0dd4176801fe19f99601439b0
+Subproject commit a471f53672e98d49223f598528a533b07b085c61
@@ -41,6 +41,10 @@ readarray -t DISKS_PATHS < <(clickhouse extract-from-config --config-file "$CLIC
 readarray -t DISKS_METADATA_PATHS < <(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key='storage_configuration.disks.*.metadata_path' || true)

 CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"
+CLICKHOUSE_PASSWORD_FILE="${CLICKHOUSE_PASSWORD_FILE:-}"
+if [[ -n "${CLICKHOUSE_PASSWORD_FILE}" && -f "${CLICKHOUSE_PASSWORD_FILE}" ]]; then
+    CLICKHOUSE_PASSWORD="$(cat "${CLICKHOUSE_PASSWORD_FILE}")"
+fi
 CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-}"
 CLICKHOUSE_DB="${CLICKHOUSE_DB:-}"
 CLICKHOUSE_ACCESS_MANAGEMENT="${CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT:-0}"
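With this block, the entrypoint can read the default user's password from a mounted file — e.g. a Docker or Kubernetes secret — rather than a plain environment variable. A sketch of how it might be used (the secret path is illustrative):

```bash
docker run -d \
    -v "$PWD/ch_password:/run/secrets/ch_password:ro" \
    -e CLICKHOUSE_PASSWORD_FILE=/run/secrets/ch_password \
    clickhouse/clickhouse-server
```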
@@ -242,7 +242,7 @@ quit
     --create-query-fuzzer-runs=50 \
     --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \
     $NEW_TESTS_OPT \
-    > >(tail -n 100000 > fuzzer.log) \
+    > fuzzer.log \
     2>&1 &
 fuzzer_pid=$!
 echo "Fuzzer pid is $fuzzer_pid"
@@ -390,6 +390,7 @@ rg --text -F '<Fatal>' server.log > fatal.log ||:
 dmesg -T > dmesg.log ||:

 zstd --threads=0 server.log
+zstd --threads=0 fuzzer.log

 cat > report.html <<EOF ||:
 <!DOCTYPE html>
@@ -413,7 +414,7 @@ p.links a { padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-s
 <h1>AST Fuzzer for PR <a href="https://github.com/ClickHouse/ClickHouse/pull/${PR_TO_TEST}">#${PR_TO_TEST}</a> @ ${SHA_TO_TEST}</h1>
 <p class="links">
 <a href="run.log">run.log</a>
-<a href="fuzzer.log">fuzzer.log</a>
+<a href="fuzzer.log.zst">fuzzer.log.zst</a>
 <a href="server.log.zst">server.log.zst</a>
 <a href="main.log">main.log</a>
 <a href="dmesg.log">dmesg.log</a>
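The old redirection used bash process substitution to keep only the last 100,000 lines of output; with the full log now compressed by the `zstd` call added above, a plain redirect is enough. Schematically (`fuzzer_cmd` stands in for the real invocation):

```bash
# Old: truncate on the fly via process substitution
fuzzer_cmd > >(tail -n 100000 > fuzzer.log) 2>&1 &

# New: keep the whole log and compress it afterwards
fuzzer_cmd > fuzzer.log 2>&1 &
wait
zstd --threads=0 fuzzer.log
```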
@@ -11,14 +11,6 @@ RUN apt-get update -y \
     npm \
     && apt-get clean

-COPY s3downloader /s3downloader
-
-ENV S3_URL="https://clickhouse-datasets.s3.amazonaws.com"
-ENV DATASETS="hits visits"
-
 # The following is already done in clickhouse/stateless-test
 # RUN npm install -g azurite
 # RUN npm install tslib

+COPY create.sql /
 COPY run.sh /
 CMD ["/bin/bash", "/run.sh"]
333  docker/test/stateful/create.sql  Normal file

@@ -0,0 +1,333 @@
ATTACH TABLE datasets.hits_v1 UUID '78ebf6a1-d987-4579-b3ec-00c1a087b1f3'
(
    WatchID UInt64,
    JavaEnable UInt8,
    Title String,
    GoodEvent Int16,
    EventTime DateTime,
    EventDate Date,
    CounterID UInt32,
    ClientIP UInt32,
    ClientIP6 FixedString(16),
    RegionID UInt32,
    UserID UInt64,
    CounterClass Int8,
    OS UInt8,
    UserAgent UInt8,
    URL String,
    Referer String,
    URLDomain String,
    RefererDomain String,
    Refresh UInt8,
    IsRobot UInt8,
    RefererCategories Array(UInt16),
    URLCategories Array(UInt16),
    URLRegions Array(UInt32),
    RefererRegions Array(UInt32),
    ResolutionWidth UInt16,
    ResolutionHeight UInt16,
    ResolutionDepth UInt8,
    FlashMajor UInt8,
    FlashMinor UInt8,
    FlashMinor2 String,
    NetMajor UInt8,
    NetMinor UInt8,
    UserAgentMajor UInt16,
    UserAgentMinor FixedString(2),
    CookieEnable UInt8,
    JavascriptEnable UInt8,
    IsMobile UInt8,
    MobilePhone UInt8,
    MobilePhoneModel String,
    Params String,
    IPNetworkID UInt32,
    TraficSourceID Int8,
    SearchEngineID UInt16,
    SearchPhrase String,
    AdvEngineID UInt8,
    IsArtifical UInt8,
    WindowClientWidth UInt16,
    WindowClientHeight UInt16,
    ClientTimeZone Int16,
    ClientEventTime DateTime,
    SilverlightVersion1 UInt8,
    SilverlightVersion2 UInt8,
    SilverlightVersion3 UInt32,
    SilverlightVersion4 UInt16,
    PageCharset String,
    CodeVersion UInt32,
    IsLink UInt8,
    IsDownload UInt8,
    IsNotBounce UInt8,
    FUniqID UInt64,
    HID UInt32,
    IsOldCounter UInt8,
    IsEvent UInt8,
    IsParameter UInt8,
    DontCountHits UInt8,
    WithHash UInt8,
    HitColor FixedString(1),
    UTCEventTime DateTime,
    Age UInt8,
    Sex UInt8,
    Income UInt8,
    Interests UInt16,
    Robotness UInt8,
    GeneralInterests Array(UInt16),
    RemoteIP UInt32,
    RemoteIP6 FixedString(16),
    WindowName Int32,
    OpenerName Int32,
    HistoryLength Int16,
    BrowserLanguage FixedString(2),
    BrowserCountry FixedString(2),
    SocialNetwork String,
    SocialAction String,
    HTTPError UInt16,
    SendTiming Int32,
    DNSTiming Int32,
    ConnectTiming Int32,
    ResponseStartTiming Int32,
    ResponseEndTiming Int32,
    FetchTiming Int32,
    RedirectTiming Int32,
    DOMInteractiveTiming Int32,
    DOMContentLoadedTiming Int32,
    DOMCompleteTiming Int32,
    LoadEventStartTiming Int32,
    LoadEventEndTiming Int32,
    NSToDOMContentLoadedTiming Int32,
    FirstPaintTiming Int32,
    RedirectCount Int8,
    SocialSourceNetworkID UInt8,
    SocialSourcePage String,
    ParamPrice Int64,
    ParamOrderID String,
    ParamCurrency FixedString(3),
    ParamCurrencyID UInt16,
    GoalsReached Array(UInt32),
    OpenstatServiceName String,
    OpenstatCampaignID String,
    OpenstatAdID String,
    OpenstatSourceID String,
    UTMSource String,
    UTMMedium String,
    UTMCampaign String,
    UTMContent String,
    UTMTerm String,
    FromTag String,
    HasGCLID UInt8,
    RefererHash UInt64,
    URLHash UInt64,
    CLID UInt32,
    YCLID UInt64,
    ShareService String,
    ShareURL String,
    ShareTitle String,
    "ParsedParams.Key1" Array(String),
    "ParsedParams.Key2" Array(String),
    "ParsedParams.Key3" Array(String),
    "ParsedParams.Key4" Array(String),
    "ParsedParams.Key5" Array(String),
    "ParsedParams.ValueDouble" Array(Float64),
    IslandID FixedString(16),
    RequestNum UInt32,
    RequestTry UInt8
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID)
SETTINGS disk = disk(type = cache, path = '/var/lib/clickhouse/filesystem_caches/', max_size = '4G',
    disk = disk(type = web, endpoint = 'https://clickhouse-datasets-web.s3.us-east-1.amazonaws.com/'));

ATTACH TABLE datasets.visits_v1 UUID '5131f834-711f-4168-98a5-968b691a104b'
(
    CounterID UInt32,
    StartDate Date,
    Sign Int8,
    IsNew UInt8,
    VisitID UInt64,
    UserID UInt64,
    StartTime DateTime,
    Duration UInt32,
    UTCStartTime DateTime,
    PageViews Int32,
    Hits Int32,
    IsBounce UInt8,
    Referer String,
    StartURL String,
    RefererDomain String,
    StartURLDomain String,
    EndURL String,
    LinkURL String,
    IsDownload UInt8,
    TraficSourceID Int8,
    SearchEngineID UInt16,
    SearchPhrase String,
    AdvEngineID UInt8,
    PlaceID Int32,
    RefererCategories Array(UInt16),
    URLCategories Array(UInt16),
    URLRegions Array(UInt32),
    RefererRegions Array(UInt32),
    IsYandex UInt8,
    GoalReachesDepth Int32,
    GoalReachesURL Int32,
    GoalReachesAny Int32,
    SocialSourceNetworkID UInt8,
    SocialSourcePage String,
    MobilePhoneModel String,
    ClientEventTime DateTime,
    RegionID UInt32,
    ClientIP UInt32,
    ClientIP6 FixedString(16),
    RemoteIP UInt32,
    RemoteIP6 FixedString(16),
    IPNetworkID UInt32,
    SilverlightVersion3 UInt32,
    CodeVersion UInt32,
    ResolutionWidth UInt16,
    ResolutionHeight UInt16,
    UserAgentMajor UInt16,
    UserAgentMinor UInt16,
    WindowClientWidth UInt16,
    WindowClientHeight UInt16,
    SilverlightVersion2 UInt8,
    SilverlightVersion4 UInt16,
    FlashVersion3 UInt16,
    FlashVersion4 UInt16,
    ClientTimeZone Int16,
    OS UInt8,
    UserAgent UInt8,
    ResolutionDepth UInt8,
    FlashMajor UInt8,
    FlashMinor UInt8,
    NetMajor UInt8,
    NetMinor UInt8,
    MobilePhone UInt8,
    SilverlightVersion1 UInt8,
    Age UInt8,
    Sex UInt8,
    Income UInt8,
    JavaEnable UInt8,
    CookieEnable UInt8,
    JavascriptEnable UInt8,
    IsMobile UInt8,
    BrowserLanguage UInt16,
    BrowserCountry UInt16,
    Interests UInt16,
    Robotness UInt8,
    GeneralInterests Array(UInt16),
    Params Array(String),
    "Goals.ID" Array(UInt32),
    "Goals.Serial" Array(UInt32),
    "Goals.EventTime" Array(DateTime),
    "Goals.Price" Array(Int64),
    "Goals.OrderID" Array(String),
    "Goals.CurrencyID" Array(UInt32),
    WatchIDs Array(UInt64),
    ParamSumPrice Int64,
    ParamCurrency FixedString(3),
    ParamCurrencyID UInt16,
    ClickLogID UInt64,
    ClickEventID Int32,
    ClickGoodEvent Int32,
    ClickEventTime DateTime,
    ClickPriorityID Int32,
    ClickPhraseID Int32,
    ClickPageID Int32,
    ClickPlaceID Int32,
    ClickTypeID Int32,
    ClickResourceID Int32,
    ClickCost UInt32,
    ClickClientIP UInt32,
    ClickDomainID UInt32,
    ClickURL String,
    ClickAttempt UInt8,
    ClickOrderID UInt32,
    ClickBannerID UInt32,
    ClickMarketCategoryID UInt32,
    ClickMarketPP UInt32,
    ClickMarketCategoryName String,
    ClickMarketPPName String,
    ClickAWAPSCampaignName String,
    ClickPageName String,
    ClickTargetType UInt16,
    ClickTargetPhraseID UInt64,
    ClickContextType UInt8,
    ClickSelectType Int8,
    ClickOptions String,
    ClickGroupBannerID Int32,
    OpenstatServiceName String,
    OpenstatCampaignID String,
    OpenstatAdID String,
    OpenstatSourceID String,
    UTMSource String,
    UTMMedium String,
    UTMCampaign String,
    UTMContent String,
    UTMTerm String,
    FromTag String,
    HasGCLID UInt8,
    FirstVisit DateTime,
    PredLastVisit Date,
    LastVisit Date,
    TotalVisits UInt32,
    "TraficSource.ID" Array(Int8),
    "TraficSource.SearchEngineID" Array(UInt16),
    "TraficSource.AdvEngineID" Array(UInt8),
    "TraficSource.PlaceID" Array(UInt16),
    "TraficSource.SocialSourceNetworkID" Array(UInt8),
    "TraficSource.Domain" Array(String),
    "TraficSource.SearchPhrase" Array(String),
    "TraficSource.SocialSourcePage" Array(String),
    Attendance FixedString(16),
    CLID UInt32,
    YCLID UInt64,
    NormalizedRefererHash UInt64,
    SearchPhraseHash UInt64,
    RefererDomainHash UInt64,
    NormalizedStartURLHash UInt64,
    StartURLDomainHash UInt64,
    NormalizedEndURLHash UInt64,
    TopLevelDomain UInt64,
    URLScheme UInt64,
    OpenstatServiceNameHash UInt64,
    OpenstatCampaignIDHash UInt64,
    OpenstatAdIDHash UInt64,
    OpenstatSourceIDHash UInt64,
    UTMSourceHash UInt64,
    UTMMediumHash UInt64,
    UTMCampaignHash UInt64,
    UTMContentHash UInt64,
    UTMTermHash UInt64,
    FromHash UInt64,
    WebVisorEnabled UInt8,
    WebVisorActivity UInt32,
    "ParsedParams.Key1" Array(String),
    "ParsedParams.Key2" Array(String),
    "ParsedParams.Key3" Array(String),
    "ParsedParams.Key4" Array(String),
    "ParsedParams.Key5" Array(String),
    "ParsedParams.ValueDouble" Array(Float64),
    "Market.Type" Array(UInt8),
    "Market.GoalID" Array(UInt32),
    "Market.OrderID" Array(String),
    "Market.OrderPrice" Array(Int64),
    "Market.PP" Array(UInt32),
    "Market.DirectPlaceID" Array(UInt32),
    "Market.DirectOrderID" Array(UInt32),
    "Market.DirectBannerID" Array(UInt32),
    "Market.GoodID" Array(String),
    "Market.GoodName" Array(String),
    "Market.GoodQuantity" Array(Int32),
    "Market.GoodPrice" Array(Int64),
    IslandID FixedString(16)
)
ENGINE = CollapsingMergeTree(Sign)
PARTITION BY toYYYYMM(StartDate)
ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
SAMPLE BY intHash32(UserID)
SETTINGS disk = disk(type = cache, path = '/var/lib/clickhouse/filesystem_caches/', max_size = '4G',
    disk = disk(type = web, endpoint = 'https://clickhouse-datasets-web.s3.us-east-1.amazonaws.com/'));
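The nested `disk(...)` clauses in the SETTINGS above layer a local filesystem cache (capped at 4G) over a read-only web disk, so both tables attach directly from the public dataset endpoint instead of being downloaded and unpacked. A quick sanity check after attaching might look like this (the count query is illustrative):

```bash
clickhouse-client --query "SHOW TABLES FROM datasets"
clickhouse-client --query "SELECT count() FROM datasets.hits_v1"
```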
@@ -44,6 +44,9 @@ if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TR
     # It is not needed, we will explicitly create tables on s3.
     # We do not have stateful tests with s3 storage run in public repository, but this is needed for another repository.
     rm /etc/clickhouse-server/config.d/s3_storage_policy_for_merge_tree_by_default.xml
+
+    rm /etc/clickhouse-server/config.d/storage_metadata_with_full_object_key.xml
+    rm /etc/clickhouse-server/config.d/s3_storage_policy_with_template_object_key.xml
 fi

 function start()
@@ -94,21 +97,9 @@ start

 setup_logs_replication

-# shellcheck disable=SC2086 # No quotes because I want to split it into words.
-/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
-chmod 777 -R /var/lib/clickhouse
-clickhouse-client --query "SHOW DATABASES"
-
-clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
-
-service clickhouse-server restart
-
-# Wait for server to start accepting connections
-for _ in {1..120}; do
-    clickhouse-client --query "SELECT 1" && break
-    sleep 1
-done
-
+clickhouse-client --query "CREATE DATABASE datasets"
+clickhouse-client --multiquery < create.sql
+clickhouse-client --query "SHOW TABLES FROM datasets"

 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@@ -1,126 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import tarfile
import logging
import argparse
import requests
import tempfile


DEFAULT_URL = "https://clickhouse-datasets.s3.amazonaws.com"

AVAILABLE_DATASETS = {
    "hits": "hits_v1.tar",
    "visits": "visits_v1.tar",
}

RETRIES_COUNT = 5


def _get_temp_file_name():
    return os.path.join(
        tempfile._get_default_tempdir(), next(tempfile._get_candidate_names())
    )


def build_url(base_url, dataset):
    return os.path.join(base_url, dataset, "partitions", AVAILABLE_DATASETS[dataset])


def download_with_progress(url, path):
    logging.info("Downloading from %s to temp path %s", url, path)
    for i in range(RETRIES_COUNT):
        try:
            with open(path, "wb") as f:
                response = requests.get(url, stream=True)
                response.raise_for_status()
                total_length = response.headers.get("content-length")
                if total_length is None or int(total_length) == 0:
                    logging.info(
                        "No content-length, will download file without progress"
                    )
                    f.write(response.content)
                else:
                    dl = 0
                    total_length = int(total_length)
                    logging.info("Content length is %ld bytes", total_length)
                    for data in response.iter_content(chunk_size=4096):
                        dl += len(data)
                        f.write(data)
                        if sys.stdout.isatty():
                            done = int(50 * dl / total_length)
                            percent = int(100 * float(dl) / total_length)
                            sys.stdout.write(
                                "\r[{}{}] {}%".format(
                                    "=" * done, " " * (50 - done), percent
                                )
                            )
                            sys.stdout.flush()
            break
        except Exception as ex:
            sys.stdout.write("\n")
            time.sleep(3)
            logging.info("Exception while downloading %s, retry %s", ex, i + 1)
            if os.path.exists(path):
                os.remove(path)
    else:
        raise Exception(
            "Cannot download dataset from {}, all retries exceeded".format(url)
        )

    sys.stdout.write("\n")
    logging.info("Downloading finished")


def unpack_to_clickhouse_directory(tar_path, clickhouse_path):
    logging.info(
        "Will unpack data from temp path %s to clickhouse db %s",
        tar_path,
        clickhouse_path,
    )
    with tarfile.open(tar_path, "r") as comp_file:
        comp_file.extractall(path=clickhouse_path)
    logging.info("Unpack finished")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        description="Simple tool for dowloading datasets for clickhouse from S3"
    )

    parser.add_argument(
        "--dataset-names",
        required=True,
        nargs="+",
        choices=list(AVAILABLE_DATASETS.keys()),
    )
    parser.add_argument("--url-prefix", default=DEFAULT_URL)
    parser.add_argument("--clickhouse-data-path", default="/var/lib/clickhouse/")

    args = parser.parse_args()
    datasets = args.dataset_names
    logging.info("Will fetch following datasets: %s", ", ".join(datasets))
    for dataset in datasets:
        logging.info("Processing %s", dataset)
        temp_archive_path = _get_temp_file_name()
        try:
            download_url_for_dataset = build_url(args.url_prefix, dataset)
            download_with_progress(download_url_for_dataset, temp_archive_path)
            unpack_to_clickhouse_directory(temp_archive_path, args.clickhouse_data_path)
        except Exception as ex:
            logging.info("Some exception occured %s", str(ex))
            raise
        finally:
            logging.info(
                "Will remove downloaded file %s from filesystem if it exists",
                temp_archive_path,
            )
            if os.path.exists(temp_archive_path):
                os.remove(temp_archive_path)
        logging.info("Processing of %s finished", dataset)
    logging.info("Fetch finished, enjoy your tables!")
@@ -46,7 +46,7 @@ RUN apt-get update -y \
     p7zip-full \
     && apt-get clean

-RUN pip3 install numpy scipy pandas Jinja2
+RUN pip3 install numpy scipy pandas Jinja2 pyarrow

 RUN mkdir -p /tmp/clickhouse-odbc-tmp \
     && wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
@@ -23,8 +23,6 @@ RUN apt-get update -y \

 COPY run.sh /

-ENV DATASETS="hits visits"
-ENV S3_URL="https://clickhouse-datasets.s3.amazonaws.com"
 ENV EXPORT_S3_STORAGE_POLICIES=1

 CMD ["/bin/bash", "/run.sh"]
@@ -59,12 +59,11 @@ start

 setup_logs_replication

-# shellcheck disable=SC2086 # No quotes because I want to split it into words.
-/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
-chmod 777 -R /var/lib/clickhouse
-clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
-clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
+clickhouse-client --query "CREATE DATABASE datasets"
+clickhouse-client --multiquery < create.sql
+clickhouse-client --query "SHOW TABLES FROM datasets"
+
+clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"

 stop
 mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log
@@ -193,6 +192,7 @@ stop

 # Let's enable S3 storage by default
 export USE_S3_STORAGE_FOR_MERGE_TREE=1
+export RANDOMIZE_OBJECT_KEY_TYPE=1
 export ZOOKEEPER_FAULT_INJECTION=1
 configure
@@ -78,6 +78,7 @@ remove_keeper_config "create_if_not_exists" "[01]"
 rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
 rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
+rm /etc/clickhouse-server/config.d/storage_conf_02963.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
 rm /etc/clickhouse-server/users.d/s3_cache_new.xml
 rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
@@ -117,6 +118,7 @@ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_defau
 rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
 rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
+rm /etc/clickhouse-server/config.d/storage_conf_02963.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
 rm /etc/clickhouse-server/users.d/s3_cache_new.xml
 rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
@@ -11,7 +11,7 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec

 ``` sql
 CREATE TABLE s3_queue_engine_table (name String, value UInt32)
-    ENGINE = S3Queue(path [, NOSIGN | aws_access_key_id, aws_secret_access_key,] format, [compression])
+    ENGINE = S3Queue(path, [NOSIGN, | aws_access_key_id, aws_secret_access_key,] format, [compression])
     [SETTINGS]
     [mode = 'unordered',]
     [after_processing = 'keep',]
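With the corrected signature, the comma precedes `NOSIGN`. A hypothetical table reading from a public (unsigned) bucket would look like:

```bash
clickhouse-client --query "
    CREATE TABLE s3_queue_engine_table (name String, value UInt32)
    ENGINE = S3Queue('https://example-bucket.s3.amazonaws.com/data/*.csv', NOSIGN, 'CSV')
    SETTINGS mode = 'unordered'
"
```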
@@ -504,24 +504,25 @@ Indexes of type `set` can be utilized by all functions. The other index types ar

 | Function (operator) / Index                                                                           | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | inverted |
 |-------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|----------|
-| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#equals)                     | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [notEquals(!=, <>)](/docs/en/sql-reference/functions/comparison-functions.md/#notequals)               | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [like](/docs/en/sql-reference/functions/string-search-functions.md/#function-like)                     | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
-| [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#function-notlike)               | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
+| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#equals)                     | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [notEquals(!=, <>)](/docs/en/sql-reference/functions/comparison-functions.md/#notequals)               | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [like](/docs/en/sql-reference/functions/string-search-functions.md/#like)                              | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
+| [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#notlike)                        | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
 | [match](/docs/en/sql-reference/functions/string-search-functions.md/#match)                            | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ |
 | [startsWith](/docs/en/sql-reference/functions/string-functions.md/#startswith)                         | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
 | [endsWith](/docs/en/sql-reference/functions/string-functions.md/#endswith)                             | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ |
-| [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ |
-| [in](/docs/en/sql-reference/functions/in-functions#in-functions)                                       | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [notIn](/docs/en/sql-reference/functions/in-functions#in-functions)                                    | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#less)                             | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#greater)                       | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#lessorequals)            | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#greaterorequals)      | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [empty](/docs/en/sql-reference/functions/array-functions#function-empty)                               | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty)                         | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [has](/docs/en/sql-reference/functions/array-functions#function-has)                                   | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
-| [hasAny](/docs/en/sql-reference/functions/array-functions#function-hasAny)                             | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
-| [hasAll](/docs/en/sql-reference/functions/array-functions#function-hasAll)                             | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
+| [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#multisearchany)          | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ |
+| [in](/docs/en/sql-reference/functions/in-functions)                                                    | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [notIn](/docs/en/sql-reference/functions/in-functions)                                                 | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#less)                             | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#greater)                       | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#lessorequals)            | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#greaterorequals)      | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [empty](/docs/en/sql-reference/functions/array-functions/#empty)                                       | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [notEmpty](/docs/en/sql-reference/functions/array-functions/#notempty)                                 | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [has](/docs/en/sql-reference/functions/array-functions/#has)                                           | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
+| [hasAny](/docs/en/sql-reference/functions/array-functions/#hasany)                                     | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
+| [hasAll](/docs/en/sql-reference/functions/array-functions/#hasall)                                     | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
 | hasToken                                                                                               | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
 | hasTokenOrNull                                                                                         | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
 | hasTokenCaseInsensitive (*)                                                                            | ✗ | ✗ | ✗ | ✔ | ✗ | ✗ |
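As the table shows, `hasToken` is served by a `tokenbf_v1` skip index (and by full-text `inverted` indexes). A sketch of adding such an index to an existing table — the table and column names are illustrative, and the `tokenbf_v1(size_in_bytes, hash_count, seed)` parameters follow the documented signature:

```bash
clickhouse-client --query "
    ALTER TABLE tab
    ADD INDEX body_tokens_idx body TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 4
"
```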
@@ -1143,6 +1144,8 @@ Optional parameters:
 - `s3_max_get_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_get_rps`.
 - `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk).
 - `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk).
+- `key_template` — Defines the format with which object keys are generated. By default, ClickHouse takes the `root path` from the `endpoint` option and adds a randomly generated suffix (a directory of 3 random symbols and a file name of 29 random symbols). With this option you have full control over how object keys are generated. Some usage scenarios require random symbols in the prefix or in the middle of the object key, for example: `[a-z]{3}-prefix-random/constant-part/random-middle-[a-z]{3}/random-suffix-[a-z]{29}`. The value is parsed with [`re2`](https://github.com/google/re2/wiki/Syntax); only a subset of the syntax is supported, so check that your preferred format is supported before using this option. The disk isn't initialized if ClickHouse is unable to generate a key from the value of `key_template`. Requires the feature flag [storage_metadata_write_full_object_key](/docs/en/operations/settings/settings#storage_metadata_write_full_object_key) to be enabled, forbids declaring the `root path` in the `endpoint` option, and requires the `key_compatibility_prefix` option to be defined.
+- `key_compatibility_prefix` — Required when `key_template` is in use. In order to read object keys that were stored in metadata files with a metadata version lower than `VERSION_FULL_OBJECT_KEY`, the previous `root path` from the `endpoint` option should be set here.
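These two parameters sit alongside the other disk options in the storage configuration. A hypothetical config drop-in exercising them (the file name, endpoint, and the assumption that the XML element names mirror the parameter names are all illustrative; the `key_template` value is the example from the description above):

```bash
# Sketch: write a config.d drop-in with a templated object key (illustrative)
cat > /etc/clickhouse-server/config.d/s3_template_key.xml <<'EOF'
<clickhouse>
    <storage_configuration>
        <disks>
            <s3_templated>
                <type>s3</type>
                <endpoint>https://example-bucket.s3.amazonaws.com/</endpoint>
                <key_template>[a-z]{3}-prefix-random/constant-part/random-middle-[a-z]{3}/random-suffix-[a-z]{29}</key_template>
                <key_compatibility_prefix>data/old-root/</key_compatibility_prefix>
            </s3_templated>
        </disks>
    </storage_configuration>
</clickhouse>
EOF
```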
### Configuring the cache
@@ -2356,6 +2356,8 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filenam
 ### Arrow format settings {#parquet-format-settings}

 - [output_format_arrow_low_cardinality_as_dictionary](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_low_cardinality_as_dictionary) - enable output ClickHouse LowCardinality type as Dictionary Arrow type. Default value - `false`.
+- [output_format_arrow_use_64_bit_indexes_for_dictionary](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_use_64_bit_indexes_for_dictionary) - use 64-bit integer type for Dictionary indexes. Default value - `false`.
+- [output_format_arrow_use_signed_indexes_for_dictionary](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_use_signed_indexes_for_dictionary) - use signed integer type for Dictionary indexes. Default value - `true`.
 - [output_format_arrow_string_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_string_as_string) - use Arrow String type instead of Binary for String columns. Default value - `false`.
 - [input_format_arrow_case_insensitive_column_matching](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_case_insensitive_column_matching) - ignore case when matching Arrow columns with ClickHouse columns. Default value - `false`.
 - [input_format_arrow_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_allow_missing_columns) - allow missing columns while reading Arrow data. Default value - `false`.
|
@ -65,6 +65,20 @@ With Cluster Discovery, rather than defining each node explicitly, you simply sp
|
||||
<cluster_name>
|
||||
<discovery>
|
||||
<path>/clickhouse/discovery/cluster_name</path>
|
||||
|
||||
<!-- # Optional configuration parameters: -->
|
||||
|
||||
<!-- ## Authentication credentials to access all other nodes in cluster: -->
|
||||
<!-- <user>user1</user> -->
|
||||
<!-- <password>pass123</password> -->
|
||||
<!-- ### Alternatively to password, interserver secret may be used: -->
|
||||
<!-- <secret>secret123</secret> -->
|
||||
|
||||
<!-- ## Shard for current node (see below): -->
|
||||
<!-- <shard>1</shard> -->
|
||||
|
||||
<!-- ## Observer mode (see below): -->
|
||||
<!-- <observer/> -->
|
||||
</discovery>
|
||||
</cluster_name>
|
||||
</remote_servers>
|
||||
|
@ -29,10 +29,6 @@ Transactionally inconsistent caching is traditionally provided by client tools o
|
||||
the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
|
||||
This reduces maintenance effort and avoids redundancy.
|
||||
|
||||
:::note
|
||||
Security consideration: The cached query result is tied to the user executing it. Authorization checks are performed when the query is executed. This means that if there are any alterations to the user's role or permissions between the time the query is cached and when the cache is accessed, the result will not reflect these changes. We recommend using different users to distinguish between different levels of access, instead of actively toggling roles for a single user between queries, as this practice may lead to unexpected query results.
|
||||
:::
|
||||
|
||||
## Configuration Settings and Usage
|
||||
|
||||
Setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries of the
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
sidebar_label: Settings Overview
|
||||
title: "Settings Overview"
|
||||
sidebar_position: 1
|
||||
slug: /en/operations/settings/
|
||||
pagination_next: en/operations/settings/settings
|
||||
@ -16,11 +16,34 @@ There are two main groups of ClickHouse settings:
|
||||
- Global server settings
|
||||
- Query-level settings
|
||||
|
||||
The main distinction between global server settings and query-level settings is that
|
||||
global server settings must be set in configuration files while query-level settings
|
||||
can be set in configuration files or with SQL queries.
|
||||
The main distinction between global server settings and query-level settings is that global server settings must be set in configuration files, while query-level settings can be set in configuration files or with SQL queries.
|
||||
|
||||
Read about [global server settings](/docs/en/operations/server-configuration-parameters/settings.md) to learn more about configuring your ClickHouse server at the global server level.
|
||||
|
||||
Read about [query-level settings](/docs/en/operations/settings/settings-query-level.md) to learn more about configuring your ClickHouse server at the query-level.
|
||||
Read about [query-level settings](/docs/en/operations/settings/settings-query-level.md) to learn more about configuring your ClickHouse server at the query level.
|
||||
|
||||
## See non-default settings
|
||||
|
||||
To view which settings have been changed from their default value:
|
||||
|
||||
```sql
|
||||
SELECT name, value FROM system.settings WHERE changed
|
||||
```
|
||||
|
||||
If you haven't changed any settings from their default value, then ClickHouse will return nothing.
|
||||
|
||||
To check the value of a particular setting, specify the `name` of the setting in your query:
|
||||
|
||||
```sql
|
||||
SELECT name, value FROM system.settings WHERE name = 'max_threads'
|
||||
```
|
||||
|
||||
This command should return something like:
|
||||
|
||||
```response
|
||||
┌─name────────┬─value─────┐
|
||||
│ max_threads │ 'auto(8)' │
|
||||
└─────────────┴───────────┘
|
||||
|
||||
1 row in set. Elapsed: 0.002 sec.
|
||||
```
|
||||
|
176
docs/en/operations/settings/mysql-binlog-client.md
Normal file
176
docs/en/operations/settings/mysql-binlog-client.md
Normal file
@ -0,0 +1,176 @@
|
||||
# The MySQL Binlog Client
|
||||
|
||||
The MySQL Binlog Client provides a mechanism in ClickHouse to share the binlog from a MySQL instance among multiple [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md) databases. This avoids consuming unnecessary bandwidth and CPU when replicating more than one schema/database.
|
||||
|
||||
The implementation is resilient against crashes and disk issues. The executed GTID sets of the binlog itself and of the consuming databases are persisted only after the data they describe has been safely persisted as well. The implementation also tolerates re-doing aborted operations (at-least-once delivery).
|
||||
|
||||
# Settings
|
||||
|
||||
## use_binlog_client
|
||||
|
||||
Forces reuse of an existing MySQL binlog connection, or creates a new one if none exists. The connection is defined by `user:pass@host:port`.
|
||||
|
||||
Default value: 0
|
||||
|
||||
**Example**
|
||||
|
||||
```sql
|
||||
-- create MaterializedMySQL databases that read the events from the binlog client
|
||||
CREATE DATABASE db1 ENGINE = MaterializedMySQL('host:port', 'db1', 'user', 'password') SETTINGS use_binlog_client=1
|
||||
CREATE DATABASE db2 ENGINE = MaterializedMySQL('host:port', 'db2', 'user', 'password') SETTINGS use_binlog_client=1
|
||||
CREATE DATABASE db3 ENGINE = MaterializedMySQL('host:port', 'db3', 'user2', 'password2') SETTINGS use_binlog_client=1
|
||||
```
|
||||
|
||||
Databases `db1` and `db2` will use the same binlog connection, since they use the same `user:pass@host:port`. Database `db3` will use a separate binlog connection.
|
||||
|
||||
## max_bytes_in_binlog_queue
|
||||
|
||||
Defines the limit of bytes in the binlog events queue. If the number of bytes in the queue exceeds this limit, reading new events from MySQL stops until space is freed. This introduces a memory limit: a very high value could consume all available memory, while a very low value could make the databases wait for new events.
|
||||
|
||||
Default value: 67108864
|
||||
|
||||
**Example**
|
||||
|
||||
```sql
|
||||
CREATE DATABASE db1 ENGINE = MaterializedMySQL('host:port', 'db1', 'user', 'password') SETTINGS use_binlog_client=1, max_bytes_in_binlog_queue=33554432
|
||||
CREATE DATABASE db2 ENGINE = MaterializedMySQL('host:port', 'db2', 'user', 'password') SETTINGS use_binlog_client=1
|
||||
```
|
||||
|
||||
If database `db1` is unable to consume binlog events fast enough and the size of the events queue exceeds `33554432` bytes, reading of new events from MySQL is postponed until `db1`
|
||||
consumes the events and releases some space.
|
||||
|
||||
NOTE: This also impacts `db2`, which will be waiting for new events too, since they share the same connection.
|
||||
|
||||
## max_milliseconds_to_wait_in_binlog_queue
|
||||
|
||||
Defines the max milliseconds to wait when `max_bytes_in_binlog_queue` is exceeded. After that, the database is detached from the current binlog connection and a new one is established, to prevent other databases from waiting on this database.
|
||||
|
||||
Default value: 10000
|
||||
|
||||
**Example**
|
||||
|
||||
```sql
|
||||
CREATE DATABASE db1 ENGINE = MaterializedMySQL('host:port', 'db1', 'user', 'password') SETTINGS use_binlog_client=1, max_bytes_in_binlog_queue=33554432, max_milliseconds_to_wait_in_binlog_queue=1000
|
||||
CREATE DATABASE db2 ENGINE = MaterializedMySQL('host:port', 'db2', 'user', 'password') SETTINGS use_binlog_client=1
|
||||
```
|
||||
|
||||
If the event queue of database `db1` is full, the binlog connection will wait for `1000` ms, and if the database is still unable to consume the events, it will be detached from the connection so that another one can be created.
|
||||
|
||||
NOTE: If the database `db1` has been detached from the shared connection and a new one has been created, then once the binlog connections for `db1` and `db2` reach the same position they will be merged into one, and `db1` and `db2` will use the same connection again.
|
||||
|
||||
## max_bytes_in_binlog_dispatcher_buffer
|
||||
|
||||
Defines the max bytes in the binlog dispatcher's buffer before they are flushed to the attached binlogs. Events from the MySQL binlog connection are buffered before being sent to the attached databases. This increases event throughput from the binlog to the databases.
|
||||
|
||||
Default value: 1048576
|
||||
|
||||
## max_flush_milliseconds_in_binlog_dispatcher
|
||||
|
||||
Defines the max milliseconds the binlog dispatcher's buffer waits before it is flushed to the attached binlogs. If no events have been received from the MySQL binlog connection for a while, the buffered events are eventually sent to the attached databases.
|
||||
|
||||
Default value: 1000
|
||||
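**Example**

A hedged sketch combining the two dispatcher settings (the values are arbitrary illustrations, not recommendations):

```sql
-- Buffer up to 1 MiB of events and flush the dispatcher buffer at least every 500 ms.
CREATE DATABASE db ENGINE = MaterializedMySQL('host:port', 'db', 'user', 'password')
SETTINGS use_binlog_client=1,
         max_bytes_in_binlog_dispatcher_buffer=1048576,
         max_flush_milliseconds_in_binlog_dispatcher=500
```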
|
||||
# Design
|
||||
|
||||
## The Binlog Events Dispatcher
|
||||
|
||||
Currently each MaterializedMySQL database opens its own connection to MySQL to subscribe to binlog events. There is a need to have only one connection and _dispatch_ the binlog events to all databases that replicate from the same MySQL instance.
|
||||
|
||||
## Each MaterializedMySQL Database Has Its Own Event Queue
|
||||
|
||||
To prevent slowing down other instances, there should be an _event queue_ per MaterializedMySQL database to handle events independently of the speed of other instances. The dispatcher reads an event from the binlog and sends it to every MaterializedMySQL database that needs it. Each database handles its events in separate threads.
|
||||
|
||||
## Catching up
|
||||
|
||||
If several databases have the same binlog position, they can use the same dispatcher. If a newly created database (or one that has been detached for some time) requests events that have been already processed, we need to create another communication _channel_ to the binlog. We do this by creating another temporary dispatcher for such databases. When the new dispatcher _catches up with_ the old one, the new/temporary dispatcher is not needed anymore and all databases getting events from this dispatcher can be moved to the old one.
|
||||
|
||||
## Memory Limit
|
||||
|
||||
There is a _memory limit_ to control event queue memory consumption per MySQL Client. If a database is not able to handle events fast enough, and the event queue is getting full, we have the following options:
|
||||
|
||||
1. The dispatcher is blocked until the slowest database frees up space for new events. All other databases wait for the slowest one. (Preferred)
|
||||
2. The dispatcher is _never_ blocked, but suspends incremental sync for the slow database and continues dispatching events to the remaining databases.
|
||||
|
||||
## Performance
|
||||
|
||||
A lot of CPU can be saved by not processing every event in every database. Since the binlog contains events for all databases, it is wasteful to distribute row events to a database that will not process them, especially if there are many databases. This requires some sort of per-database binlog filtering and buffering.
|
||||
|
||||
Currently, all events are sent to all MaterializedMySQL databases, but parsing the event, which consumes CPU, is left to each database.
|
||||
|
||||
# Detailed Design
|
||||
|
||||
1. If a client (e.g. a database) wants to read a stream of events from the MySQL binlog, it creates a connection to the remote binlog using host/user/password and _executed GTID set_ parameters.
|
||||
2. If another client wants to read events from the binlog but for a different _executed GTID set_, it is **not** possible to reuse the existing connection to MySQL, so another connection to the same remote binlog must be created. (_This is how it is implemented today_).
|
||||
3. When these 2 connections reach the same binlog position, they read the same events. It is logical to drop the duplicate connection and move all of its users over, so that one connection dispatches binlog events to several clients. Obviously, only connections to the same binlog should be merged.
|
||||
|
||||
## Classes
|
||||
|
||||
1. One connection can send (or dispatch) events to several clients and might be called `BinlogEventsDispatcher`.
|
||||
2. Several dispatchers are grouped by _user:password@host:port_ into a `BinlogClient`, since they point to the same binlog.
|
||||
3. Clients should communicate only via the public API of `BinlogClient`. The result of using `BinlogClient` is an object that implements `IBinlog` to read events from. This implementation of `IBinlog` must be compatible with the old implementation `MySQLFlavor`: when replacing the old implementation with the new one, the behavior must not change.
|
||||
|
||||
## SQL
|
||||
|
||||
```sql
|
||||
-- create MaterializedMySQL databases that read the events from the binlog client
|
||||
CREATE DATABASE db1_client1 ENGINE = MaterializedMySQL('host:port', 'db', 'user', 'password') SETTINGS use_binlog_client=1, max_bytes_in_binlog_queue=1024;
|
||||
CREATE DATABASE db2_client1 ENGINE = MaterializedMySQL('host:port', 'db', 'user', 'password') SETTINGS use_binlog_client=1;
|
||||
CREATE DATABASE db3_client1 ENGINE = MaterializedMySQL('host:port', 'db2', 'user', 'password') SETTINGS use_binlog_client=1;
|
||||
CREATE DATABASE db4_client2 ENGINE = MaterializedMySQL('host2:port', 'db', 'user', 'password') SETTINGS use_binlog_client=1;
|
||||
CREATE DATABASE db5_client3 ENGINE = MaterializedMySQL('host:port', 'db', 'user1', 'password') SETTINGS use_binlog_client=1;
|
||||
CREATE DATABASE db6_old ENGINE = MaterializedMySQL('host:port', 'db', 'user1', 'password') SETTINGS use_binlog_client=0;
|
||||
```
|
||||
|
||||
Databases `db1_client1`, `db2_client1` and `db3_client1` share one instance of `BinlogClient` since they have the same parameters. `BinlogClient` will create 3 connections to the MySQL server and thus 3 instances of `BinlogEventsDispatcher`, but if these connections reach the same binlog position, they should be merged into one connection: all clients will be moved to one dispatcher and the others will be closed. Databases `db4_client2` and `db5_client3` would use 2 different independent `BinlogClient` instances. Database `db6_old` will use the old implementation. NOTE: By default `use_binlog_client` is disabled. Setting `max_bytes_in_binlog_queue` defines the max allowed bytes in the binlog queue. By default, it is `1073741824` bytes. If the number of bytes exceeds this limit, dispatching will be stopped until space is freed for new events.
|
||||
|
||||
## Binlog Table Structure
|
||||
|
||||
To see the status of all `BinlogClient` instances, there is the `system.mysql_binlogs` system table. It shows the list of all created and _alive_ `IBinlog` instances with information about their `BinlogEventsDispatcher` and `BinlogClient`.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
SELECT * FROM system.mysql_binlogs FORMAT Vertical
|
||||
Row 1:
|
||||
──────
|
||||
binlog_client_name: root@127.0.0.1:3306
|
||||
name: test_Clickhouse1
|
||||
mysql_binlog_name: binlog.001154
|
||||
mysql_binlog_pos: 7142294
|
||||
mysql_binlog_timestamp: 1660082447
|
||||
mysql_binlog_executed_gtid_set: a9d88f83-c14e-11ec-bb36-244bfedf7766:1-30523304
|
||||
dispatcher_name: Applier
|
||||
dispatcher_mysql_binlog_name: binlog.001154
|
||||
dispatcher_mysql_binlog_pos: 7142294
|
||||
dispatcher_mysql_binlog_timestamp: 1660082447
|
||||
dispatcher_mysql_binlog_executed_gtid_set: a9d88f83-c14e-11ec-bb36-244bfedf7766:1-30523304
|
||||
size: 0
|
||||
bytes: 0
|
||||
max_bytes: 0
|
||||
```
|
||||
|
||||
### Tests
|
||||
|
||||
Unit tests:
|
||||
|
||||
```
|
||||
$ ./unit_tests_dbms --gtest_filter=MySQLBinlog.*
|
||||
```
|
||||
|
||||
Integration tests:
|
||||
|
||||
```
|
||||
$ pytest -s -vv test_materialized_mysql_database/test.py::test_binlog_client
|
||||
```
|
||||
|
||||
Dump events from a file:
|
||||
|
||||
```
|
||||
$ ./utils/check-mysql-binlog/check-mysql-binlog --binlog binlog.001392
|
||||
```
|
||||
|
||||
Dump events from a server:
|
||||
|
||||
```
|
||||
$ ./utils/check-mysql-binlog/check-mysql-binlog --host 127.0.0.1 --port 3306 --user root --password pass --gtid a9d88f83-c14e-11ec-bb36-244bfedf7766:1-30462856
|
||||
```
|
@ -1269,6 +1269,28 @@ Possible values:
|
||||
|
||||
Default value: `0`.
|
||||
|
||||
### output_format_arrow_use_signed_indexes_for_dictionary {#output_format_arrow_use_signed_indexes_for_dictionary}
|
||||
|
||||
Use signed integer types instead of unsigned in `DICTIONARY` type of the [Arrow](../../interfaces/formats.md/#data-format-arrow) format during [LowCardinality](../../sql-reference/data-types/lowcardinality.md) output when `output_format_arrow_low_cardinality_as_dictionary` is enabled.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Unsigned integer types are used for indexes in `DICTIONARY` type.
|
||||
- 1 — Signed integer types are used for indexes in `DICTIONARY` type.
|
||||
|
||||
Default value: `1`.
|
||||
|
||||
### output_format_arrow_use_64_bit_indexes_for_dictionary {#output_format_arrow_use_64_bit_indexes_for_dictionary}
|
||||
|
||||
Use 64-bit integer type in `DICTIONARY` type of the [Arrow](../../interfaces/formats.md/#data-format-arrow) format during [LowCardinality](../../sql-reference/data-types/lowcardinality.md) output when `output_format_arrow_low_cardinality_as_dictionary` is enabled.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Type for indexes in `DICTIONARY` type is determined automatically.
|
||||
- 1 — 64-bit integer type is used for indexes in `DICTIONARY` type.
|
||||
|
||||
Default value: `0`.
|
||||
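As a hedged illustration of how these settings interact (the file name is hypothetical), a LowCardinality column can be exported as an Arrow Dictionary with 64-bit indexes:

```sql
SET output_format_arrow_low_cardinality_as_dictionary = 1;
SET output_format_arrow_use_64_bit_indexes_for_dictionary = 1;

-- The LowCardinality(String) column is written as a Dictionary with 64-bit indexes.
SELECT toLowCardinality(toString(number)) AS s
FROM numbers(10)
INTO OUTFILE 'dict.arrow'
FORMAT Arrow;
```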
|
||||
### output_format_arrow_string_as_string {#output_format_arrow_string_as_string}
|
||||
|
||||
Use Arrow String type instead of Binary for String columns.
|
||||
|
@ -4773,6 +4773,45 @@ Type: Int64
|
||||
|
||||
Default: 0
|
||||
|
||||
## enable_deflate_qpl_codec {#enable_deflate_qpl_codec}
|
||||
|
||||
If turned on, the DEFLATE_QPL codec may be used to compress columns.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 - Disabled
|
||||
- 1 - Enabled
|
||||
|
||||
Type: Bool
|
||||
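A minimal sketch of enabling and using the codec (the table name is hypothetical):

```sql
SET enable_deflate_qpl_codec = 1;

-- DEFLATE_QPL takes no parameters.
CREATE TABLE qpl_example (s String CODEC(DEFLATE_QPL)) ENGINE = MergeTree ORDER BY tuple();
```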
|
||||
## enable_zstd_qat_codec {#enable_zstd_qat_codec}
|
||||
|
||||
If turned on, the ZSTD_QAT codec may be used to compress columns.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 - Disabled
|
||||
- 1 - Enabled
|
||||
|
||||
Type: Bool
|
||||
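A minimal sketch of enabling and using the codec (the table name is hypothetical; the level is taken from the recommended range documented for ZSTD_QAT):

```sql
SET enable_zstd_qat_codec = 1;

CREATE TABLE qat_example (s String CODEC(ZSTD_QAT(6))) ENGINE = MergeTree ORDER BY tuple();
```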
|
||||
## output_format_compression_level
|
||||
|
||||
Default compression level if query output is compressed. The setting is applied when the `SELECT` query has `INTO OUTFILE` or when writing to the table functions `file`, `url`, `hdfs`, `s3`, or `azureBlobStorage`.
|
||||
|
||||
Possible values: from `1` to `22`
|
||||
|
||||
Default: `3`
|
||||
|
||||
|
||||
## output_format_compression_zstd_window_log
|
||||
|
||||
Can be used when the output compression method is `zstd`. If greater than `0`, this setting explicitly sets the compression window size (a power of `2`) and enables long-range mode for zstd compression. This can help to achieve a better compression ratio.
|
||||
|
||||
Possible values: non-negative numbers. Note that if the value is too small or too big, `zstdlib` will throw an exception. Typical values are from `20` (window size = `1MB`) to `30` (window size = `1GB`).
|
||||
|
||||
Default: `0`
|
||||
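A hedged example combining both settings (the file name is hypothetical; the `.zst` extension selects zstd output compression):

```sql
SET output_format_compression_level = 10;
SET output_format_compression_zstd_window_log = 24;

SELECT number FROM numbers(1000000) INTO OUTFILE 'numbers.tsv.zst';
```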
|
||||
## rewrite_count_distinct_if_with_count_distinct_implementation
|
||||
|
||||
Allows you to rewrite `countDistinctIf` with the [count_distinct_implementation](#count_distinct_implementation) setting.
|
||||
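A minimal sketch, assuming a session where the rewrite is enabled:

```sql
SET rewrite_count_distinct_if_with_count_distinct_implementation = 1;

-- countDistinctIf should be rewritten to the implementation selected by
-- the count_distinct_implementation setting (e.g. uniqExact).
SELECT countDistinctIf(number % 10, number % 2 = 0) FROM numbers(1000);
```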
@ -5157,4 +5196,4 @@ The value 0 means that you can delete all tables without any restrictions.
|
||||
|
||||
:::note
|
||||
This query setting overwrites its server setting equivalent, see [max_table_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-table-size-to-drop)
|
||||
:::
|
||||
:::
|
||||
|
14
docs/en/operations/system-tables/dropped_tables_parts.md
Normal file
14
docs/en/operations/system-tables/dropped_tables_parts.md
Normal file
@ -0,0 +1,14 @@
|
||||
---
|
||||
slug: /en/operations/system-tables/dropped_tables_parts
|
||||
---
|
||||
# dropped_tables_parts {#system_tables-dropped_tables_parts}
|
||||
|
||||
Contains information about parts of dropped [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables from [system.dropped_tables](./dropped_tables.md).
|
||||
|
||||
The schema of this table is the same as [system.parts](./parts.md).
|
||||
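Since the schema matches [system.parts](./parts.md), the usual part columns can be queried; a minimal sketch:

```sql
SELECT database, table, name, rows
FROM system.dropped_tables_parts
LIMIT 5;
```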
|
||||
**See Also**
|
||||
|
||||
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
|
||||
- [system.parts](./parts.md)
|
||||
- [system.dropped_tables](./dropped_tables.md)
|
@ -42,7 +42,7 @@ Columns:
|
||||
- `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
|
||||
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Query starting date.
|
||||
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Query starting time.
|
||||
- `event_time_microseconds` ([DateTime](../../sql-reference/data-types/datetime.md)) — Query starting time with microseconds precision.
|
||||
- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Query starting time with microseconds precision.
|
||||
- `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time of query execution.
|
||||
- `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Start time of query execution with microsecond precision.
|
||||
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.
|
||||
|
@ -10,7 +10,7 @@ Columns:
|
||||
- `hostname` ([LowCardinality(String)](../../sql-reference/data-types/string.md)) — Hostname of the server executing the query.
|
||||
- `event_date` (Date) — Date of the entry.
|
||||
- `event_time` (DateTime) — Time of the entry.
|
||||
- `event_time_microseconds` (DateTime) — Time of the entry with microseconds precision.
|
||||
- `event_time_microseconds` (DateTime64) — Time of the entry with microseconds precision.
|
||||
- `microseconds` (UInt32) — Microseconds of the entry.
|
||||
- `thread_name` (String) — Name of the thread from which the logging was done.
|
||||
- `thread_id` (UInt64) — OS thread ID.
|
||||
|
@ -11,6 +11,8 @@ Keys:
|
||||
- `--query` — Format queries of any length and complexity.
|
||||
- `--hilite` — Add syntax highlight with ANSI terminal escape sequences.
|
||||
- `--oneline` — Format in a single line.
|
||||
- `--max_line_length` — Format queries whose length is less than the specified value in a single line.
|
||||
- `--comments` — Keep comments in the output.
|
||||
- `--quiet` or `-q` — Just check syntax, no output on success.
|
||||
- `--multiquery` or `-n` — Allow multiple queries in the same file.
|
||||
- `--obfuscate` — Obfuscate instead of formatting.
|
||||
|
@ -24,7 +24,7 @@ A client application to interact with clickhouse-keeper by its native protocol.
|
||||
## Example {#clickhouse-keeper-client-example}
|
||||
|
||||
```bash
|
||||
./clickhouse-keeper-client -h localhost:9181 --connection-timeout 30 --session-timeout 30 --operation-timeout 30
|
||||
./clickhouse-keeper-client -h localhost -p 9181 --connection-timeout 30 --session-timeout 30 --operation-timeout 30
|
||||
Connected to ZooKeeper at [::1]:9181 with session_id 137
|
||||
/ :) ls
|
||||
keeper foo bar
|
||||
|
@ -18,6 +18,12 @@ Supported range of values: \[1970-01-01 00:00:00, 2106-02-07 06:28:15\].
|
||||
|
||||
Resolution: 1 second.
|
||||
|
||||
## Speed
|
||||
|
||||
The `Date` datatype is faster than `DateTime` under _most_ conditions.
|
||||
|
||||
The `Date` type requires 2 bytes of storage, while `DateTime` requires 4. However, when the database compresses the data, this difference is amplified. This amplification is due to the minutes and seconds in `DateTime` being less compressible. Filtering and aggregating `Date` instead of `DateTime` is also faster.
|
||||
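A hedged illustration of the trade-off (the table names are hypothetical):

```sql
-- The Date column stores 2 bytes per value, the DateTime column 4.
CREATE TABLE events_by_day  (d Date)     ENGINE = MergeTree ORDER BY d;
CREATE TABLE events_by_time (t DateTime) ENGINE = MergeTree ORDER BY t;

-- Filtering on Date compares smaller, more compressible values.
SELECT count() FROM events_by_day WHERE d >= '2024-01-01';
```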
|
||||
## Usage Remarks
|
||||
|
||||
The point in time is saved as a [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time), regardless of the time zone or daylight saving time. The time zone affects how values of the `DateTime` type are displayed in text format and how values specified as strings are parsed (‘2020-01-01 05:00:01’).
|
||||
|
@ -6,7 +6,7 @@ sidebar_label: Arrays
|
||||
|
||||
# Array Functions
|
||||
|
||||
## empty
|
||||
## empty {#empty}
|
||||
|
||||
Checks whether the input array is empty.
|
||||
|
||||
@ -50,7 +50,7 @@ Result:
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
## notEmpty
|
||||
## notEmpty {#notempty}
|
||||
|
||||
Checks whether the input array is non-empty.
|
||||
|
||||
@ -221,7 +221,7 @@ SELECT has([1, 2, NULL], NULL)
|
||||
└─────────────────────────┘
|
||||
```
|
||||
|
||||
## hasAll
|
||||
## hasAll {#hasall}
|
||||
|
||||
Checks whether one array is a subset of another.
|
||||
|
||||
@ -261,7 +261,7 @@ Raises an exception `NO_COMMON_TYPE` if the set and subset elements do not share
|
||||
|
||||
`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` returns 0.
|
||||
|
||||
## hasAny
|
||||
## hasAny {#hasany}
|
||||
|
||||
Checks whether two arrays have intersection by some elements.
|
||||
|
||||
|
@ -1777,34 +1777,67 @@ Result:
|
||||
└────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## sqid
|
||||
## sqidEncode
|
||||
|
||||
Transforms numbers into a [Sqid](https://sqids.org/) which is a YouTube-like ID string.
|
||||
Encodes numbers as a [Sqid](https://sqids.org/) which is a YouTube-like ID string.
|
||||
The output alphabet is `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`.
|
||||
Do not use this function for hashing - the generated IDs can be decoded back into numbers.
|
||||
Do not use this function for hashing - the generated IDs can be decoded back into the original numbers.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
sqid(number1, ...)
|
||||
sqidEncode(number1, ...)
|
||||
```
|
||||
|
||||
Alias: `sqid`
|
||||
|
||||
**Arguments**
|
||||
|
||||
- A variable number of UInt8, UInt16, UInt32 or UInt64 numbers.
|
||||
|
||||
**Returned Value**
|
||||
|
||||
A hash id [String](/docs/en/sql-reference/data-types/string.md).
|
||||
A sqid [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Example**
|
||||
|
||||
```sql
|
||||
SELECT sqid(1, 2, 3, 4, 5);
|
||||
SELECT sqidEncode(1, 2, 3, 4, 5);
|
||||
```
|
||||
|
||||
```response
|
||||
┌─sqid(1, 2, 3, 4, 5)─┐
|
||||
│ gXHfJ1C6dN │
|
||||
└─────────────────────┘
|
||||
┌─sqidEncode(1, 2, 3, 4, 5)─┐
|
||||
│ gXHfJ1C6dN │
|
||||
└───────────────────────────┘
|
||||
```
|
||||
|
||||
## sqidDecode
|
||||
|
||||
Decodes a [Sqid](https://sqids.org/) back into its original numbers.
|
||||
Returns an empty array in case the input string is not a valid sqid.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
sqidDecode(sqid)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- A sqid - [String](/docs/en/sql-reference/data-types/string.md)
|
||||
|
||||
**Returned Value**
|
||||
|
||||
The sqid decoded into its original numbers [Array(UInt64)](/docs/en/sql-reference/data-types/array.md).
|
||||
|
||||
**Example**
|
||||
|
||||
```sql
|
||||
SELECT sqidDecode('gXHfJ1C6dN');
|
||||
```
|
||||
|
||||
```response
|
||||
┌─sqidDecode('gXHfJ1C6dN')─┐
|
||||
│ [1,2,3,4,5] │
|
||||
└──────────────────────────┘
|
||||
```
|
||||
|
@ -731,7 +731,7 @@ Alias: `FROM_BASE64`.
|
||||
|
||||
Like `base64Decode` but returns an empty string in case of error.
|
||||
|
||||
## endsWith
|
||||
## endsWith {#endswith}
|
||||
|
||||
Returns whether string `str` ends with `suffix`.
|
||||
|
||||
@ -765,7 +765,7 @@ Result:
|
||||
└──────────────────────────┴──────────────────────┘
|
||||
```
|
||||
|
||||
## startsWith
|
||||
## startsWith {#startswith}
|
||||
|
||||
Returns whether string `str` starts with `prefix`.
|
||||
|
||||
@ -1383,6 +1383,148 @@ Result:
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
## punycodeEncode
|
||||
|
||||
Returns the [Punycode](https://en.wikipedia.org/wiki/Punycode) representation of a string.
|
||||
The string must be UTF8-encoded, otherwise the behavior is undefined.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
punycodeEncode(val)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `val` - Input value. [String](../data-types/string.md)
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A Punycode representation of the input value. [String](../data-types/string.md)
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
select punycodeEncode('München');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```result
|
||||
┌─punycodeEncode('München')─┐
|
||||
│ Mnchen-3ya │
|
||||
└───────────────────────────┘
|
||||
```
|
||||
|
||||
## punycodeDecode
|
||||
|
||||
Returns the UTF8-encoded plaintext of a [Punycode](https://en.wikipedia.org/wiki/Punycode)-encoded string.
|
||||
If no valid Punycode-encoded string is given, an exception is thrown.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
punycodeDecode(val)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `val` - Punycode-encoded string. [String](../data-types/string.md)
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The plaintext of the input value. [String](../data-types/string.md)
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
select punycodeDecode('Mnchen-3ya');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```result
|
||||
┌─punycodeDecode('Mnchen-3ya')─┐
|
||||
│ München │
|
||||
└──────────────────────────────┘
|
||||
```
|
||||
|
||||
## tryPunycodeDecode
|
||||
|
||||
Like `punycodeDecode` but returns an empty string if no valid Punycode-encoded string is given.
|
||||
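A hedged example; the second input is assumed to be invalid because `@` is outside the Punycode alphabet:

```sql
SELECT tryPunycodeDecode('Mnchen-3ya') AS valid, tryPunycodeDecode('Mnchen-3y@') AS invalid;
```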
|
||||
## idnaEncode
|
||||
|
||||
Returns the ASCII representation (ToASCII algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
|
||||
The input string must be UTF8-encoded and translatable to an ASCII string, otherwise an exception is thrown.
|
||||
Note: No percent decoding or trimming of tabs, spaces or control characters is performed.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
idnaEncode(val)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `val` - Input value. [String](../data-types/string.md)
|
||||
|
||||
**Returned value**
|
||||
|
||||
- An ASCII representation according to the IDNA mechanism of the input value. [String](../data-types/string.md)
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
select idnaEncode('straße.münchen.de');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```result
|
||||
┌─idnaEncode('straße.münchen.de')─────┐
|
||||
│ xn--strae-oqa.xn--mnchen-3ya.de │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## tryIdnaEncode
|
||||
|
||||
Like `idnaEncode` but returns an empty string in case of an error instead of throwing an exception.
|
||||
|
||||
## idnaDecode
|
||||
|
||||
Returns the Unicode (UTF-8) representation (ToUnicode algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
|
||||
In case of an error (e.g. because the input is invalid), the input string is returned.
|
||||
Note that repeated application of `idnaEncode()` and `idnaDecode()` does not necessarily return the original string due to case normalization.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
idnaDecode(val)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `val` - Input value. [String](../data-types/string.md)
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A Unicode (UTF-8) representation according to the IDNA mechanism of the input value. [String](../data-types/string.md)
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
select idnaDecode('xn--strae-oqa.xn--mnchen-3ya.de');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```result
|
||||
┌─idnaDecode('xn--strae-oqa.xn--mnchen-3ya.de')─┐
|
||||
│ straße.münchen.de │
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## byteHammingDistance
|
||||
|
||||
Calculates the [hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) between two byte strings.
|
||||
@ -1463,6 +1605,78 @@ Result:
|
||||
|
||||
Alias: levenshteinDistance
|
||||
|
||||
## damerauLevenshteinDistance
|
||||
|
||||
Calculates the [Damerau-Levenshtein distance](https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance) between two byte strings.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
damerauLevenshteinDistance(string1, string2)
|
||||
```
|
||||
|
||||
**Examples**
|
||||
|
||||
``` sql
|
||||
SELECT damerauLevenshteinDistance('clickhouse', 'mouse');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─damerauLevenshteinDistance('clickhouse', 'mouse')─┐
|
||||
│ 6 │
|
||||
└───────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## jaroSimilarity
|
||||
|
||||
Calculates the [Jaro similarity](https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance#Jaro_similarity) between two byte strings.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
jaroSimilarity(string1, string2)
|
||||
```
|
||||
|
||||
**Examples**
|
||||
|
||||
``` sql
|
||||
SELECT jaroSimilarity('clickhouse', 'click');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─jaroSimilarity('clickhouse', 'click')─┐
|
||||
│ 0.8333333333333333 │
|
||||
└───────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## jaroWinklerSimilarity
|
||||
|
||||
Calculates the [Jaro-Winkler similarity](https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance#Jaro%E2%80%93Winkler_similarity) between two byte strings.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
jaroWinklerSimilarity(string1, string2)
|
||||
```
|
||||
|
||||
**Examples**
|
||||
|
||||
``` sql
|
||||
SELECT jaroWinklerSimilarity('clickhouse', 'click');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─jaroWinklerSimilarity('clickhouse', 'click')─┐
|
||||
│ 0.8999999999999999 │
|
||||
└──────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## initcap
|
||||
|
||||
Convert the first letter of each word to upper case and the rest to lower case. Words are sequences of alphanumeric characters separated by non-alphanumeric characters.
|
||||
|
@ -207,7 +207,7 @@ Functions `multiSearchFirstIndexCaseInsensitive`, `multiSearchFirstIndexUTF8` an
|
||||
multiSearchFirstIndex(haystack, \[needle<sub>1</sub>, needle<sub>2</sub>, …, needle<sub>n</sub>\])
|
||||
```
|
||||
|
||||
## multiSearchAny
|
||||
## multiSearchAny {#multisearchany}
|
||||
|
||||
Returns 1, if at least one string needle<sub>i</sub> matches the string `haystack` and 0 otherwise.
|
||||
|
||||
@ -219,7 +219,7 @@ Functions `multiSearchAnyCaseInsensitive`, `multiSearchAnyUTF8` and `multiSearch
|
||||
multiSearchAny(haystack, \[needle<sub>1</sub>, needle<sub>2</sub>, …, needle<sub>n</sub>\])
|
||||
```
|
||||
|
||||
## match
|
||||
## match {#match}
|
||||
|
||||
Returns whether string `haystack` matches the regular expression `pattern` in [re2 regular syntax](https://github.com/google/re2/wiki/Syntax).
|
||||
|
||||
@ -414,7 +414,7 @@ Result:
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## like
|
||||
## like {#like}
|
||||
|
||||
Returns whether string `haystack` matches the LIKE expression `pattern`.
|
||||
|
||||
@ -445,7 +445,7 @@ like(haystack, pattern)
|
||||
|
||||
Alias: `haystack LIKE pattern` (operator)
|
||||
|
||||
## notLike
|
||||
## notLike {#notlike}
|
||||
|
||||
Like `like` but negates the result.
|
||||
|
||||
|
@ -57,3 +57,56 @@ Result:
|
||||
│ 6 │
|
||||
└─────────┘
|
||||
```
|
||||
|
||||
## seriesDecomposeSTL
|
||||
|
||||
Decomposes a time series using STL [(Seasonal-Trend Decomposition Procedure Based on Loess)](https://www.wessa.net/download/stl.pdf) into a seasonal, a trend and a residual component.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
seriesDecomposeSTL(series, period);
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `series` - An array of numeric values
|
||||
- `period` - A positive integer
|
||||
|
||||
The number of data points in `series` should be at least twice the value of `period`.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- An array of three arrays, where the first array contains the seasonal component,
|
||||
the second array the trend, and the third array the residual component.
|
||||
|
||||
Type: [Array](../../sql-reference/data-types/array.md).
|
||||
|
||||
**Examples**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT seriesDecomposeSTL([10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34], 3) AS print_0;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌───────────print_0──────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ [[
|
||||
-13.529999, -3.1799996, 16.71, -13.53, -3.1799996, 16.71, -13.53, -3.1799996,
|
||||
16.71, -13.530001, -3.18, 16.710001, -13.530001, -3.1800003, 16.710001, -13.530001,
|
||||
-3.1800003, 16.710001, -13.530001, -3.1799994, 16.71, -13.529999, -3.1799994, 16.709997
|
||||
],
|
||||
[
|
||||
23.63, 23.63, 23.630003, 23.630001, 23.630001, 23.630001, 23.630001, 23.630001,
|
||||
23.630001, 23.630001, 23.630001, 23.63, 23.630001, 23.630001, 23.63, 23.630001,
|
||||
23.630001, 23.63, 23.630001, 23.630001, 23.630001, 23.630001, 23.630001, 23.630003
|
||||
],
|
||||
[
|
||||
0, 0.0000019073486, -0.0000019073486, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0000019073486, 0,
|
||||
0
|
||||
]] │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
@ -293,6 +293,8 @@ You can't combine both ways in one query.
|
||||
|
||||
Along with column descriptions, constraints can be defined:
|
||||
|
||||
### CONSTRAINT
|
||||
|
||||
``` sql
|
||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
(
|
||||
@ -307,6 +309,30 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
|
||||
Adding a large number of constraints can negatively affect the performance of big `INSERT` queries.
|
||||
|
||||
### ASSUME
|
||||
|
||||
The `ASSUME` clause is used to define a `CONSTRAINT` on a table that is assumed to be true. This constraint can then be used by the optimizer to enhance the performance of SQL queries.
|
||||
|
||||
Take this example where `ASSUME CONSTRAINT` is used in the creation of the `users_a` table:
|
||||
|
||||
```sql
|
||||
CREATE TABLE users_a (
|
||||
uid Int16,
|
||||
name String,
|
||||
age Int16,
|
||||
name_len UInt8 MATERIALIZED length(name),
|
||||
CONSTRAINT c1 ASSUME length(name) = name_len
|
||||
)
|
||||
ENGINE=MergeTree
|
||||
ORDER BY (name_len, name);
|
||||
```
|
||||
|
||||
Here, `ASSUME CONSTRAINT` is used to assert that the `length(name)` function always equals the value of the `name_len` column. This means that whenever `length(name)` is called in a query, ClickHouse can replace it with `name_len`, which should be faster because it avoids calling the `length()` function.
|
||||
|
||||
Then, when executing the query `SELECT name FROM users_a WHERE length(name) < 5;`, ClickHouse can optimize it to `SELECT name FROM users_a WHERE name_len < 5` because of the `ASSUME CONSTRAINT`. This can make the query run faster because it avoids calculating the length of `name` for each row.
|
||||
|
||||
`ASSUME CONSTRAINT` **does not enforce the constraint**, it merely informs the optimizer that the constraint holds true. If the constraint is not actually true, the results of the queries may be incorrect. Therefore, you should only use `ASSUME CONSTRAINT` if you are sure that the constraint is true.
|
||||
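A hedged sketch of observing the rewrite; the settings `optimize_using_constraints` and `optimize_substitute_columns` are assumed to control this optimization:

```sql
SET optimize_using_constraints = 1, optimize_substitute_columns = 1;

-- The rewritten query is expected to compare name_len instead of length(name).
EXPLAIN SYNTAX
SELECT name FROM users_a WHERE length(name) < 5;
```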
|
||||
## TTL Expression
|
||||
|
||||
Defines storage time for values. Can be specified only for MergeTree-family tables. For the detailed description, see [TTL for columns and tables](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
|
||||
@ -372,15 +398,23 @@ ClickHouse supports general purpose codecs and specialized codecs.
|
||||
|
||||
#### ZSTD
|
||||
|
||||
`ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: \[1, 22\]. Default value: 1.
|
||||
`ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: \[1, 22\]. Default level: 1.
|
||||
|
||||
High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage.
|
||||
|
||||
#### ZSTD_QAT
|
||||
|
||||
`ZSTD_QAT[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable level, implemented by [Intel® QATlib](https://github.com/intel/qatlib) and [Intel® QAT ZSTD Plugin](https://github.com/intel/QAT-ZSTD-Plugin). Possible levels: \[1, 12\]. Default level: 1. Recommended level range: \[6, 12\]. Some limitations apply:
|
||||
|
||||
- ZSTD_QAT is disabled by default and can only be used after enabling configuration setting [enable_zstd_qat_codec](../../../operations/settings/settings.md#enable_zstd_qat_codec).
|
||||
- For compression, ZSTD_QAT tries to use an Intel® QAT offloading device ([QuickAssist Technology](https://www.intel.com/content/www/us/en/developer/topic-technology/open/quick-assist-technology/overview.html)). If no such device is found, it falls back to ZSTD compression in software.
|
||||
- Decompression is always performed in software.
|
||||
|
||||
#### DEFLATE_QPL
|
||||
|
||||
`DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply:
|
||||
|
||||
- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_deflate_qpl_codec = 1`.
|
||||
- DEFLATE_QPL is disabled by default and can only be used after enabling configuration setting [enable_deflate_qpl_codec](../../../operations/settings/settings.md#enable_deflate_qpl_codec).
|
||||
- DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details.
|
||||
- DEFLATE_QPL works best if the system has an Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details.
|
||||
- DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled.
|
||||
|
@ -11,7 +11,7 @@ Its name comes from the fact that it can be looked at as executing `JOIN` with a
|
||||
|
||||
Syntax:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT <expr_list>
|
||||
FROM <left_subquery>
|
||||
[LEFT] ARRAY JOIN <array>
|
||||
@ -30,7 +30,7 @@ Supported types of `ARRAY JOIN` are listed below:
|
||||
|
||||
The examples below demonstrate the usage of the `ARRAY JOIN` and `LEFT ARRAY JOIN` clauses. Let’s create a table with an [Array](../../../sql-reference/data-types/array.md) type column and insert values into it:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
CREATE TABLE arrays_test
|
||||
(
|
||||
s String,
|
||||
@ -41,7 +41,7 @@ INSERT INTO arrays_test
|
||||
VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []);
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s───────────┬─arr─────┐
|
||||
│ Hello │ [1,2] │
|
||||
│ World │ [3,4,5] │
|
||||
@ -51,13 +51,13 @@ VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []);
|
||||
|
||||
The example below uses the `ARRAY JOIN` clause:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, arr
|
||||
FROM arrays_test
|
||||
ARRAY JOIN arr;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s─────┬─arr─┐
|
||||
│ Hello │ 1 │
|
||||
│ Hello │ 2 │
|
||||
@ -69,13 +69,13 @@ ARRAY JOIN arr;
|
||||
|
||||
The next example uses the `LEFT ARRAY JOIN` clause:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, arr
|
||||
FROM arrays_test
|
||||
LEFT ARRAY JOIN arr;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s───────────┬─arr─┐
|
||||
│ Hello │ 1 │
|
||||
│ Hello │ 2 │
|
||||
@ -90,13 +90,13 @@ LEFT ARRAY JOIN arr;
|
||||
|
||||
An alias can be specified for an array in the `ARRAY JOIN` clause. In this case, an array item can be accessed by this alias, but the array itself is accessed by the original name. Example:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, arr, a
|
||||
FROM arrays_test
|
||||
ARRAY JOIN arr AS a;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s─────┬─arr─────┬─a─┐
|
||||
│ Hello │ [1,2] │ 1 │
|
||||
│ Hello │ [1,2] │ 2 │
|
||||
@ -108,13 +108,13 @@ ARRAY JOIN arr AS a;
|
||||
|
||||
Using aliases, you can perform `ARRAY JOIN` with an external array. For example:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, arr_external
|
||||
FROM arrays_test
|
||||
ARRAY JOIN [1, 2, 3] AS arr_external;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s───────────┬─arr_external─┐
|
||||
│ Hello │ 1 │
|
||||
│ Hello │ 2 │
|
||||
@ -130,13 +130,13 @@ ARRAY JOIN [1, 2, 3] AS arr_external;
|
||||
|
||||
Multiple arrays can be comma-separated in the `ARRAY JOIN` clause. In this case, `JOIN` is performed with them simultaneously (the direct sum, not the cartesian product). Note that all the arrays must have the same size by default. Example:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, arr, a, num, mapped
|
||||
FROM arrays_test
|
||||
ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐
|
||||
│ Hello │ [1,2] │ 1 │ 1 │ 2 │
|
||||
│ Hello │ [1,2] │ 2 │ 2 │ 3 │
|
||||
@ -148,13 +148,13 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS ma
|
||||
|
||||
The example below uses the [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) function:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, arr, a, num, arrayEnumerate(arr)
|
||||
FROM arrays_test
|
||||
ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐
|
||||
│ Hello │ [1,2] │ 1 │ 1 │ [1,2] │
|
||||
│ Hello │ [1,2] │ 2 │ 2 │ [1,2] │
|
||||
@ -163,6 +163,7 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num;
|
||||
│ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │
|
||||
└───────┴─────────┴───┴─────┴─────────────────────┘
|
||||
```
|
||||
|
||||
Multiple arrays with different sizes can be joined by using: `SETTINGS enable_unaligned_array_join = 1`. Example:
|
||||
|
||||
```sql
|
||||
@ -171,7 +172,7 @@ FROM arrays_test ARRAY JOIN arr as a, [['a','b'],['c']] as b
|
||||
SETTINGS enable_unaligned_array_join = 1;
|
||||
```
|
||||
|
||||
```text
|
||||
```response
|
||||
┌─s───────┬─arr─────┬─a─┬─b─────────┐
|
||||
│ Hello │ [1,2] │ 1 │ ['a','b'] │
|
||||
│ Hello │ [1,2] │ 2 │ ['c'] │
|
||||
@ -187,7 +188,7 @@ SETTINGS enable_unaligned_array_join = 1;
|
||||
|
||||
`ARRAY JOIN` also works with [nested data structures](../../../sql-reference/data-types/nested-data-structures/index.md):
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
CREATE TABLE nested_test
|
||||
(
|
||||
s String,
|
||||
@ -200,7 +201,7 @@ INSERT INTO nested_test
|
||||
VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []);
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s───────┬─nest.x──┬─nest.y─────┐
|
||||
│ Hello │ [1,2] │ [10,20] │
|
||||
│ World │ [3,4,5] │ [30,40,50] │
|
||||
@ -208,13 +209,13 @@ VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', []
|
||||
└─────────┴─────────┴────────────┘
|
||||
```
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, `nest.x`, `nest.y`
|
||||
FROM nested_test
|
||||
ARRAY JOIN nest;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s─────┬─nest.x─┬─nest.y─┐
|
||||
│ Hello │ 1 │ 10 │
|
||||
│ Hello │ 2 │ 20 │
|
||||
@ -226,13 +227,13 @@ ARRAY JOIN nest;
|
||||
|
||||
When specifying names of nested data structures in `ARRAY JOIN`, the meaning is the same as `ARRAY JOIN` with all the array elements that it consists of. Examples are listed below:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, `nest.x`, `nest.y`
|
||||
FROM nested_test
|
||||
ARRAY JOIN `nest.x`, `nest.y`;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s─────┬─nest.x─┬─nest.y─┐
|
||||
│ Hello │ 1 │ 10 │
|
||||
│ Hello │ 2 │ 20 │
|
||||
@ -244,13 +245,13 @@ ARRAY JOIN `nest.x`, `nest.y`;
|
||||
|
||||
This variation also makes sense:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, `nest.x`, `nest.y`
|
||||
FROM nested_test
|
||||
ARRAY JOIN `nest.x`;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s─────┬─nest.x─┬─nest.y─────┐
|
||||
│ Hello │ 1 │ [10,20] │
|
||||
│ Hello │ 2 │ [10,20] │
|
||||
@ -262,13 +263,13 @@ ARRAY JOIN `nest.x`;
|
||||
|
||||
An alias may be used for a nested data structure, in order to select either the `JOIN` result or the source array. Example:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`
|
||||
FROM nested_test
|
||||
ARRAY JOIN nest AS n;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐
|
||||
│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │
|
||||
│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │
|
||||
@ -280,13 +281,13 @@ ARRAY JOIN nest AS n;
|
||||
|
||||
Example of using the [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) function:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num
|
||||
FROM nested_test
|
||||
ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐
|
||||
│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │
|
||||
│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │
|
||||
@ -300,6 +301,11 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;
|
||||
|
||||
The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the [WHERE](../../../sql-reference/statements/select/where.md)/[PREWHERE](../../../sql-reference/statements/select/prewhere.md) clause in a query, technically they can be performed in any order, unless the result of `ARRAY JOIN` is used for filtering. The processing order is controlled by the query optimizer.
|
||||
|
||||
### Incompatibility with short-circuit function evaluation
|
||||
|
||||
[Short-circuit function evaluation](../../../operations/settings/index.md#short-circuit-function-evaluation) is a feature that optimizes the execution of complex expressions in specific functions such as `if`, `multiIf`, `and`, and `or`. It prevents potential exceptions, such as division by zero, from occurring during the execution of these functions.
|
||||
|
||||
`arrayJoin` is always executed and is not supported for short-circuit function evaluation. This is because it is a unique function, processed separately from all other functions during query analysis and execution, and it requires additional logic that does not work with short-circuit function execution: the number of rows in the result depends on the `arrayJoin` result, and it is too complex and expensive to implement lazy execution of `arrayJoin`.
|
||||
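A hedged illustration: `arrayJoin` runs unconditionally, so every input row is expanded even when the condition selects the other branch:

```sql
-- Expected: 6 rows, because arrayJoin([1, 2, 3]) multiplies both input rows.
SELECT number, if(number = 0, arrayJoin([1, 2, 3]), 0) AS v
FROM numbers(2);
```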
|
||||
## Related content
|
||||
|
||||
|
@ -12,7 +12,7 @@ Join produces a new table by combining columns from one or multiple tables by us
|
||||
``` sql
|
||||
SELECT <expr_list>
|
||||
FROM <left_table>
|
||||
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
|
||||
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ALL|ASOF] JOIN <right_table>
|
||||
(ON <expr_list>)|(USING <column_list>) ...
|
||||
```
|
||||
|
||||
@ -296,6 +296,34 @@ PASTE JOIN
|
||||
│ 1 │ 0 │
|
||||
└───┴──────┘
|
||||
```
|
||||
Note: In this case, the result can be nondeterministic if reading is performed in parallel. Example:
|
||||
```SQL
|
||||
SELECT *
|
||||
FROM
|
||||
(
|
||||
SELECT number AS a
|
||||
FROM numbers_mt(5)
|
||||
) AS t1
|
||||
PASTE JOIN
|
||||
(
|
||||
SELECT number AS a
|
||||
FROM numbers(10)
|
||||
ORDER BY a DESC
|
||||
) AS t2
|
||||
SETTINGS max_block_size = 2;
|
||||
|
||||
┌─a─┬─t2.a─┐
|
||||
│ 2 │ 9 │
|
||||
│ 3 │ 8 │
|
||||
└───┴──────┘
|
||||
┌─a─┬─t2.a─┐
|
||||
│ 0 │ 7 │
|
||||
│ 1 │ 6 │
|
||||
└───┴──────┘
|
||||
┌─a─┬─t2.a─┐
|
||||
│ 4 │ 5 │
|
||||
└───┴──────┘
|
||||
```
|
||||
|
||||
## Distributed JOIN
|
||||
|
||||
|
@ -343,13 +343,14 @@ SYSTEM START PULLING REPLICATION LOG [ON CLUSTER cluster_name] [[db.]replicated_
|
||||
Waits until a `ReplicatedMergeTree` table is synced with the other replicas in a cluster, but no more than `receive_timeout` seconds.
|
||||
|
||||
``` sql
|
||||
SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT | PULL]
|
||||
SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT [FROM 'srcReplica1'[, 'srcReplica2'[, ...]]] | PULL]
|
||||
```
|
||||
|
||||
After running this statement, the `[db.]replicated_merge_tree_family_table_name` table fetches commands from the common replicated log into its own replication queue, and then the query waits until the replica processes all of the fetched commands. The following modifiers are supported:
|
||||
|
||||
- If a `STRICT` modifier was specified then the query waits for the replication queue to become empty. The `STRICT` version may never succeed if new entries constantly appear in the replication queue.
|
||||
- If a `LIGHTWEIGHT` modifier was specified then the query waits only for `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries to be processed.
|
||||
- If a `LIGHTWEIGHT` modifier was specified then the query waits only for `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries to be processed.
|
||||
Additionally, the `LIGHTWEIGHT` modifier supports an optional `FROM 'srcReplicas'` clause, where `srcReplicas` is a comma-separated list of source replica names. This extension allows for more targeted synchronization by focusing only on replication tasks originating from the specified source replicas (see the example after this list).
|
||||
- If a `PULL` modifier was specified then the query pulls new replication queue entries from ZooKeeper, but does not wait for anything to be processed.
|
||||
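A hedged example of the `FROM` clause (the replica names are hypothetical):

```sql
SYSTEM SYNC REPLICA db.replicated_table LIGHTWEIGHT FROM 'replica1', 'replica2';
```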
|
||||
### SYNC DATABASE REPLICA
|
||||
|
@ -280,7 +280,7 @@ SYSTEM START REPLICATION QUEUES [ON CLUSTER cluster_name] [[db.]replicated_merge
|
||||
Waits until a `ReplicatedMergeTree` table is synced with the other replicas in the cluster, but no more than `receive_timeout` seconds:
|
||||
|
||||
``` sql
|
||||
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT | PULL]
|
||||
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT [FROM 'srcReplica1'[, 'srcReplica2'[, ...]]] | PULL]
|
||||
```
|
||||
|
||||
После выполнения этого запроса таблица `[db.]replicated_merge_tree_family_table_name` загружает команды из общего реплицированного лога в свою собственную очередь репликации. Затем запрос ждет, пока реплика не обработает все загруженные команды. Поддерживаются следующие модификаторы:
|
||||
|
@ -248,7 +248,7 @@ SYSTEM START REPLICATION QUEUES [ON CLUSTER cluster_name] [[db.]replicated_merge
|
||||
|
||||
|
||||
``` sql
|
||||
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT | PULL]
|
||||
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT [FROM 'srcReplica1'[, 'srcReplica2'[, ...]]] | PULL]
|
||||
```
|
||||
|
||||
### RESTART REPLICA {#query_language-system-restart-replica}
|
||||
|
@ -1559,7 +1559,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(

QueryPipeline input;
QueryPipeline output;
{
BlockIO io_insert = InterpreterFactory::get(query_insert_ast, context_insert)->execute();
BlockIO io_insert = InterpreterFactory::instance().get(query_insert_ast, context_insert)->execute();

InterpreterSelectWithUnionQuery select(query_select_ast, context_select, SelectQueryOptions{});
QueryPlan plan;

@ -1944,7 +1944,7 @@ bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts,

auto local_context = Context::createCopy(context);
local_context->setSettings(task_cluster->settings_pull);
auto pipeline = InterpreterFactory::get(query_ast, local_context)->execute().pipeline;
auto pipeline = InterpreterFactory::instance().get(query_ast, local_context)->execute().pipeline;
PullingPipelineExecutor executor(pipeline);
Block block;
executor.pull(block);

@ -1989,7 +1989,7 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi

auto local_context = Context::createCopy(context);
local_context->setSettings(task_cluster->settings_pull);
auto pipeline = InterpreterFactory::get(query_ast, local_context)->execute().pipeline;
auto pipeline = InterpreterFactory::instance().get(query_ast, local_context)->execute().pipeline;
PullingPipelineExecutor executor(pipeline);
Block result;
executor.pull(result);
@ -4,6 +4,7 @@

#include <Common/TerminalSize.h>
#include <Databases/registerDatabases.h>
#include <IO/ConnectionTimeouts.h>
#include <Interpreters/registerInterpreters.h>
#include <Formats/registerFormats.h>
#include <Common/scope_guard_safe.h>
#include <unistd.h>

@ -157,6 +158,7 @@ void ClusterCopierApp::mainImpl()

context->setApplicationType(Context::ApplicationType::LOCAL);
context->setPath(process_path + "/");

registerInterpreters();
registerFunctions();
registerAggregateFunctions();
registerTableFunctions();
@ -17,15 +17,7 @@

#include <Common/Config/ConfigProcessor.h>
#include <Common/Exception.h>
#include <Common/parseGlobs.h>

#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <re2/re2.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#include <Common/re2.h>

static void setupLogging(const std::string & log_level)
{
@ -3,16 +3,19 @@

#include <string_view>
#include <boost/program_options.hpp>

#include <IO/copyData.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromOStream.h>
#include <Interpreters/registerInterpreters.h>
#include <Parsers/ASTInsertQuery.h>
#include <Parsers/ParserQuery.h>
#include <Parsers/formatAST.h>
#include <Parsers/obfuscateQueries.h>
#include <Parsers/parseQuery.h>
#include <Common/ErrorCodes.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/TerminalSize.h>

#include <Interpreters/Context.h>

@ -29,22 +32,49 @@

#include <DataTypes/DataTypeFactory.h>
#include <Formats/FormatFactory.h>
#include <Formats/registerFormats.h>
#include <Processors/Transforms/getSourceFromASTInsertQuery.h>

namespace DB::ErrorCodes
{
extern const int NOT_IMPLEMENTED;
}

namespace
{

void skipSpacesAndComments(const char*& pos, const char* end, bool print_comments)
{
do
{
/// skip spaces to avoid throw exception after last query
while (pos != end && std::isspace(*pos))
++pos;

const char * comment_begin = pos;
/// for skip comment after the last query and to not throw exception
if (end - pos > 2 && *pos == '-' && *(pos + 1) == '-')
{
pos += 2;
/// skip until the end of the line
while (pos != end && *pos != '\n')
++pos;
if (print_comments)
std::cout << std::string_view(comment_begin, pos - comment_begin) << "\n";
}
/// need to parse next sql
else
break;
} while (pos != end);
}

}

#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wmissing-declarations"

extern const char * auto_time_zones[];

namespace DB
{
namespace ErrorCodes
{
extern const int INVALID_FORMAT_INSERT_QUERY_WITH_DATA;
}
}

int mainEntryClickHouseFormat(int argc, char ** argv)
{
using namespace DB;
@ -55,8 +85,10 @@ int mainEntryClickHouseFormat(int argc, char ** argv)

desc.add_options()
("query", po::value<std::string>(), "query to format")
("help,h", "produce help message")
("comments", "keep comments in the output")
("hilite", "add syntax highlight with ANSI terminal escape sequences")
("oneline", "format in single line")
("max_line_length", po::value<size_t>()->default_value(0), "format in single line queries with length less than specified")
("quiet,q", "just check syntax, no output on success")
("multiquery,n", "allow multiple queries in the same file")
("obfuscate", "obfuscate instead of formatting")

@ -88,6 +120,8 @@ int mainEntryClickHouseFormat(int argc, char ** argv)

bool oneline = options.count("oneline");
bool quiet = options.count("quiet");
bool multiple = options.count("multiquery");
bool print_comments = options.count("comments");
size_t max_line_length = options["max_line_length"].as<size_t>();
bool obfuscate = options.count("obfuscate");
bool backslash = options.count("backslash");
bool allow_settings_after_format_in_insert = options.count("allow_settings_after_format_in_insert");

@ -104,6 +138,19 @@ int mainEntryClickHouseFormat(int argc, char ** argv)

return 2;
}

if (oneline && max_line_length)
{
std::cerr << "Options 'oneline' and 'max_line_length' are mutually exclusive." << std::endl;
return 2;
}

if (max_line_length > 255)
{
std::cerr << "Option 'max_line_length' must be less than 256." << std::endl;
return 2;
}

String query;

if (options.count("query"))

@ -124,10 +171,10 @@ int mainEntryClickHouseFormat(int argc, char ** argv)

if (options.count("seed"))
{
std::string seed;
hash_func.update(options["seed"].as<std::string>());
}

registerInterpreters();
registerFunctions();
registerAggregateFunctions();
registerTableFunctions();
@ -179,30 +226,75 @@ int mainEntryClickHouseFormat(int argc, char ** argv)

{
const char * pos = query.data();
const char * end = pos + query.size();
skipSpacesAndComments(pos, end, print_comments);

ParserQuery parser(end, allow_settings_after_format_in_insert);
do
while (pos != end)
{
size_t approx_query_length = multiple ? find_first_symbols<';'>(pos, end) - pos : end - pos;

ASTPtr res = parseQueryAndMovePosition(
parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth);

/// For insert query with data(INSERT INTO ... VALUES ...), that will lead to the formatting failure,
/// we should throw an exception early, and make exception message more readable.
if (const auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
std::unique_ptr<ReadBuffer> insert_query_payload = nullptr;
/// If the query is INSERT ... VALUES, then we will try to parse the data.
if (auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
{
throw Exception(DB::ErrorCodes::INVALID_FORMAT_INSERT_QUERY_WITH_DATA,
"Can't format ASTInsertQuery with data, since data will be lost");
if ("Values" != insert_query->format)
throw Exception(DB::ErrorCodes::NOT_IMPLEMENTED, "Can't format INSERT query with data format '{}'", insert_query->format);

/// Reset format to default to have `INSERT INTO table VALUES` instead of `INSERT INTO table VALUES FORMAT Values`
insert_query->format = {};

/// We assume that data ends with a newline character (same as client does)
const char * this_query_end = find_first_symbols<'\n'>(insert_query->data, end);
insert_query->end = this_query_end;
pos = this_query_end;
insert_query_payload = getReadBufferFromASTInsertQuery(res);
}

if (!quiet)
{
if (!backslash)
{
WriteBufferFromOStream res_buf(std::cout, 4096);
formatAST(*res, res_buf, hilite, oneline);
res_buf.finalize();
if (multiple)
std::cout << "\n;\n";
WriteBufferFromOwnString str_buf;
formatAST(*res, str_buf, hilite, oneline || approx_query_length < max_line_length);

if (insert_query_payload)
{
str_buf.write(' ');
copyData(*insert_query_payload, str_buf);
}

String res_string = str_buf.str();
const char * s_pos = res_string.data();
const char * s_end = s_pos + res_string.size();
/// remove trailing spaces
while (s_end > s_pos && isWhitespaceASCIIOneLine(*(s_end - 1)))
--s_end;
WriteBufferFromOStream res_cout(std::cout, 4096);
/// For multiline queries we print ';' at new line,
/// but for single line queries we print ';' at the same line
bool has_multiple_lines = false;
while (s_pos != s_end)
{
if (*s_pos == '\n')
has_multiple_lines = true;
res_cout.write(*s_pos++);
}
res_cout.finalize();

if (multiple && !insert_query_payload)
{
if (oneline || !has_multiple_lines)
std::cout << ";\n";
else
std::cout << "\n;\n";
}
else if (multiple && insert_query_payload)
/// Do not need to add ; because it's already in the insert_query_payload
std::cout << "\n";

std::cout << std::endl;
}
/// add additional '\' at the end of each line;

@ -230,27 +322,10 @@ int mainEntryClickHouseFormat(int argc, char ** argv)

std::cout << std::endl;
}
}

do
{
/// skip spaces to avoid throw exception after last query
while (pos != end && std::isspace(*pos))
++pos;

/// for skip comment after the last query and to not throw exception
if (end - pos > 2 && *pos == '-' && *(pos + 1) == '-')
{
pos += 2;
/// skip until the end of the line
while (pos != end && *pos != '\n')
++pos;
}
/// need to parse next sql
else
break;
} while (pos != end);

} while (multiple && pos != end);
skipSpacesAndComments(pos, end, print_comments);
if (!multiple)
break;
}
}
}
catch (...)
@ -16,6 +16,7 @@

#include <Common/SipHash.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/ShellCommand.h>
#include <Common/re2.h>
#include <base/find_symbols.h>

#include <IO/copyData.h>

@ -24,15 +25,6 @@

#include <IO/WriteBufferFromFile.h>
#include <IO/WriteBufferFromFileDescriptor.h>

#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <re2/re2.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif

static constexpr auto documentation = R"(
A tool to extract information from Git repository for analytics.
@ -413,13 +413,13 @@ void ReconfigCommand::execute(const DB::ASTKeeperQuery * query, DB::KeeperClient

switch (operation)
{
case static_cast<UInt8>(ReconfigCommand::Operation::ADD):
joining = query->args[1].safeGet<DB::String>();
joining = query->args[1].safeGet<String>();
break;
case static_cast<UInt8>(ReconfigCommand::Operation::REMOVE):
leaving = query->args[1].safeGet<DB::String>();
leaving = query->args[1].safeGet<String>();
break;
case static_cast<UInt8>(ReconfigCommand::Operation::SET):
new_members = query->args[1].safeGet<DB::String>();
new_members = query->args[1].safeGet<String>();
break;
default:
UNREACHABLE();
@ -95,6 +95,7 @@ if (BUILD_STANDALONE_KEEPER)

${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/CurrentThread.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/NamedCollections/NamedCollections.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/NamedCollections/NamedCollectionConfiguration.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/Jemalloc.cpp

${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/IKeeper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/TestKeeper.cpp

@ -126,15 +127,17 @@ if (BUILD_STANDALONE_KEEPER)

${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/DiskObjectStorage.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/DiskObjectStorageCommon.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/ObjectStorageIteratorAsync.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/ObjectStorageIterator.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/StoredObject.cpp

${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/registerDiskS3.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/S3Capabilities.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/diskSettings.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/DiskS3Utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/ObjectStorageFactory.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/MetadataStorageFactory.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/RegisterDiskObjectStorage.cpp

${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/createReadBufferFromFileBase.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
@ -2,6 +2,7 @@

#include "CatBoostLibraryHandler.h"
#include "CatBoostLibraryHandlerFactory.h"
#include "Common/ProfileEvents.h"
#include "ExternalDictionaryLibraryHandler.h"
#include "ExternalDictionaryLibraryHandlerFactory.h"

@ -44,7 +45,7 @@ namespace

response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);

if (!response.sent())
*response.send() << message << std::endl;
*response.send() << message << '\n';

LOG_WARNING(&Poco::Logger::get("LibraryBridge"), fmt::runtime(message));
}
@ -96,7 +97,7 @@ ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRe

}

void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
LOG_TRACE(log, "Request URI: {}", request.getURI());
HTMLForm params(getContext()->getSettingsRef(), request);

@ -384,7 +385,7 @@ ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExi

}

void ExternalDictionaryLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void ExternalDictionaryLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
try
{

@ -423,7 +424,7 @@ CatBoostLibraryBridgeRequestHandler::CatBoostLibraryBridgeRequestHandler(

}

void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
LOG_TRACE(log, "Request URI: {}", request.getURI());
HTMLForm params(getContext()->getSettingsRef(), request);

@ -463,6 +464,9 @@ void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & requ

{
if (method == "catboost_list")
{
auto & read_buf = request.getStream();
params.read(read_buf);

ExternalModelInfos model_infos = CatBoostLibraryHandlerFactory::instance().getModelInfos();

writeIntBinary(static_cast<UInt64>(model_infos.size()), out);

@ -500,6 +504,9 @@ void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & requ

}
else if (method == "catboost_removeAllModels")
{
auto & read_buf = request.getStream();
params.read(read_buf);

CatBoostLibraryHandlerFactory::instance().removeAllModels();

String res = "1";

@ -621,7 +628,7 @@ CatBoostLibraryBridgeExistsHandler::CatBoostLibraryBridgeExistsHandler(size_t ke

}

void CatBoostLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void CatBoostLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
try
{
@ -20,7 +20,7 @@ class ExternalDictionaryLibraryBridgeRequestHandler : public HTTPRequestHandler,

public:
ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_);

void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
static constexpr inline auto FORMAT = "RowBinary";

@ -36,7 +36,7 @@ class ExternalDictionaryLibraryBridgeExistsHandler : public HTTPRequestHandler,

public:
ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_);

void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
const size_t keep_alive_timeout;

@ -65,7 +65,7 @@ class CatBoostLibraryBridgeRequestHandler : public HTTPRequestHandler, WithConte

public:
CatBoostLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_);

void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
const size_t keep_alive_timeout;

@ -79,7 +79,7 @@ class CatBoostLibraryBridgeExistsHandler : public HTTPRequestHandler, WithContex

public:
CatBoostLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_);

void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
const size_t keep_alive_timeout;
@ -20,6 +20,7 @@

#include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Interpreters/ProcessList.h>
#include <Interpreters/loadMetadata.h>
#include <Interpreters/registerInterpreters.h>
#include <base/getFQDNOrHostName.h>
#include <Common/scope_guard_safe.h>
#include <Interpreters/Session.h>

@ -486,6 +487,7 @@ try

Poco::ErrorHandler::set(&error_handler);
}

registerInterpreters();
/// Don't initialize DateLUT
registerFunctions();
registerAggregateFunctions();
@ -69,7 +69,7 @@ namespace

}

void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
HTMLForm params(getContext()->getSettingsRef(), request, request.getStream());
LOG_TRACE(log, "Request URI: {}", request.getURI());

@ -78,7 +78,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ

{
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent())
*response.send() << message << std::endl;
*response.send() << message << '\n';
LOG_WARNING(log, fmt::runtime(message));
};

@ -23,7 +23,7 @@ public:

{
}

void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
Poco::Logger * log;

@ -21,7 +21,7 @@

namespace DB
{
void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
HTMLForm params(getContext()->getSettingsRef(), request, request.getStream());
LOG_TRACE(log, "Request URI: {}", request.getURI());

@ -30,7 +30,7 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ

{
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent())
*response.send() << message << std::endl;
response.send()->writeln(message);
LOG_WARNING(log, fmt::runtime(message));
};

@ -21,7 +21,7 @@ public:

{
}

void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
Poco::Logger * log;

@ -46,12 +46,12 @@ void ODBCHandler::processError(HTTPServerResponse & response, const std::string

{
response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent())
*response.send() << message << std::endl;
*response.send() << message << '\n';
LOG_WARNING(log, fmt::runtime(message));
}

void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
HTMLForm params(getContext()->getSettingsRef(), request);
LOG_TRACE(log, "Request URI: {}", request.getURI());

@ -30,7 +30,7 @@ public:

{
}

void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
Poco::Logger * log;

@ -6,7 +6,7 @@

namespace DB
{
void PingHandler::handleRequest(HTTPServerRequest & /* request */, HTTPServerResponse & response)
void PingHandler::handleRequest(HTTPServerRequest & /* request */, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
try
{

@ -10,7 +10,7 @@ class PingHandler : public HTTPRequestHandler

{
public:
explicit PingHandler(size_t keep_alive_timeout_) : keep_alive_timeout(keep_alive_timeout_) {}
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
size_t keep_alive_timeout;

@ -29,7 +29,7 @@ namespace

}

void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
HTMLForm params(getContext()->getSettingsRef(), request, request.getStream());
LOG_TRACE(log, "Request URI: {}", request.getURI());

@ -38,7 +38,7 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer

{
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent())
*response.send() << message << std::endl;
*response.send() << message << '\n';
LOG_WARNING(log, fmt::runtime(message));
};

@ -24,7 +24,7 @@ public:

{
}

void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;

private:
Poco::Logger * log;
@ -58,6 +58,7 @@

#include <Interpreters/ExternalDictionariesLoader.h>
#include <Interpreters/ProcessList.h>
#include <Interpreters/loadMetadata.h>
#include <Interpreters/registerInterpreters.h>
#include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Access/AccessControl.h>
#include <Storages/StorageReplicatedMergeTree.h>

@ -152,6 +153,18 @@ namespace ProfileEvents

{
extern const Event MainConfigLoads;
extern const Event ServerStartupMilliseconds;
extern const Event InterfaceNativeSendBytes;
extern const Event InterfaceNativeReceiveBytes;
extern const Event InterfaceHTTPSendBytes;
extern const Event InterfaceHTTPReceiveBytes;
extern const Event InterfacePrometheusSendBytes;
extern const Event InterfacePrometheusReceiveBytes;
extern const Event InterfaceInterserverSendBytes;
extern const Event InterfaceInterserverReceiveBytes;
extern const Event InterfaceMySQLSendBytes;
extern const Event InterfaceMySQLReceiveBytes;
extern const Event InterfacePostgreSQLSendBytes;
extern const Event InterfacePostgreSQLReceiveBytes;
}

namespace fs = std::filesystem;

@ -646,6 +659,7 @@ try

}
#endif

registerInterpreters();
registerFunctions();
registerAggregateFunctions();
registerTableFunctions();

@ -2047,7 +2061,7 @@ std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(

auto create_factory = [&](const std::string & type, const std::string & conf_name) -> TCPServerConnectionFactory::Ptr
{
if (type == "tcp")
return TCPServerConnectionFactory::Ptr(new TCPHandlerFactory(*this, false, false));
return TCPServerConnectionFactory::Ptr(new TCPHandlerFactory(*this, false, false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes));

if (type == "tls")
#if USE_SSL

@ -2059,20 +2073,20 @@

if (type == "proxy1")
return TCPServerConnectionFactory::Ptr(new ProxyV1HandlerFactory(*this, conf_name));
if (type == "mysql")
return TCPServerConnectionFactory::Ptr(new MySQLHandlerFactory(*this));
return TCPServerConnectionFactory::Ptr(new MySQLHandlerFactory(*this, ProfileEvents::InterfaceMySQLReceiveBytes, ProfileEvents::InterfaceMySQLSendBytes));
if (type == "postgres")
return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this));
return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this, ProfileEvents::InterfacePostgreSQLReceiveBytes, ProfileEvents::InterfacePostgreSQLSendBytes));
if (type == "http")
return TCPServerConnectionFactory::Ptr(
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"))
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes)
);
if (type == "prometheus")
return TCPServerConnectionFactory::Ptr(
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"))
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), ProfileEvents::InterfacePrometheusReceiveBytes, ProfileEvents::InterfacePrometheusSendBytes)
);
if (type == "interserver")
return TCPServerConnectionFactory::Ptr(
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"))
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"), ProfileEvents::InterfaceInterserverReceiveBytes, ProfileEvents::InterfaceInterserverSendBytes)
);

throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol configuration error, unknown protocol name '{}'", type);

@ -2205,7 +2219,7 @@ void Server::createServers(

port_name,
"http://" + address.toString(),
std::make_unique<HTTPServer>(
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params));
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes));
});
}

@ -2225,7 +2239,7 @@ void Server::createServers(

port_name,
"https://" + address.toString(),
std::make_unique<HTTPServer>(
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes));
#else
UNUSED(port);
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "HTTPS protocol is disabled because Poco library was built without NetSSL support.");

@ -2248,7 +2262,7 @@ void Server::createServers(

port_name,
"native protocol (tcp): " + address.toString(),
std::make_unique<TCPServer>(
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ false),
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes),
server_pool,
socket,
new Poco::Net::TCPServerParams));

@ -2270,7 +2284,7 @@ void Server::createServers(

port_name,
"native protocol (tcp) with PROXY: " + address.toString(),
std::make_unique<TCPServer>(
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ true),
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ true, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes),
server_pool,
socket,
new Poco::Net::TCPServerParams));

@ -2293,7 +2307,7 @@ void Server::createServers(

port_name,
"secure native protocol (tcp_secure): " + address.toString(),
std::make_unique<TCPServer>(
new TCPHandlerFactory(*this, /* secure */ true, /* proxy protocol */ false),
new TCPHandlerFactory(*this, /* secure */ true, /* proxy protocol */ false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes),
server_pool,
socket,
new Poco::Net::TCPServerParams));

@ -2317,7 +2331,7 @@ void Server::createServers(

listen_host,
port_name,
"MySQL compatibility protocol: " + address.toString(),
std::make_unique<TCPServer>(new MySQLHandlerFactory(*this), server_pool, socket, new Poco::Net::TCPServerParams));
std::make_unique<TCPServer>(new MySQLHandlerFactory(*this, ProfileEvents::InterfaceMySQLReceiveBytes, ProfileEvents::InterfaceMySQLSendBytes), server_pool, socket, new Poco::Net::TCPServerParams));
});
}

@ -2334,7 +2348,7 @@ void Server::createServers(

listen_host,
port_name,
"PostgreSQL compatibility protocol: " + address.toString(),
std::make_unique<TCPServer>(new PostgreSQLHandlerFactory(*this), server_pool, socket, new Poco::Net::TCPServerParams));
std::make_unique<TCPServer>(new PostgreSQLHandlerFactory(*this, ProfileEvents::InterfacePostgreSQLReceiveBytes, ProfileEvents::InterfacePostgreSQLSendBytes), server_pool, socket, new Poco::Net::TCPServerParams));
});
}

@ -2368,7 +2382,7 @@ void Server::createServers(

port_name,
"Prometheus: http://" + address.toString(),
std::make_unique<HTTPServer>(
httpContext(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
httpContext(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfacePrometheusReceiveBytes, ProfileEvents::InterfacePrometheusSendBytes));
});
}
}

@ -2414,7 +2428,9 @@ void Server::createInterserverServers(

createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"),
server_pool,
socket,
http_params));
http_params,
ProfileEvents::InterfaceInterserverReceiveBytes,
ProfileEvents::InterfaceInterserverSendBytes));
});
}

@ -2437,7 +2453,9 @@ void Server::createInterserverServers(

createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPSHandler-factory"),
server_pool,
socket,
http_params));
http_params,
ProfileEvents::InterfaceInterserverReceiveBytes,
ProfileEvents::InterfaceInterserverSendBytes));
#else
UNUSED(port);
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
@ -713,11 +713,11 @@

For example, if there are two users A and B, and a row policy is defined only for A, then
if this setting is true the user B will see all rows, and if this setting is false the user B will see no rows.
By default this setting is false for compatibility with earlier access configurations. -->
<users_without_row_policies_can_read_rows>false</users_without_row_policies_can_read_rows>
<users_without_row_policies_can_read_rows>true</users_without_row_policies_can_read_rows>

<!-- By default, for backward compatibility ON CLUSTER queries ignore the CLUSTER grant,
however you can change this behaviour by setting this to true -->
<on_cluster_queries_require_cluster_grant>false</on_cluster_queries_require_cluster_grant>
<on_cluster_queries_require_cluster_grant>true</on_cluster_queries_require_cluster_grant>

<!-- By default, for backward compatibility "SELECT * FROM system.<table>" doesn't require any grants and can be executed
by any user. You can change this behaviour by setting this to true.

@ -725,19 +725,19 @@

Exceptions: a few system tables ("tables", "columns", "databases", and some constant tables like "one", "contributors")
are still accessible for everyone; and if there is a SHOW privilege (e.g. "SHOW USERS") granted, the corresponding system
table (i.e. "system.users") will be accessible. -->
<select_from_system_db_requires_grant>false</select_from_system_db_requires_grant>
<select_from_system_db_requires_grant>true</select_from_system_db_requires_grant>

<!-- By default, for backward compatibility "SELECT * FROM information_schema.<table>" doesn't require any grants and can be
executed by any user. You can change this behaviour by setting this to true.
If it's set to true then this query requires "GRANT SELECT ON information_schema.<table>" just like for ordinary tables. -->
<select_from_information_schema_requires_grant>false</select_from_information_schema_requires_grant>
<select_from_information_schema_requires_grant>true</select_from_information_schema_requires_grant>

<!-- By default, for backward compatibility a settings profile constraint for a specific setting inherits every field that is not set from the
previous profile. You can change this behaviour by setting this to true.
If it's set to true and a settings profile has a constraint for a specific setting, then this constraint completely cancels all
actions of the previous constraint (defined in other profiles) for the same setting, including fields that are not set by the new constraint.
It also enables the 'changeable_in_readonly' constraint type. -->
<settings_constraints_replace_previous>false</settings_constraints_replace_previous>
<settings_constraints_replace_previous>true</settings_constraints_replace_previous>

<!-- Number of seconds since last access a role is stored in the Role Cache -->
<role_cache_expiration_time_seconds>600</role_cache_expiration_time_seconds>
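
As a hedged sketch of what enabling `select_from_system_db_requires_grant` means in practice (the user and table names below are placeholders, not part of this change):

```sql
-- Without such a grant, plain SELECTs on most system tables are denied.
GRANT SELECT ON system.processes TO alice;
```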

@ -1,5 +1,6 @@

#include <Common/Exception.h>
#include <Common/TerminalSize.h>
#include <Common/re2.h>

#include <IO/ReadHelpers.h>
#include <IO/ReadBufferFromFile.h>

@ -12,15 +13,6 @@

#include <boost/program_options.hpp>
#include <filesystem>

#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <re2/re2.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif

namespace fs = std::filesystem;

#define EXTRACT_PATH_PATTERN ".*\\/store/(.*)"
@ -24,20 +24,12 @@

#include <Storages/MergeTree/MergeTreeSettings.h>
#include <base/defines.h>
#include <IO/Operators.h>
#include <Common/re2.h>
#include <Poco/AccessExpireCache.h>
#include <boost/algorithm/string/join.hpp>
#include <filesystem>
#include <mutex>

#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <re2/re2.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif

namespace DB
{
namespace ErrorCodes

@ -200,6 +200,7 @@ enum class AccessType

M(SYSTEM_UNFREEZE, "SYSTEM UNFREEZE", GLOBAL, SYSTEM) \
M(SYSTEM_FAILPOINT, "SYSTEM ENABLE FAILPOINT, SYSTEM DISABLE FAILPOINT", GLOBAL, SYSTEM) \
M(SYSTEM_LISTEN, "SYSTEM START LISTEN, SYSTEM STOP LISTEN", GLOBAL, SYSTEM) \
M(SYSTEM_JEMALLOC, "SYSTEM JEMALLOC PURGE, SYSTEM JEMALLOC ENABLE PROFILE, SYSTEM JEMALLOC DISABLE PROFILE, SYSTEM JEMALLOC FLUSH PROFILE", GLOBAL, SYSTEM) \
M(SYSTEM, "", GROUP, ALL) /* allows to execute SYSTEM {SHUTDOWN|RELOAD CONFIG|...} */ \
\
M(dictGet, "dictHas, dictGetHierarchy, dictIsIn", DICTIONARY, ALL) /* allows to execute functions dictGet(), dictHas(), dictGetHierarchy(), dictIsIn() */\
@ -179,7 +179,7 @@ ConstStoragePtr MultipleAccessStorage::getStorage(const UUID & id) const

return const_cast<MultipleAccessStorage *>(this)->getStorage(id);
}

StoragePtr MultipleAccessStorage::findStorageByName(const DB::String & storage_name)
StoragePtr MultipleAccessStorage::findStorageByName(const String & storage_name)
{
auto storages = getStoragesInternal();
for (const auto & storage : *storages)

@ -192,13 +192,13 @@ StoragePtr MultipleAccessStorage::findStorageByName(const DB::String & storage_n

}

ConstStoragePtr MultipleAccessStorage::findStorageByName(const DB::String & storage_name) const
ConstStoragePtr MultipleAccessStorage::findStorageByName(const String & storage_name) const
{
return const_cast<MultipleAccessStorage *>(this)->findStorageByName(storage_name);
}

StoragePtr MultipleAccessStorage::getStorageByName(const DB::String & storage_name)
StoragePtr MultipleAccessStorage::getStorageByName(const String & storage_name)
{
auto storage = findStorageByName(storage_name);
if (storage)

@ -208,12 +208,12 @@ StoragePtr MultipleAccessStorage::getStorageByName(const DB::String & storage_na

}

ConstStoragePtr MultipleAccessStorage::getStorageByName(const DB::String & storage_name) const
ConstStoragePtr MultipleAccessStorage::getStorageByName(const String & storage_name) const
{
return const_cast<MultipleAccessStorage *>(this)->getStorageByName(storage_name);
}

StoragePtr MultipleAccessStorage::findExcludingStorage(AccessEntityType type, const DB::String & name, DB::MultipleAccessStorage::StoragePtr exclude) const
StoragePtr MultipleAccessStorage::findExcludingStorage(AccessEntityType type, const String & name, DB::MultipleAccessStorage::StoragePtr exclude) const
{
auto storages = getStoragesInternal();
for (const auto & storage : *storages)
@ -3,15 +3,7 @@

#include <Analyzer/Identifier.h>
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/ListNode.h>

#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <re2/re2.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#include <Common/re2.h>

namespace DB
{

@ -4,15 +4,7 @@

#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/ColumnTransformers.h>
#include <Parsers/ASTAsterisk.h>

#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <re2/re2.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#include <Common/re2.h>

namespace DB
{
@ -64,39 +64,43 @@ public:

auto lhs_argument_node_type = lhs_argument->getNodeType();
auto rhs_argument_node_type = rhs_argument->getNodeType();

QueryTreeNodePtr candidate;

if (lhs_argument_node_type == QueryTreeNodeType::FUNCTION && rhs_argument_node_type == QueryTreeNodeType::FUNCTION)
tryOptimizeComparisonTupleFunctions(node, lhs_argument, rhs_argument, comparison_function_name);
candidate = tryOptimizeComparisonTupleFunctions(lhs_argument, rhs_argument, comparison_function_name);
else if (lhs_argument_node_type == QueryTreeNodeType::FUNCTION && rhs_argument_node_type == QueryTreeNodeType::CONSTANT)
tryOptimizeComparisonTupleFunctionAndConstant(node, lhs_argument, rhs_argument, comparison_function_name);
candidate = tryOptimizeComparisonTupleFunctionAndConstant(lhs_argument, rhs_argument, comparison_function_name);
else if (lhs_argument_node_type == QueryTreeNodeType::CONSTANT && rhs_argument_node_type == QueryTreeNodeType::FUNCTION)
tryOptimizeComparisonTupleFunctionAndConstant(node, rhs_argument, lhs_argument, comparison_function_name);
candidate = tryOptimizeComparisonTupleFunctionAndConstant(rhs_argument, lhs_argument, comparison_function_name);

if (candidate != nullptr && node->getResultType()->equals(*candidate->getResultType()))
node = candidate;
}

private:
void tryOptimizeComparisonTupleFunctions(QueryTreeNodePtr & node,
QueryTreeNodePtr tryOptimizeComparisonTupleFunctions(
const QueryTreeNodePtr & lhs_function_node,
const QueryTreeNodePtr & rhs_function_node,
const std::string & comparison_function_name) const
{
const auto & lhs_function_node_typed = lhs_function_node->as<FunctionNode &>();
if (lhs_function_node_typed.getFunctionName() != "tuple")
return;
return {};

const auto & rhs_function_node_typed = rhs_function_node->as<FunctionNode &>();
if (rhs_function_node_typed.getFunctionName() != "tuple")
return;
return {};

const auto & lhs_tuple_function_arguments_nodes = lhs_function_node_typed.getArguments().getNodes();
size_t lhs_tuple_function_arguments_nodes_size = lhs_tuple_function_arguments_nodes.size();

const auto & rhs_tuple_function_arguments_nodes = rhs_function_node_typed.getArguments().getNodes();
if (lhs_tuple_function_arguments_nodes_size != rhs_tuple_function_arguments_nodes.size())
return;
return {};

if (lhs_tuple_function_arguments_nodes_size == 1)
{
node = makeComparisonFunction(lhs_tuple_function_arguments_nodes[0], rhs_tuple_function_arguments_nodes[0], comparison_function_name);
return;
return makeComparisonFunction(lhs_tuple_function_arguments_nodes[0], rhs_tuple_function_arguments_nodes[0], comparison_function_name);
}

QueryTreeNodes tuple_arguments_equals_functions;

@ -108,45 +112,44 @@ private:

tuple_arguments_equals_functions.push_back(std::move(equals_function));
}

node = makeEquivalentTupleComparisonFunction(std::move(tuple_arguments_equals_functions), comparison_function_name);
return makeEquivalentTupleComparisonFunction(std::move(tuple_arguments_equals_functions), comparison_function_name);
}

void tryOptimizeComparisonTupleFunctionAndConstant(QueryTreeNodePtr & node,
QueryTreeNodePtr tryOptimizeComparisonTupleFunctionAndConstant(
const QueryTreeNodePtr & function_node,
const QueryTreeNodePtr & constant_node,
const std::string & comparison_function_name) const
{
const auto & function_node_typed = function_node->as<FunctionNode &>();
if (function_node_typed.getFunctionName() != "tuple")
return;
return {};

auto & constant_node_typed = constant_node->as<ConstantNode &>();
const auto & constant_node_value = constant_node_typed.getValue();
if (constant_node_value.getType() != Field::Types::Which::Tuple)
return;
return {};

const auto & constant_tuple = constant_node_value.get<const Tuple &>();

const auto & function_arguments_nodes = function_node_typed.getArguments().getNodes();
size_t function_arguments_nodes_size = function_arguments_nodes.size();
if (function_arguments_nodes_size != constant_tuple.size())
return;
return {};

auto constant_node_result_type = constant_node_typed.getResultType();
const auto * tuple_data_type = typeid_cast<const DataTypeTuple *>(constant_node_result_type.get());
if (!tuple_data_type)
return;
return {};

const auto & tuple_data_type_elements = tuple_data_type->getElements();
if (tuple_data_type_elements.size() != function_arguments_nodes_size)
return;
return {};

if (function_arguments_nodes_size == 1)
{
auto comparison_argument_constant_value = std::make_shared<ConstantValue>(constant_tuple[0], tuple_data_type_elements[0]);
auto comparison_argument_constant_node = std::make_shared<ConstantNode>(std::move(comparison_argument_constant_value));
node = makeComparisonFunction(function_arguments_nodes[0], std::move(comparison_argument_constant_node), comparison_function_name);
return;
return makeComparisonFunction(function_arguments_nodes[0], std::move(comparison_argument_constant_node), comparison_function_name);
}

QueryTreeNodes tuple_arguments_equals_functions;

@ -160,7 +163,7 @@ private:

tuple_arguments_equals_functions.push_back(std::move(equals_function));
}

node = makeEquivalentTupleComparisonFunction(std::move(tuple_arguments_equals_functions), comparison_function_name);
return makeEquivalentTupleComparisonFunction(std::move(tuple_arguments_equals_functions), comparison_function_name);
}

QueryTreeNodePtr makeEquivalentTupleComparisonFunction(QueryTreeNodes tuple_arguments_equals_functions,
@ -61,6 +61,8 @@ public:

return;

auto & count_distinct_argument_column = count_distinct_arguments_nodes[0];
if (count_distinct_argument_column->getNodeType() != QueryTreeNodeType::COLUMN)
return;
auto & count_distinct_argument_column_typed = count_distinct_argument_column->as<ColumnNode &>();

/// Build subquery SELECT count_distinct_argument_column FROM table_expression GROUP BY count_distinct_argument_column

@ -49,6 +49,9 @@ public:

if (!first_argument_column_node)
return;

if (first_argument_column_node->getColumnName() == "__grouping_set")
return;

auto column_source = first_argument_column_node->getColumnSource();
auto * table_node = column_source->as<TableNode>();

@ -227,19 +227,20 @@ void resolveGroupingFunctions(QueryTreeNodePtr & query_node, ContextPtr context)

visitor.visit(query_node);
}

class GroupingFunctionsResolveVisitor : public InDepthQueryTreeVisitor<GroupingFunctionsResolveVisitor>
class GroupingFunctionsResolveVisitor : public InDepthQueryTreeVisitorWithContext<GroupingFunctionsResolveVisitor>
{
using Base = InDepthQueryTreeVisitorWithContext<GroupingFunctionsResolveVisitor>;
public:
explicit GroupingFunctionsResolveVisitor(ContextPtr context_)
: context(std::move(context_))
: Base(std::move(context_))
{}

void visitImpl(QueryTreeNodePtr & node)
void enterImpl(QueryTreeNodePtr & node)
{
if (node->getNodeType() != QueryTreeNodeType::QUERY)
return;

resolveGroupingFunctions(node, context);
resolveGroupingFunctions(node, getContext());
}

private:

@ -91,6 +91,9 @@ public:

const auto * column_id = func_node->getArguments().getNodes()[0]->as<ColumnNode>();
if (!column_id) return;

if (column_id->getColumnName() == "__grouping_set")
return;

const auto * column_type = column_id->getColumnType().get();
if (!isDateOrDate32(column_type) && !isDateTime(column_type) && !isDateTime64(column_type)) return;
@ -127,6 +127,7 @@ namespace ErrorCodes

extern const int FUNCTION_CANNOT_HAVE_PARAMETERS;
extern const int SYNTAX_ERROR;
extern const int UNEXPECTED_EXPRESSION;
extern const int INVALID_IDENTIFIER;
}

/** Query analyzer implementation overview. Please check documentation in QueryAnalysisPass.h first.

@ -2429,7 +2430,7 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveTableIdentifierFromDatabaseCatalog(con

{
size_t parts_size = table_identifier.getPartsSize();
if (parts_size < 1 || parts_size > 2)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
throw Exception(ErrorCodes::INVALID_IDENTIFIER,
"Expected table identifier to contain 1 or 2 parts. Actual '{}'",
table_identifier.getFullName());

@ -2826,7 +2827,7 @@ bool QueryAnalyzer::tryBindIdentifierToTableExpression(const IdentifierLookup &

{
size_t parts_size = identifier_lookup.identifier.getPartsSize();
if (parts_size != 1 && parts_size != 2)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
throw Exception(ErrorCodes::INVALID_IDENTIFIER,
"Expected identifier '{}' to contain 1 or 2 parts to be resolved as table expression. In scope {}",
identifier_lookup.identifier.getFullName(),
table_expression_node->formatASTForErrorMessage());

@ -3054,7 +3055,7 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromTableExpression(const Id

{
size_t parts_size = identifier_lookup.identifier.getPartsSize();
if (parts_size != 1 && parts_size != 2)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
throw Exception(ErrorCodes::INVALID_IDENTIFIER,
"Expected identifier '{}' to contain 1 or 2 parts to be resolved as table expression. In scope {}",
identifier_lookup.identifier.getFullName(),
table_expression_node->formatASTForErrorMessage());

@ -4774,7 +4775,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi

{
size_t parts_size = identifier.getPartsSize();
if (parts_size < 1 || parts_size > 2)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
throw Exception(ErrorCodes::INVALID_IDENTIFIER,
"Expected {} function first argument identifier to contain 1 or 2 parts. Actual '{}'. In scope {}",
function_name,
identifier.getFullName(),
@@ -52,6 +52,9 @@ public:
             return;

         auto & column_node = node->as<ColumnNode &>();
+        if (column_node.getColumnName() == "__grouping_set")
+            return;
+
         auto column_source_node = column_node.getColumnSource();
         auto column_source_node_type = column_source_node->getNodeType();
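This hunk and the `@@ -91,6 +91,9 @@` hunk earlier add the same guard: `__grouping_set` is the internal marker column the analyzer materializes for GROUPING SETS queries, so optimization passes must skip it rather than treat it as an ordinary column. The shared pattern, verbatim from both hunks:

    // Bail out before the pass inspects the analyzer-internal grouping-set marker.
    if (column_node.getColumnName() == "__grouping_set")
        return;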
@@ -190,6 +190,12 @@ void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node)
     }
 }

+void QueryTreePassManager::runOnlyResolve(QueryTreeNodePtr query_tree_node)
+{
+    // Run only QueryAnalysisPass and GroupingFunctionsResolvePass passes.
+    run(query_tree_node, 2);
+}
+
 void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node, size_t up_to_pass_index)
 {
     size_t passes_size = passes.size();
@@ -243,6 +249,8 @@ void QueryTreePassManager::dump(WriteBuffer & buffer, size_t up_to_pass_index)
 void addQueryTreePasses(QueryTreePassManager & manager)
 {
     manager.addPass(std::make_unique<QueryAnalysisPass>());
+    manager.addPass(std::make_unique<GroupingFunctionsResolvePass>());
+
     manager.addPass(std::make_unique<RemoveUnusedProjectionColumnsPass>());
     manager.addPass(std::make_unique<FunctionToSubcolumnsPass>());

@@ -278,7 +286,6 @@ void addQueryTreePasses(QueryTreePassManager & manager)

     manager.addPass(std::make_unique<LogicalExpressionOptimizerPass>());

-    manager.addPass(std::make_unique<GroupingFunctionsResolvePass>());
     manager.addPass(std::make_unique<AutoFinalOnQueryPass>());
     manager.addPass(std::make_unique<CrossToInnerJoinPass>());
     manager.addPass(std::make_unique<ShardNumColumnToFunctionPass>());
@@ -27,6 +27,9 @@ public:
     /// Run query tree passes on query tree
     void run(QueryTreeNodePtr query_tree_node);

+    /// Run only the query tree passes responsible for name resolution.
+    void runOnlyResolve(QueryTreeNodePtr query_tree_node);
+
     /** Run query tree passes on query tree up to up_to_pass_index.
       * Throws exception if up_to_pass_index is greater than passes size.
       */
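Because `GroupingFunctionsResolvePass` now runs immediately after `QueryAnalysisPass` (the reordering in the `addQueryTreePasses` hunks above), the first two passes are exactly name resolution, which is what `runOnlyResolve` relies on. A hypothetical caller (variable names are assumptions):

    // runOnlyResolve() is documented above as run(query_tree_node, 2), i.e.
    // QueryAnalysisPass followed by GroupingFunctionsResolvePass.
    QueryTreePassManager pass_manager(context);
    addQueryTreePasses(pass_manager);
    pass_manager.runOnlyResolve(query_tree_node); // names resolved, optimization passes skipped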
@@ -396,7 +396,7 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Contex
     String backup_name_for_logging = backup_info.toStringForLogging();
     String base_backup_name;
     if (backup_settings.base_backup_info)
-        base_backup_name = backup_settings.base_backup_info->toString();
+        base_backup_name = backup_settings.base_backup_info->toStringForLogging();

     try
     {

@@ -750,7 +750,7 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt
     String backup_name_for_logging = backup_info.toStringForLogging();
     String base_backup_name;
     if (restore_settings.base_backup_info)
-        base_backup_name = restore_settings.base_backup_info->toString();
+        base_backup_name = restore_settings.base_backup_info->toStringForLogging();

     addInfo(restore_id, backup_name_for_logging, base_backup_name, restore_settings.internal, BackupStatus::RESTORING);
@@ -551,13 +551,18 @@ endif ()
 target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::lz4)

 if (TARGET ch_contrib::qpl)
     dbms_target_link_libraries(PUBLIC ch_contrib::qpl)
 endif ()

 if (TARGET ch_contrib::accel-config)
     dbms_target_link_libraries(PUBLIC ch_contrib::accel-config)
 endif ()

+if (TARGET ch_contrib::qatzstd_plugin)
+    dbms_target_link_libraries(PUBLIC ch_contrib::qatzstd_plugin)
+    target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::qatzstd_plugin)
+endif ()
+
 target_link_libraries(clickhouse_common_io PUBLIC boost::context)
 dbms_target_link_libraries(PUBLIC boost::context)
@@ -651,7 +651,13 @@ void Connection::sendQuery(
         if (method == "ZSTD")
             level = settings->network_zstd_compression_level;

-        CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_deflate_qpl_codec);
+        CompressionCodecFactory::instance().validateCodec(
+            method,
+            level,
+            !settings->allow_suspicious_codecs,
+            settings->allow_experimental_codecs,
+            settings->enable_deflate_qpl_codec,
+            settings->enable_zstd_qat_codec);
         compression_codec = CompressionCodecFactory::instance().get(method, level);
     }
     else
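The reformatted call threads the new `enable_zstd_qat_codec` setting through client-side codec validation, alongside the existing `enable_deflate_qpl_codec` gate. A hedged sketch of the same call shape outside `sendQuery` (method name and flag values are arbitrary examples, not taken from this diff):

    // Validate a codec under the same gates as above, then instantiate it.
    const String method = "ZSTD";
    const int level = 3;
    CompressionCodecFactory::instance().validateCodec(
        method,
        level,
        /* sanity check, i.e. !allow_suspicious_codecs */ true,
        /* allow_experimental_codecs */ false,
        /* enable_deflate_qpl_codec */ false,
        /* enable_zstd_qat_codec */ true);
    auto compression_codec = CompressionCodecFactory::instance().get(method, level);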
@@ -118,18 +118,18 @@ ConnectionPoolWithFailover::Status ConnectionPoolWithFailover::getStatus() const
     return result;
 }

-std::vector<IConnectionPool::Entry> ConnectionPoolWithFailover::getMany(const ConnectionTimeouts & timeouts,
-    const Settings & settings,
-    PoolMode pool_mode,
-    AsyncCallback async_callback,
-    std::optional<bool> skip_unavailable_endpoints)
+std::vector<IConnectionPool::Entry> ConnectionPoolWithFailover::getMany(
+    const ConnectionTimeouts & timeouts,
+    const Settings & settings,
+    PoolMode pool_mode,
+    AsyncCallback async_callback,
+    std::optional<bool> skip_unavailable_endpoints,
+    GetPriorityForLoadBalancing::Func priority_func)
 {
     TryGetEntryFunc try_get_entry = [&](NestedPool & pool, std::string & fail_message)
-    {
-        return tryGetEntry(pool, timeouts, fail_message, settings, nullptr, async_callback);
-    };
+    { return tryGetEntry(pool, timeouts, fail_message, settings, nullptr, async_callback); };

-    std::vector<TryResult> results = getManyImpl(settings, pool_mode, try_get_entry, skip_unavailable_endpoints);
+    std::vector<TryResult> results = getManyImpl(settings, pool_mode, try_get_entry, skip_unavailable_endpoints, priority_func);

     std::vector<Entry> entries;
     entries.reserve(results.size());
@@ -153,17 +153,17 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g

 std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::getManyChecked(
     const ConnectionTimeouts & timeouts,
-    const Settings & settings, PoolMode pool_mode,
+    const Settings & settings,
+    PoolMode pool_mode,
     const QualifiedTableName & table_to_check,
     AsyncCallback async_callback,
-    std::optional<bool> skip_unavailable_endpoints)
+    std::optional<bool> skip_unavailable_endpoints,
+    GetPriorityForLoadBalancing::Func priority_func)
 {
     TryGetEntryFunc try_get_entry = [&](NestedPool & pool, std::string & fail_message)
-    {
-        return tryGetEntry(pool, timeouts, fail_message, settings, &table_to_check, async_callback);
-    };
+    { return tryGetEntry(pool, timeouts, fail_message, settings, &table_to_check, async_callback); };

-    return getManyImpl(settings, pool_mode, try_get_entry, skip_unavailable_endpoints);
+    return getManyImpl(settings, pool_mode, try_get_entry, skip_unavailable_endpoints, priority_func);
 }

 ConnectionPoolWithFailover::Base::GetPriorityFunc ConnectionPoolWithFailover::makeGetPriorityFunc(const Settings & settings)
@@ -175,14 +175,16 @@ ConnectionPoolWithFailover::Base::GetPriorityFunc ConnectionPoolWithFailover::ma
 }

 std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::getManyImpl(
-    const Settings & settings,
-    PoolMode pool_mode,
-    const TryGetEntryFunc & try_get_entry,
-    std::optional<bool> skip_unavailable_endpoints)
+    const Settings & settings,
+    PoolMode pool_mode,
+    const TryGetEntryFunc & try_get_entry,
+    std::optional<bool> skip_unavailable_endpoints,
+    GetPriorityForLoadBalancing::Func priority_func)
 {
     if (nested_pools.empty())
-        throw DB::Exception(DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED,
-                            "Cannot get connection from ConnectionPoolWithFailover cause nested pools are empty");
+        throw DB::Exception(
+            DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED,
+            "Cannot get connection from ConnectionPoolWithFailover cause nested pools are empty");

     if (!skip_unavailable_endpoints.has_value())
         skip_unavailable_endpoints = settings.skip_unavailable_shards;

@@ -203,14 +205,13 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
     else
         throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown pool allocation mode");

-    GetPriorityFunc get_priority = makeGetPriorityFunc(settings);
+    if (!priority_func)
+        priority_func = makeGetPriorityFunc(settings);

     UInt64 max_ignored_errors = settings.distributed_replica_max_ignored_errors.value;
     bool fallback_to_stale_replicas = settings.fallback_to_stale_replicas_for_distributed_queries.value;

-    return Base::getMany(min_entries, max_entries, max_tries,
-        max_ignored_errors, fallback_to_stale_replicas,
-        try_get_entry, get_priority);
+    return Base::getMany(min_entries, max_entries, max_tries, max_ignored_errors, fallback_to_stale_replicas, try_get_entry, priority_func);
 }

 ConnectionPoolWithFailover::TryResult
@@ -251,11 +252,14 @@ ConnectionPoolWithFailover::tryGetEntry(
     return result;
 }

-std::vector<ConnectionPoolWithFailover::Base::ShuffledPool> ConnectionPoolWithFailover::getShuffledPools(const Settings & settings)
+std::vector<ConnectionPoolWithFailover::Base::ShuffledPool>
+ConnectionPoolWithFailover::getShuffledPools(const Settings & settings, GetPriorityForLoadBalancing::Func priority_func)
 {
-    GetPriorityFunc get_priority = makeGetPriorityFunc(settings);
+    if (!priority_func)
+        priority_func = makeGetPriorityFunc(settings);

     UInt64 max_ignored_errors = settings.distributed_replica_max_ignored_errors.value;
-    return Base::getShuffledPools(max_ignored_errors, get_priority);
+    return Base::getShuffledPools(max_ignored_errors, priority_func);
 }

 }
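Every overload above gains an optional `GetPriorityForLoadBalancing::Func` hook, and each call site falls back to `makeGetPriorityFunc(settings)` when the hook is empty, so the default load-balancing behavior is unchanged. A hypothetical caller pinning the first replica (the functor's shape, mapping a pool index to a `Priority` where lower wins, is inferred from how `priority_func` is consumed here and is not stated in this diff):

    // Hypothetical: always prefer nested pool 0, treat the rest as fallbacks.
    GetPriorityForLoadBalancing::Func prefer_first = [](size_t pool_index)
    {
        return Priority{pool_index == 0 ? 0 : 1};
    };

    auto entries = pool.getMany(
        timeouts,
        settings,
        PoolMode::GET_MANY,
        /* async_callback = */ {},
        /* skip_unavailable_endpoints = */ std::nullopt,
        prefer_first);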
@@ -54,10 +54,13 @@ public:
    /** Allocates up to the specified number of connections to work.
      * Connections provide access to different replicas of one shard.
      */
-    std::vector<Entry> getMany(const ConnectionTimeouts & timeouts,
-        const Settings & settings, PoolMode pool_mode,
-        AsyncCallback async_callback = {},
-        std::optional<bool> skip_unavailable_endpoints = std::nullopt);
+    std::vector<Entry> getMany(
+        const ConnectionTimeouts & timeouts,
+        const Settings & settings,
+        PoolMode pool_mode,
+        AsyncCallback async_callback = {},
+        std::optional<bool> skip_unavailable_endpoints = std::nullopt,
+        GetPriorityForLoadBalancing::Func priority_func = {});

     /// The same as getMany(), but return std::vector<TryResult>.
     std::vector<TryResult> getManyForTableFunction(const ConnectionTimeouts & timeouts,
@@ -69,12 +72,13 @@ public:
     /// The same as getMany(), but check that replication delay for table_to_check is acceptable.
     /// Delay threshold is taken from settings.
     std::vector<TryResult> getManyChecked(
-        const ConnectionTimeouts & timeouts,
-        const Settings & settings,
-        PoolMode pool_mode,
-        const QualifiedTableName & table_to_check,
-        AsyncCallback async_callback = {},
-        std::optional<bool> skip_unavailable_endpoints = std::nullopt);
+        const ConnectionTimeouts & timeouts,
+        const Settings & settings,
+        PoolMode pool_mode,
+        const QualifiedTableName & table_to_check,
+        AsyncCallback async_callback = {},
+        std::optional<bool> skip_unavailable_endpoints = std::nullopt,
+        GetPriorityForLoadBalancing::Func priority_func = {});

     struct NestedPoolStatus
     {
@@ -87,7 +91,7 @@ public:
     using Status = std::vector<NestedPoolStatus>;
     Status getStatus() const;

-    std::vector<Base::ShuffledPool> getShuffledPools(const Settings & settings);
+    std::vector<Base::ShuffledPool> getShuffledPools(const Settings & settings, GetPriorityFunc priority_func = {});

     size_t getMaxErrorCup() const { return Base::max_error_cap; }

@@ -96,13 +100,16 @@ public:
         Base::updateSharedErrorCounts(shuffled_pools);
     }

     size_t getPoolSize() const { return Base::getPoolSize(); }

 private:
     /// Get the values of relevant settings and call Base::getMany()
     std::vector<TryResult> getManyImpl(
-        const Settings & settings,
-        PoolMode pool_mode,
-        const TryGetEntryFunc & try_get_entry,
-        std::optional<bool> skip_unavailable_endpoints = std::nullopt);
+        const Settings & settings,
+        PoolMode pool_mode,
+        const TryGetEntryFunc & try_get_entry,
+        std::optional<bool> skip_unavailable_endpoints = std::nullopt,
+        GetPriorityForLoadBalancing::Func priority_func = {});

     /// Try to get a connection from the pool and check that it is good.
     /// If table_to_check is not null and the check is enabled in settings, check that replication delay

@@ -115,7 +122,7 @@ private:
         const QualifiedTableName * table_to_check = nullptr,
         AsyncCallback async_callback = {});

-    GetPriorityFunc makeGetPriorityFunc(const Settings & settings);
+    GetPriorityForLoadBalancing::Func makeGetPriorityFunc(const Settings & settings);

     GetPriorityForLoadBalancing get_priority_load_balancing;
 };
@@ -28,16 +28,18 @@ HedgedConnections::HedgedConnections(
     const ThrottlerPtr & throttler_,
     PoolMode pool_mode,
     std::shared_ptr<QualifiedTableName> table_to_check_,
-    AsyncCallback async_callback)
+    AsyncCallback async_callback,
+    GetPriorityForLoadBalancing::Func priority_func)
     : hedged_connections_factory(
-        pool_,
-        context_->getSettingsRef(),
-        timeouts_,
-        context_->getSettingsRef().connections_with_failover_max_tries.value,
-        context_->getSettingsRef().fallback_to_stale_replicas_for_distributed_queries.value,
-        context_->getSettingsRef().max_parallel_replicas.value,
-        context_->getSettingsRef().skip_unavailable_shards.value,
-        table_to_check_)
+          pool_,
+          context_->getSettingsRef(),
+          timeouts_,
+          context_->getSettingsRef().connections_with_failover_max_tries.value,
+          context_->getSettingsRef().fallback_to_stale_replicas_for_distributed_queries.value,
+          context_->getSettingsRef().max_parallel_replicas.value,
+          context_->getSettingsRef().skip_unavailable_shards.value,
+          table_to_check_,
+          priority_func)
     , context(std::move(context_))
     , settings(context->getSettingsRef())
     , throttler(throttler_)
@@ -70,13 +70,15 @@ public:
         size_t index;
     };

-    HedgedConnections(const ConnectionPoolWithFailoverPtr & pool_,
-        ContextPtr context_,
-        const ConnectionTimeouts & timeouts_,
-        const ThrottlerPtr & throttler,
-        PoolMode pool_mode,
-        std::shared_ptr<QualifiedTableName> table_to_check_ = nullptr,
-        AsyncCallback async_callback = {});
+    HedgedConnections(
+        const ConnectionPoolWithFailoverPtr & pool_,
+        ContextPtr context_,
+        const ConnectionTimeouts & timeouts_,
+        const ThrottlerPtr & throttler,
+        PoolMode pool_mode,
+        std::shared_ptr<QualifiedTableName> table_to_check_ = nullptr,
+        AsyncCallback async_callback = {},
+        GetPriorityForLoadBalancing::Func priority_func = {});

     void sendScalarsData(Scalars & data) override;
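The constructor change simply forwards the new hook into `hedged_connections_factory`, so hedged requests rank replicas with the same caller-supplied priority as the plain pool. A construction sketch (all argument values are assumptions; `prefer_first` is the hypothetical functor from the ConnectionPoolWithFailover example above):

    HedgedConnections connections(
        pool,
        context,
        timeouts,
        throttler,
        PoolMode::GET_MANY,
        /* table_to_check_ = */ nullptr,
        /* async_callback = */ {},
        prefer_first);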
Some files were not shown because too many files have changed in this diff.