Merge branch 'master' into time_buckets_impl

Commit b6a98f7a24 by Yarik Briukhovetskyi, 2024-01-12 17:48:14 +01:00, committed via GitHub.
710 changed files with 17225 additions and 4632 deletions

.gitmodules

@@ -245,6 +245,12 @@
[submodule "contrib/idxd-config"] [submodule "contrib/idxd-config"]
path = contrib/idxd-config path = contrib/idxd-config
url = https://github.com/intel/idxd-config url = https://github.com/intel/idxd-config
[submodule "contrib/QAT-ZSTD-Plugin"]
path = contrib/QAT-ZSTD-Plugin
url = https://github.com/intel/QAT-ZSTD-Plugin
[submodule "contrib/qatlib"]
path = contrib/qatlib
url = https://github.com/intel/qatlib
[submodule "contrib/wyhash"] [submodule "contrib/wyhash"]
path = contrib/wyhash path = contrib/wyhash
url = https://github.com/wangyi-fudan/wyhash url = https://github.com/wangyi-fudan/wyhash
@@ -360,3 +366,6 @@
[submodule "contrib/sqids-cpp"] [submodule "contrib/sqids-cpp"]
path = contrib/sqids-cpp path = contrib/sqids-cpp
url = https://github.com/sqids/sqids-cpp.git url = https://github.com/sqids/sqids-cpp.git
[submodule "contrib/idna"]
path = contrib/idna
url = https://github.com/ada-url/idna.git

README.md

@@ -33,7 +33,7 @@ curl https://clickhouse.com/ | sh
## Upcoming Events

-Keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
+Keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com.

## Recent Recordings

* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"

contrib/CMakeLists.txt

@@ -154,6 +154,7 @@ add_contrib (libpqxx-cmake libpqxx)
add_contrib (libpq-cmake libpq)
add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float)
add_contrib (idna-cmake idna)
add_contrib (datasketches-cpp-cmake datasketches-cpp)
add_contrib (incbin-cmake incbin)
add_contrib (sqids-cpp-cmake sqids-cpp)
@@ -171,9 +172,9 @@ add_contrib (s2geometry-cmake s2geometry)
add_contrib (c-ares-cmake c-ares)

if (OS_LINUX AND ARCH_AMD64 AND ENABLE_SSE42)
-    option (ENABLE_QPL "Enable Intel® Query Processing Library" ${ENABLE_LIBRARIES})
+    option (ENABLE_QPL "Enable Intel® Query Processing Library (QPL)" ${ENABLE_LIBRARIES})
elseif(ENABLE_QPL)
-    message (${RECONFIGURE_MESSAGE_LEVEL} "QPL library is only supported on x86_64 arch with SSE 4.2 or higher")
+    message (${RECONFIGURE_MESSAGE_LEVEL} "QPL library is only supported on x86_64 with SSE 4.2 or higher")
endif()
if (ENABLE_QPL)
    add_contrib (idxd-config-cmake idxd-config)
@@ -182,6 +183,28 @@ else()
message(STATUS "Not using QPL") message(STATUS "Not using QPL")
endif () endif ()
if (OS_LINUX AND ARCH_AMD64)
    option (ENABLE_QATLIB "Enable Intel® QuickAssist Technology Library (QATlib)" ${ENABLE_LIBRARIES})
elseif(ENABLE_QATLIB)
    message (${RECONFIGURE_MESSAGE_LEVEL} "QATLib is only supported on x86_64")
endif()
if (ENABLE_QATLIB)
    option (ENABLE_QAT_USDM_DRIVER "A User Space DMA-able Memory (USDM) component which allocates/frees DMA-able memory" OFF)
    option (ENABLE_QAT_OUT_OF_TREE_BUILD "Using out-of-tree driver, user needs to customize ICP_ROOT variable" OFF)
    set(ICP_ROOT "" CACHE STRING "ICP_ROOT variable to define the path of out-of-tree driver package")
    if (ENABLE_QAT_OUT_OF_TREE_BUILD)
        if (ICP_ROOT STREQUAL "")
            message(FATAL_ERROR "Please define the path of out-of-tree driver package with -DICP_ROOT=xxx or disable out-of-tree build with -DENABLE_QAT_OUT_OF_TREE_BUILD=OFF; \
            If you want out-of-tree build but have no package available, please download and build ICP package from: https://www.intel.com/content/www/us/en/download/765501.html")
        endif ()
    else()
        add_contrib (qatlib-cmake qatlib) # requires: isa-l
    endif ()
    add_contrib (QAT-ZSTD-Plugin-cmake QAT-ZSTD-Plugin)
else()
    message(STATUS "Not using QATLib")
endif ()
add_contrib (morton-nd-cmake morton-nd)
if (ARCH_S390X)
    add_contrib(crc32-s390x-cmake crc32-s390x)

contrib/NuRaft
@@ -1 +1 @@
-Subproject commit 2f5f52c4d8c87c2a3a3d101ca3a0194c9b77526f
+Subproject commit 1278e32bb0d5dc489f947e002bdf8c71b0ddaa63

contrib/QAT-ZSTD-Plugin (new submodule)
@@ -0,0 +1 @@
Subproject commit e5a134e12d2ea8a5b0f3b83c5b1c325fda4eb0a8

contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt

@@ -0,0 +1,85 @@
# Intel® QuickAssist Technology ZSTD Plugin (QAT ZSTD Plugin) is a plugin to Zstandard* (ZSTD*) for accelerating compression by QAT.
# ENABLE_QAT_OUT_OF_TREE_BUILD = 1 means the kernel has no native support, and the user builds and installs the driver from the external package: https://www.intel.com/content/www/us/en/download/765501.html
# In that case, the user needs to set the ICP_ROOT environment variable to point to the root directory of the QAT driver source tree.
# ENABLE_QAT_OUT_OF_TREE_BUILD = 0 means the kernel has a built-in QAT driver, and QAT-ZSTD-Plugin only depends on qatlib.
if (ENABLE_QAT_OUT_OF_TREE_BUILD)
message(STATUS "Intel QATZSTD out-of-tree build, ICP_ROOT:${ICP_ROOT}")
set(QATZSTD_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/QAT-ZSTD-Plugin/src")
set(QATZSTD_SRC "${QATZSTD_SRC_DIR}/qatseqprod.c")
set(ZSTD_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/zstd/lib")
set(QAT_INCLUDE_DIR "${ICP_ROOT}/quickassist/include")
set(QAT_DC_INCLUDE_DIR "${ICP_ROOT}/quickassist/include/dc")
set(QAT_AL_INCLUDE_DIR "${ICP_ROOT}/quickassist/lookaside/access_layer/include")
set(QAT_USDM_INCLUDE_DIR "${ICP_ROOT}/quickassist/utilities/libusdm_drv")
set(USDM_LIBRARY "${ICP_ROOT}/build/libusdm_drv_s.so")
set(QAT_S_LIBRARY "${ICP_ROOT}/build/libqat_s.so")
if (ENABLE_QAT_USDM_DRIVER)
add_definitions(-DENABLE_USDM_DRV)
endif()
add_library(_qatzstd_plugin ${QATZSTD_SRC})
target_link_libraries (_qatzstd_plugin PUBLIC ${USDM_LIBRARY} ${QAT_S_LIBRARY})
target_include_directories(_qatzstd_plugin
SYSTEM PUBLIC "${QATZSTD_SRC_DIR}"
PRIVATE ${QAT_INCLUDE_DIR}
${QAT_DC_INCLUDE_DIR}
${QAT_AL_INCLUDE_DIR}
${QAT_USDM_INCLUDE_DIR}
${ZSTD_LIBRARY_DIR})
target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC)
add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
else () # In-tree build
message(STATUS "Intel QATZSTD in-tree build")
set(QATZSTD_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/QAT-ZSTD-Plugin/src")
set(QATZSTD_SRC "${QATZSTD_SRC_DIR}/qatseqprod.c")
set(ZSTD_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/zstd/lib")
# please download&build ICP package from: https://www.intel.com/content/www/us/en/download/765501.html
set(ICP_ROOT "${ClickHouse_SOURCE_DIR}/contrib/qatlib")
set(QAT_INCLUDE_DIR "${ICP_ROOT}/quickassist/include")
set(QAT_DC_INCLUDE_DIR "${ICP_ROOT}/quickassist/include/dc")
set(QAT_AL_INCLUDE_DIR "${ICP_ROOT}/quickassist/lookaside/access_layer/include")
set(QAT_USDM_INCLUDE_DIR "${ICP_ROOT}/quickassist/utilities/libusdm_drv")
set(USDM_LIBRARY "${ICP_ROOT}/build/libusdm_drv_s.so")
set(QAT_S_LIBRARY "${ICP_ROOT}/build/libqat_s.so")
set(LIBQAT_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qatlib")
set(LIBQAT_HEADER_DIR "${CMAKE_CURRENT_BINARY_DIR}/include")
file(MAKE_DIRECTORY
"${LIBQAT_HEADER_DIR}/qat"
)
file(COPY "${LIBQAT_ROOT_DIR}/quickassist/include/cpa.h"
DESTINATION "${LIBQAT_HEADER_DIR}/qat/"
)
file(COPY "${LIBQAT_ROOT_DIR}/quickassist/include/dc/cpa_dc.h"
DESTINATION "${LIBQAT_HEADER_DIR}/qat/"
)
file(COPY "${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/include/icp_sal_poll.h"
DESTINATION "${LIBQAT_HEADER_DIR}/qat/"
)
file(COPY "${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/include/icp_sal_user.h"
DESTINATION "${LIBQAT_HEADER_DIR}/qat/"
)
file(COPY "${LIBQAT_ROOT_DIR}/quickassist/utilities/libusdm_drv/qae_mem.h"
DESTINATION "${LIBQAT_HEADER_DIR}/qat/"
)
if (ENABLE_QAT_USDM_DRIVER)
add_definitions(-DENABLE_USDM_DRV)
endif()
add_library(_qatzstd_plugin ${QATZSTD_SRC})
target_link_libraries (_qatzstd_plugin PUBLIC ch_contrib::qatlib ch_contrib::usdm)
target_include_directories(_qatzstd_plugin PRIVATE
${QAT_INCLUDE_DIR}
${QAT_DC_INCLUDE_DIR}
${QAT_AL_INCLUDE_DIR}
${QAT_USDM_INCLUDE_DIR}
${ZSTD_LIBRARY_DIR}
${LIBQAT_HEADER_DIR})
target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC -DINTREE)
target_include_directories(_qatzstd_plugin SYSTEM PUBLIC $<BUILD_INTERFACE:${QATZSTD_SRC_DIR}> $<INSTALL_INTERFACE:include>)
add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
endif ()

contrib/azure
@@ -1 +1 @@
-Subproject commit 060c54dfb0abe869c065143303a9d3e9c54c29e3
+Subproject commit e71395e44f309f97b5a486f5c2c59b82f85dd2d2

contrib/idna (new submodule)
@@ -0,0 +1 @@
Subproject commit 3c8be01d42b75649f1ac9b697d0ef757eebfe667

contrib/idna-cmake/CMakeLists.txt

@@ -0,0 +1,24 @@
option(ENABLE_IDNA "Enable idna support" ${ENABLE_LIBRARIES})
if ((NOT ENABLE_IDNA))
message (STATUS "Not using idna")
return()
endif()
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/idna")
set (SRCS
"${LIBRARY_DIR}/src/idna.cpp"
"${LIBRARY_DIR}/src/mapping.cpp"
"${LIBRARY_DIR}/src/mapping_tables.cpp"
"${LIBRARY_DIR}/src/normalization.cpp"
"${LIBRARY_DIR}/src/normalization_tables.cpp"
"${LIBRARY_DIR}/src/punycode.cpp"
"${LIBRARY_DIR}/src/to_ascii.cpp"
"${LIBRARY_DIR}/src/to_unicode.cpp"
"${LIBRARY_DIR}/src/unicode_transcoding.cpp"
"${LIBRARY_DIR}/src/validity.cpp"
)
add_library (_idna ${SRCS})
target_include_directories(_idna PUBLIC "${LIBRARY_DIR}/include")
add_library (ch_contrib::idna ALIAS _idna)

contrib/libcxx-cmake/CMakeLists.txt

@@ -33,7 +33,6 @@ set(SRCS
"${LIBCXX_SOURCE_DIR}/src/optional.cpp" "${LIBCXX_SOURCE_DIR}/src/optional.cpp"
"${LIBCXX_SOURCE_DIR}/src/random.cpp" "${LIBCXX_SOURCE_DIR}/src/random.cpp"
"${LIBCXX_SOURCE_DIR}/src/random_shuffle.cpp" "${LIBCXX_SOURCE_DIR}/src/random_shuffle.cpp"
"${LIBCXX_SOURCE_DIR}/src/regex.cpp"
"${LIBCXX_SOURCE_DIR}/src/ryu/d2fixed.cpp" "${LIBCXX_SOURCE_DIR}/src/ryu/d2fixed.cpp"
"${LIBCXX_SOURCE_DIR}/src/ryu/d2s.cpp" "${LIBCXX_SOURCE_DIR}/src/ryu/d2s.cpp"
"${LIBCXX_SOURCE_DIR}/src/ryu/f2s.cpp" "${LIBCXX_SOURCE_DIR}/src/ryu/f2s.cpp"

@@ -1 +1 @@
-Subproject commit 1834e42289c58402c804a87be4d489892b88f3ec
+Subproject commit 2568a7cd1297c7c3044b0f3cc0c23a6f6444d856

contrib/qatlib (new submodule)
@@ -0,0 +1 @@
Subproject commit abe15d7bfc083117bfbb4baee0b49ffcd1c03c5c

contrib/qatlib-cmake/CMakeLists.txt

@@ -0,0 +1,213 @@
# Intel® QuickAssist Technology Library (QATlib).
message(STATUS "Intel QATlib ON")
set(LIBQAT_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qatlib")
set(LIBQAT_DIR "${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src")
set(LIBOSAL_DIR "${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src")
set(OPENSSL_DIR "${ClickHouse_SOURCE_DIR}/contrib/openssl")
# Build 3 libraries: _qatmgr, _osal, _qatlib
# Produce ch_contrib::qatlib by linking these libraries.
# _qatmgr
SET(LIBQATMGR_sources ${LIBQAT_DIR}/qat_direct/vfio/qat_mgr_client.c
${LIBQAT_DIR}/qat_direct/vfio/qat_mgr_lib.c
${LIBQAT_DIR}/qat_direct/vfio/qat_log.c
${LIBQAT_DIR}/qat_direct/vfio/vfio_lib.c
${LIBQAT_DIR}/qat_direct/vfio/adf_pfvf_proto.c
${LIBQAT_DIR}/qat_direct/vfio/adf_pfvf_vf_msg.c
${LIBQAT_DIR}/qat_direct/vfio/adf_vfio_pf.c)
add_library(_qatmgr ${LIBQATMGR_sources})
target_include_directories(_qatmgr PRIVATE
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/vfio
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/include
${LIBQAT_ROOT_DIR}/quickassist/include
${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/include
${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src/linux/user_space/include
${LIBQAT_ROOT_DIR}/quickassist/qat/drivers/crypto/qat/qat_common
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/include
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/common/include
${ClickHouse_SOURCE_DIR}/contrib/sysroot/linux-x86_64-musl/include)
target_compile_definitions(_qatmgr PRIVATE -DUSER_SPACE)
target_compile_options(_qatmgr PRIVATE -Wno-error=int-conversion)
# _osal
SET(LIBOSAL_sources
${LIBOSAL_DIR}/linux/user_space/OsalSemaphore.c
${LIBOSAL_DIR}/linux/user_space/OsalThread.c
${LIBOSAL_DIR}/linux/user_space/OsalMutex.c
${LIBOSAL_DIR}/linux/user_space/OsalSpinLock.c
${LIBOSAL_DIR}/linux/user_space/OsalAtomic.c
${LIBOSAL_DIR}/linux/user_space/OsalServices.c
${LIBOSAL_DIR}/linux/user_space/OsalUsrKrnProxy.c
${LIBOSAL_DIR}/linux/user_space/OsalCryptoInterface.c)
add_library(_osal ${LIBOSAL_sources})
target_include_directories(_osal PRIVATE
${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src/linux/user_space
${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/include
${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src/linux/user_space/include
${OPENSSL_DIR}/include
${ClickHouse_SOURCE_DIR}/contrib/openssl-cmake/linux_x86_64/include
${ClickHouse_SOURCE_DIR}/contrib/qatlib-cmake/include)
target_compile_definitions(_osal PRIVATE -DOSAL_ENSURE_ON -DUSE_OPENSSL)
# _qatlib
SET(LIBQAT_sources
${LIBQAT_DIR}/common/compression/dc_buffers.c
${LIBQAT_DIR}/common/compression/dc_chain.c
${LIBQAT_DIR}/common/compression/dc_datapath.c
${LIBQAT_DIR}/common/compression/dc_dp.c
${LIBQAT_DIR}/common/compression/dc_header_footer.c
${LIBQAT_DIR}/common/compression/dc_header_footer_lz4.c
${LIBQAT_DIR}/common/compression/dc_session.c
${LIBQAT_DIR}/common/compression/dc_stats.c
${LIBQAT_DIR}/common/compression/dc_err_sim.c
${LIBQAT_DIR}/common/compression/dc_ns_datapath.c
${LIBQAT_DIR}/common/compression/dc_ns_header_footer.c
${LIBQAT_DIR}/common/compression/dc_crc32.c
${LIBQAT_DIR}/common/compression/dc_crc64.c
${LIBQAT_DIR}/common/compression/dc_xxhash32.c
${LIBQAT_DIR}/common/compression/icp_sal_dc_err_sim.c
${LIBQAT_DIR}/common/crypto/asym/diffie_hellman/lac_dh_control_path.c
${LIBQAT_DIR}/common/crypto/asym/diffie_hellman/lac_dh_data_path.c
${LIBQAT_DIR}/common/crypto/asym/diffie_hellman/lac_dh_interface_check.c
${LIBQAT_DIR}/common/crypto/asym/diffie_hellman/lac_dh_stats.c
${LIBQAT_DIR}/common/crypto/asym/dsa/lac_dsa.c
${LIBQAT_DIR}/common/crypto/asym/dsa/lac_dsa_interface_check.c
${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ec.c
${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ec_common.c
${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ec_montedwds.c
${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ec_nist_curves.c
${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ecdh.c
${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ecdsa.c
${LIBQAT_DIR}/common/crypto/asym/ecc/lac_ecsm2.c
${LIBQAT_DIR}/common/crypto/asym/ecc/lac_kpt_ecdsa.c
${LIBQAT_DIR}/common/crypto/asym/large_number/lac_ln.c
${LIBQAT_DIR}/common/crypto/asym/large_number/lac_ln_interface_check.c
${LIBQAT_DIR}/common/crypto/asym/pke_common/lac_pke_mmp.c
${LIBQAT_DIR}/common/crypto/asym/pke_common/lac_pke_qat_comms.c
${LIBQAT_DIR}/common/crypto/asym/pke_common/lac_pke_utils.c
${LIBQAT_DIR}/common/crypto/asym/prime/lac_prime.c
${LIBQAT_DIR}/common/crypto/asym/prime/lac_prime_interface_check.c
${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa.c
${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_control_path.c
${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_decrypt.c
${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_encrypt.c
${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_interface_check.c
${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_keygen.c
${LIBQAT_DIR}/common/crypto/asym/rsa/lac_rsa_stats.c
${LIBQAT_DIR}/common/crypto/asym/rsa/lac_kpt_rsa_decrypt.c
${LIBQAT_DIR}/common/crypto/sym/drbg/lac_sym_drbg_api.c
${LIBQAT_DIR}/common/crypto/sym/key/lac_sym_key.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_alg_chain.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_api.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_auth_enc.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_cb.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_cipher.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_compile_check.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_dp.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_hash.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_partial.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_queue.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_stats.c
${LIBQAT_DIR}/common/crypto/sym/nrbg/lac_sym_nrbg_api.c
${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat.c
${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat_cipher.c
${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat_constants_table.c
${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat_hash.c
${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c
${LIBQAT_DIR}/common/crypto/sym/qat/lac_sym_qat_key.c
${LIBQAT_DIR}/common/crypto/sym/lac_sym_hash_sw_precomputes.c
${LIBQAT_DIR}/common/crypto/kpt/provision/lac_kpt_provision.c
${LIBQAT_DIR}/common/ctrl/sal_compression.c
${LIBQAT_DIR}/common/ctrl/sal_create_services.c
${LIBQAT_DIR}/common/ctrl/sal_ctrl_services.c
${LIBQAT_DIR}/common/ctrl/sal_list.c
${LIBQAT_DIR}/common/ctrl/sal_crypto.c
${LIBQAT_DIR}/common/ctrl/sal_dc_chain.c
${LIBQAT_DIR}/common/ctrl/sal_instances.c
${LIBQAT_DIR}/common/qat_comms/sal_qat_cmn_msg.c
${LIBQAT_DIR}/common/utils/lac_buffer_desc.c
${LIBQAT_DIR}/common/utils/lac_log_message.c
${LIBQAT_DIR}/common/utils/lac_mem.c
${LIBQAT_DIR}/common/utils/lac_mem_pools.c
${LIBQAT_DIR}/common/utils/lac_sw_responses.c
${LIBQAT_DIR}/common/utils/lac_sync.c
${LIBQAT_DIR}/common/utils/sal_service_state.c
${LIBQAT_DIR}/common/utils/sal_statistics.c
${LIBQAT_DIR}/common/utils/sal_misc_error_stats.c
${LIBQAT_DIR}/common/utils/sal_string_parse.c
${LIBQAT_DIR}/common/utils/sal_user_process.c
${LIBQAT_DIR}/common/utils/sal_versions.c
${LIBQAT_DIR}/common/device/sal_dev_info.c
${LIBQAT_DIR}/user/sal_user.c
${LIBQAT_DIR}/user/sal_user_dyn_instance.c
${LIBQAT_DIR}/qat_direct/common/adf_process_proxy.c
${LIBQAT_DIR}/qat_direct/common/adf_user_cfg.c
${LIBQAT_DIR}/qat_direct/common/adf_user_device.c
${LIBQAT_DIR}/qat_direct/common/adf_user_dyn.c
${LIBQAT_DIR}/qat_direct/common/adf_user_ETring_mgr_dp.c
${LIBQAT_DIR}/qat_direct/common/adf_user_init.c
${LIBQAT_DIR}/qat_direct/common/adf_user_ring.c
${LIBQAT_DIR}/qat_direct/common/adf_user_transport_ctrl.c
${LIBQAT_DIR}/qat_direct/vfio/adf_vfio_cfg.c
${LIBQAT_DIR}/qat_direct/vfio/adf_vfio_ring.c
${LIBQAT_DIR}/qat_direct/vfio/adf_vfio_user_bundles.c
${LIBQAT_DIR}/qat_direct/vfio/adf_vfio_user_proxy.c
${LIBQAT_DIR}/common/compression/dc_crc_base.c)
add_library(_qatlib ${LIBQAT_sources})
target_include_directories(_qatlib PRIVATE
${CMAKE_SYSROOT}/usr/include
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/include
${LIBQAT_ROOT_DIR}/quickassist/utilities/libusdm_drv
${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/include
${LIBOSAL_DIR}/linux/user_space/include
${LIBQAT_ROOT_DIR}/quickassist/include
${LIBQAT_ROOT_DIR}/quickassist/include/lac
${LIBQAT_ROOT_DIR}/quickassist/include/dc
${LIBQAT_ROOT_DIR}/quickassist/qat/drivers/crypto/qat/qat_common
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/common/compression/include
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/common/crypto/sym/include
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/common/crypto/asym/include
${LIBQAT_ROOT_DIR}/quickassist/lookaside/firmware/include
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/common/include
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/include
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/common/include
${LIBQAT_ROOT_DIR}/quickassist/lookaside/access_layer/src/qat_direct/vfio
${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src/linux/user_space
${LIBQAT_ROOT_DIR}/quickassist/utilities/osal/src/linux/user_space/include
${ClickHouse_SOURCE_DIR}/contrib/sysroot/linux-x86_64-musl/include)
target_link_libraries(_qatlib PRIVATE _qatmgr _osal OpenSSL::SSL ch_contrib::isal)
target_compile_definitions(_qatlib PRIVATE -DUSER_SPACE -DLAC_BYTE_ORDER=__LITTLE_ENDIAN -DOSAL_ENSURE_ON)
target_link_options(_qatlib PRIVATE -pie -z relro -z now -z noexecstack)
target_compile_options(_qatlib PRIVATE -march=native)
add_library (ch_contrib::qatlib ALIAS _qatlib)
# _usdm
set(LIBUSDM_DIR "${ClickHouse_SOURCE_DIR}/contrib/qatlib/quickassist/utilities/libusdm_drv")
set(LIBUSDM_sources
${LIBUSDM_DIR}/user_space/vfio/qae_mem_utils_vfio.c
${LIBUSDM_DIR}/user_space/qae_mem_utils_common.c
${LIBUSDM_DIR}/user_space/vfio/qae_mem_hugepage_utils_vfio.c)
add_library(_usdm ${LIBUSDM_sources})
target_include_directories(_usdm PRIVATE
${ClickHouse_SOURCE_DIR}/contrib/sysroot/linux-x86_64-musl/include
${LIBUSDM_DIR}
${LIBUSDM_DIR}/include
${LIBUSDM_DIR}/user_space)
add_library (ch_contrib::usdm ALIAS _usdm)

contrib/qatlib-cmake/include/mqueue.h

@@ -0,0 +1,14 @@
/* This is a workaround for a build conflict issue:
   1. __GLIBC_PREREQ (referenced in OsalServices.c) is only defined in './sysroot/linux-x86_64/include/features.h'
   2. mqueue.h only exists under './sysroot/linux-x86_64-musl/'
   This causes a conflict in target_include_directories for _osal between './sysroot/linux-x86_64/include' and './sysroot/linux-x86_64-musl/',
   hence mqueue.h is created separately under ./qatlib-cmake/include as an alternative.
*/
/* Major and minor version number of the GNU C library package. Use
these macros to test for features in specific releases. */
#define __GLIBC__ 2
#define __GLIBC_MINOR__ 27
#define __GLIBC_PREREQ(maj, min) \
((__GLIBC__ << 16) + __GLIBC_MINOR__ >= ((maj) << 16) + (min))

2
contrib/rocksdb vendored

@ -1 +1 @@
Subproject commit 66e3cbec31400ed3a23deb878c5d7f56f990f0ae Subproject commit dead55e60b873d5f70f0e9458fbbba2b2180f430

2
contrib/sqids-cpp vendored

@ -1 +1 @@
Subproject commit 3756e537d4d48cc0dd4176801fe19f99601439b0 Subproject commit a471f53672e98d49223f598528a533b07b085c61

docker/server/entrypoint.sh

@@ -41,6 +41,10 @@ readarray -t DISKS_PATHS < <(clickhouse extract-from-config --config-file "$CLIC
readarray -t DISKS_METADATA_PATHS < <(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key='storage_configuration.disks.*.metadata_path' || true)

CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"
CLICKHOUSE_PASSWORD_FILE="${CLICKHOUSE_PASSWORD_FILE:-}"
if [[ -n "${CLICKHOUSE_PASSWORD_FILE}" && -f "${CLICKHOUSE_PASSWORD_FILE}" ]]; then
CLICKHOUSE_PASSWORD="$(cat "${CLICKHOUSE_PASSWORD_FILE}")"
fi
CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-}"
CLICKHOUSE_DB="${CLICKHOUSE_DB:-}"
CLICKHOUSE_ACCESS_MANAGEMENT="${CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT:-0}"


@@ -44,6 +44,9 @@ if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TR
    # It is not needed, we will explicitly create tables on s3.
    # We do not have stateful tests with s3 storage run in public repository, but this is needed for another repository.
    rm /etc/clickhouse-server/config.d/s3_storage_policy_for_merge_tree_by_default.xml
    rm /etc/clickhouse-server/config.d/storage_metadata_with_full_object_key.xml
    rm /etc/clickhouse-server/config.d/s3_storage_policy_with_template_object_key.xml
fi

function start()


@@ -193,6 +193,7 @@ stop

# Let's enable S3 storage by default
export USE_S3_STORAGE_FOR_MERGE_TREE=1
export RANDOMIZE_OBJECT_KEY_TYPE=1
export ZOOKEEPER_FAULT_INJECTION=1
configure

docs/en/engines/table-engines/integrations/s3queue.md

@@ -11,7 +11,7 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec

``` sql
CREATE TABLE s3_queue_engine_table (name String, value UInt32)
-ENGINE = S3Queue(path [, NOSIGN | aws_access_key_id, aws_secret_access_key,] format, [compression])
+ENGINE = S3Queue(path, [NOSIGN, | aws_access_key_id, aws_secret_access_key,] format, [compression])
[SETTINGS]
[mode = 'unordered',]
[after_processing = 'keep',]

docs/en/engines/table-engines/mergetree-family/mergetree.md

@@ -504,24 +504,25 @@ Indexes of type `set` can be utilized by all functions. The other index types ar

| Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | inverted |
|------------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|----------|
| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, &lt;&gt;)](/docs/en/sql-reference/functions/comparison-functions.md/#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [like](/docs/en/sql-reference/functions/string-search-functions.md/#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
+| [like](/docs/en/sql-reference/functions/string-search-functions.md/#like) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
-| [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
+| [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [match](/docs/en/sql-reference/functions/string-search-functions.md/#match) | ✗ | ✗ | ✔ | ✔ | ✗ | ✗ |
| [startsWith](/docs/en/sql-reference/functions/string-functions.md/#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [endsWith](/docs/en/sql-reference/functions/string-functions.md/#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ |
-| [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ |
+| [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ |
-| [in](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [in](/docs/en/sql-reference/functions/in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [notIn](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [notIn](/docs/en/sql-reference/functions/in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [empty](/docs/en/sql-reference/functions/array-functions/#empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [notEmpty](/docs/en/sql-reference/functions/array-functions/#notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [has](/docs/en/sql-reference/functions/array-functions#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
+| [has](/docs/en/sql-reference/functions/array-functions/#has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
-| [hasAny](/docs/en/sql-reference/functions/array-functions#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
+| [hasAny](/docs/en/sql-reference/functions/array-functions/#hasany) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
-| [hasAll](/docs/en/sql-reference/functions/array-functions#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
+| [hasAll](/docs/en/sql-reference/functions/array-functions/#hasall) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenCaseInsensitive (*) | ✗ | ✗ | ✗ | ✔ | ✗ | ✗ |
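For example, a `tokenbf_v1` index of the kind the `hasToken` row refers to could be declared as follows (table name, column, and parameters are illustrative):

```sql
-- token bloom filter: 32768-byte filter, 3 hash functions, seed 0
ALTER TABLE tab ADD INDEX message_tokens message TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4;
```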
@@ -1143,6 +1144,8 @@ Optional parameters:
- `s3_max_get_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_get_rps`.
- `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk).
- `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk).
- `key_template` — Defines the format with which object keys are generated. By default, ClickHouse takes the `root path` from the `endpoint` option and adds a randomly generated suffix: a directory with 3 random symbols and a file name with 29 random symbols. With this option you have full control over how object keys are generated. Some usage scenarios require random symbols in the prefix or in the middle of the object key, for example: `[a-z]{3}-prefix-random/constant-part/random-middle-[a-z]{3}/random-suffix-[a-z]{29}`. The value is parsed with [`re2`](https://github.com/google/re2/wiki/Syntax); only a subset of the syntax is supported, so check that your preferred format is supported before using this option. The disk is not initialized if ClickHouse is unable to generate a key from the value of `key_template`. It requires the feature flag [storage_metadata_write_full_object_key](/docs/en/operations/settings/settings#storage_metadata_write_full_object_key) to be enabled, it forbids declaring the `root path` in the `endpoint` option, and it requires the `key_compatibility_prefix` option to be defined.
- `key_compatibility_prefix` — This option is required when `key_template` is in use. To be able to read object keys that were stored in metadata files with a metadata version lower than `VERSION_FULL_OBJECT_KEY`, the previous `root path` from the `endpoint` option should be set here.
### Configuring the cache

docs/en/operations/query-cache.md

@@ -29,10 +29,6 @@ Transactionally inconsistent caching is traditionally provided by client tools o

the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
This reduces maintenance effort and avoids redundancy.

-:::note
-Security consideration: The cached query result is tied to the user executing it. Authorization checks are performed when the query is executed. This means that if there are any alterations to the user's role or permissions between the time the query is cached and when the cache is accessed, the result will not reflect these changes. We recommend using different users to distinguish between different levels of access, instead of actively toggling roles for a single user between queries, as this practice may lead to unexpected query results.
-:::

## Configuration Settings and Usage

Setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries of the

docs/en/operations/settings/index.md

@@ -1,5 +1,5 @@
---
-sidebar_label: Settings Overview
+title: "Settings Overview"
sidebar_position: 1
slug: /en/operations/settings/
pagination_next: en/operations/settings/settings
@@ -16,11 +16,34 @@ There are two main groups of ClickHouse settings:

- Global server settings
- Query-level settings

-The main distinction between global server settings and query-level settings is that
-global server settings must be set in configuration files while query-level settings
-can be set in configuration files or with SQL queries.
+The main distinction between global server settings and query-level settings is that global server settings must be set in configuration files, while query-level settings can be set in configuration files or with SQL queries.

Read about [global server settings](/docs/en/operations/server-configuration-parameters/settings.md) to learn more about configuring your ClickHouse server at the global server level.

-Read about [query-level settings](/docs/en/operations/settings/settings-query-level.md) to learn more about configuring your ClickHouse server at the query-level.
+Read about [query-level settings](/docs/en/operations/settings/settings-query-level.md) to learn more about configuring your ClickHouse server at the query level.
## See non-default settings
To view which settings have been changed from their default value:
```sql
SELECT name, value FROM system.settings WHERE changed
```
If you haven't changed any settings from their default value, then ClickHouse will return nothing.
To check the value of a particular setting, specify the `name` of the setting in your query:
```sql
SELECT name, value FROM system.settings WHERE name = 'max_threads'
```
This command should return something like:
```response
┌─name────────┬─value─────┐
│ max_threads │ 'auto(8)' │
└─────────────┴───────────┘
1 row in set. Elapsed: 0.002 sec.
```


@@ -0,0 +1,176 @@
# The MySQL Binlog Client
The MySQL Binlog Client provides a mechanism in ClickHouse to share the binlog from a MySQL instance among multiple [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md) databases. This avoids consuming unnecessary bandwidth and CPU when replicating more than one schema/database.
The implementation is resilient against crashes and disk issues. The executed GTID sets of the binlog itself and of the consuming databases are persisted only after the data they describe has been safely persisted as well. The implementation also tolerates re-doing aborted operations (at-least-once delivery).
# Settings
## use_binlog_client
Forces reuse of an existing MySQL binlog connection, or creates a new one if none exists. The connection is defined by `user:pass@host:port`.
Default value: 0
**Example**
```sql
-- create MaterializedMySQL databases that read the events from the binlog client
CREATE DATABASE db1 ENGINE = MaterializedMySQL('host:port', 'db1', 'user', 'password') SETTINGS use_binlog_client=1
CREATE DATABASE db2 ENGINE = MaterializedMySQL('host:port', 'db2', 'user', 'password') SETTINGS use_binlog_client=1
CREATE DATABASE db3 ENGINE = MaterializedMySQL('host:port', 'db3', 'user2', 'password2') SETTINGS use_binlog_client=1
```
Databases `db1` and `db2` will use the same binlog connection, since they use the same `user:pass@host:port`. Database `db3` will use a separate binlog connection.
## max_bytes_in_binlog_queue
Defines the limit of bytes in the binlog events queue. If the bytes in the queue exceed this limit, reading new events from MySQL stops until space for new events is freed. This introduces a memory limit: a very high value could consume all available memory, while a very low value could make the databases wait for new events.
Default value: 67108864
**Example**
```sql
CREATE DATABASE db1 ENGINE = MaterializedMySQL('host:port', 'db1', 'user', 'password') SETTINGS use_binlog_client=1, max_bytes_in_binlog_queue=33554432
CREATE DATABASE db2 ENGINE = MaterializedMySQL('host:port', 'db2', 'user', 'password') SETTINGS use_binlog_client=1
```
If database `db1` is unable to consume binlog events fast enough and the size of the events queue exceeds `33554432` bytes, reading of new events from MySQL is postponed until `db1` consumes the events and releases some space.

NOTE: This also impacts `db2`, which will wait for new events too, since they share the same connection.
## max_milliseconds_to_wait_in_binlog_queue
Defines the max milliseconds to wait when `max_bytes_in_binlog_queue` is exceeded. After that, the database is detached from the current binlog connection and a new one is established, to prevent other databases from waiting for this database.
Default value: 10000
**Example**
```sql
CREATE DATABASE db1 ENGINE = MaterializedMySQL('host:port', 'db1', 'user', 'password') SETTINGS use_binlog_client=1, max_bytes_in_binlog_queue=33554432, max_milliseconds_to_wait_in_binlog_queue=1000
CREATE DATABASE db2 ENGINE = MaterializedMySQL('host:port', 'db2', 'user', 'password') SETTINGS use_binlog_client=1
```
If the event queue of database `db1` is full, the binlog connection waits for `1000`ms, and if the database is still unable to consume the events, it is detached from the connection so that another one can be created.

NOTE: If database `db1` has been detached from the shared connection and a new one was created, then once the binlog connections for `db1` and `db2` reach the same position, they are merged into one, and `db1` and `db2` use the same connection again.
## max_bytes_in_binlog_dispatcher_buffer
Defines the max bytes in the binlog dispatcher's buffer before it is flushed to the attached databases. Events from the MySQL binlog connection are buffered before being sent to the attached databases; this increases the throughput of events from the binlog to the databases.
Default value: 1048576
## max_flush_milliseconds_in_binlog_dispatcher
Defines the max milliseconds the binlog dispatcher's buffer waits before it is flushed to the attached databases. If no events have been received from the MySQL binlog connection for a while, the buffered events are sent to the attached databases after this time.
Default value: 1000
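Both dispatcher settings can be set per database, in the same style as the examples above; a sketch with arbitrary values:

```sql
CREATE DATABASE db1 ENGINE = MaterializedMySQL('host:port', 'db1', 'user', 'password') SETTINGS use_binlog_client=1, max_bytes_in_binlog_dispatcher_buffer=524288, max_flush_milliseconds_in_binlog_dispatcher=500
```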
# Design
## The Binlog Events Dispatcher
Currently each MaterializedMySQL database opens its own connection to MySQL to subscribe to binlog events. There is a need to have only one connection and _dispatch_ the binlog events to all databases that replicate from the same MySQL instance.
## Each MaterializedMySQL Database Has Its Own Event Queue
To prevent slowing down other instances there should be an _event queue_ per MaterializedMySQL database to handle the events independently of the speed of other instances. The dispatcher reads an event from the binlog, and sends it to every MaterializedMySQL database that needs it. Each database handles its events in separate threads.
## Catching up
If several databases have the same binlog position, they can use the same dispatcher. If a newly created database (or one that has been detached for some time) requests events that have been already processed, we need to create another communication _channel_ to the binlog. We do this by creating another temporary dispatcher for such databases. When the new dispatcher _catches up with_ the old one, the new/temporary dispatcher is not needed anymore and all databases getting events from this dispatcher can be moved to the old one.
## Memory Limit
There is a _memory limit_ to control event queue memory consumption per MySQL Client. If a database is not able to handle events fast enough, and the event queue is getting full, we have the following options:
1. The dispatcher is blocked until the slowest database frees up space for new events. All other databases are waiting for the slowest one. (Preferred)
2. The dispatcher is _never_ blocked, but suspends incremental sync for the slow database and continues dispatching events to the remaining databases.
## Performance
A lot of CPU can be saved by not processing every event in every database. The binlog contains events for all databases; it is wasteful to distribute row events to a database that will not process them, especially if there are many databases. This requires some sort of per-database binlog filtering and buffering.

Currently all events are sent to all MaterializedMySQL databases, but parsing the event, which consumes CPU, is up to each database.
# Detailed Design
1. If a client (e.g. a database) wants to read a stream of events from the MySQL binlog, it creates a connection to the remote binlog using host/user/password and _executed GTID set_ params.
2. If another client wants to read events from the binlog but for a different _executed GTID set_, it is **not** possible to reuse the existing connection to MySQL, so another connection to the same remote binlog is created. (_This is how it is implemented today_.)
3. When these 2 connections reach the same binlog position, they read the same events. It is logical to drop the duplicate connection and move all its users out, so that one connection dispatches binlog events to several clients. Obviously, only connections to the same binlog should be merged.
## Classes
1. One connection can send (or dispatch) events to several clients; it might be called a `BinlogEventsDispatcher`.
2. Several dispatchers are grouped by _user:password@host:port_ in a `BinlogClient`, since they point to the same binlog.
3. Clients should communicate only with the public API of `BinlogClient`. The result of using `BinlogClient` is an object that implements `IBinlog` to read events from. This implementation of `IBinlog` must be compatible with the old implementation `MySQLFlavor`: when replacing the old implementation with the new one, the behavior must not change.
## SQL
```sql
-- create MaterializedMySQL databases that read the events from the binlog client
CREATE DATABASE db1_client1 ENGINE = MaterializedMySQL('host:port', 'db', 'user', 'password') SETTINGS use_binlog_client=1, max_bytes_in_binlog_queue=1024;
CREATE DATABASE db2_client1 ENGINE = MaterializedMySQL('host:port', 'db', 'user', 'password') SETTINGS use_binlog_client=1;
CREATE DATABASE db3_client1 ENGINE = MaterializedMySQL('host:port', 'db2', 'user', 'password') SETTINGS use_binlog_client=1;
CREATE DATABASE db4_client2 ENGINE = MaterializedMySQL('host2:port', 'db', 'user', 'password') SETTINGS use_binlog_client=1;
CREATE DATABASE db5_client3 ENGINE = MaterializedMySQL('host:port', 'db', 'user1', 'password') SETTINGS use_binlog_client=1;
CREATE DATABASE db6_old ENGINE = MaterializedMySQL('host:port', 'db', 'user1', 'password') SETTINGS use_binlog_client=0;
```
Databases `db1_client1`, `db2_client1` and `db3_client1` share one instance of `BinlogClient`, since they have the same params. `BinlogClient` will create 3 connections to the MySQL server and thus 3 instances of `BinlogEventsDispatcher`, but if these connections end up at the same binlog position, they should be merged into one connection: all clients are moved to one dispatcher and the others are closed. Databases `db4_client2` and `db5_client3` would use 2 different, independent `BinlogClient` instances. Database `db6_old` will use the old implementation.

NOTE: By default `use_binlog_client` is disabled. Setting `max_bytes_in_binlog_queue` defines the max allowed bytes in the binlog queue; by default, it is `1073741824` bytes. If the number of bytes exceeds this limit, dispatching is stopped until space is freed for new events.
## Binlog Table Structure
To see the status of all `BinlogClient` instances, there is the `system.mysql_binlogs` system table. It shows the list of all created and _alive_ `IBinlog` instances with information about their `BinlogEventsDispatcher` and `BinlogClient`.
Example:
```
SELECT * FROM system.mysql_binlogs FORMAT Vertical
Row 1:
──────
binlog_client_name: root@127.0.0.1:3306
name: test_Clickhouse1
mysql_binlog_name: binlog.001154
mysql_binlog_pos: 7142294
mysql_binlog_timestamp: 1660082447
mysql_binlog_executed_gtid_set: a9d88f83-c14e-11ec-bb36-244bfedf7766:1-30523304
dispatcher_name: Applier
dispatcher_mysql_binlog_name: binlog.001154
dispatcher_mysql_binlog_pos: 7142294
dispatcher_mysql_binlog_timestamp: 1660082447
dispatcher_mysql_binlog_executed_gtid_set: a9d88f83-c14e-11ec-bb36-244bfedf7766:1-30523304
size: 0
bytes: 0
max_bytes: 0
```
### Tests
Unit tests:
```
$ ./unit_tests_dbms --gtest_filter=MySQLBinlog.*
```
Integration tests:
```
$ pytest -s -vv test_materialized_mysql_database/test.py::test_binlog_client
```
Dump events from a file:
```
$ ./utils/check-mysql-binlog/check-mysql-binlog --binlog binlog.001392
```
Dump events from a server:
```
$ ./utils/check-mysql-binlog/check-mysql-binlog --host 127.0.0.1 --port 3306 --user root --password pass --gtid a9d88f83-c14e-11ec-bb36-244bfedf7766:1-30462856
```

docs/en/operations/settings/settings.md

@@ -4773,6 +4773,45 @@ Type: Int64

Default: 0
## enable_deflate_qpl_codec {#enable_deflate_qpl_codec}
If turned on, the DEFLATE_QPL codec may be used to compress columns.
Possible values:
- 0 - Disabled
- 1 - Enabled
Type: Bool
## enable_zstd_qat_codec {#enable_zstd_qat_codec}
If turned on, the ZSTD_QAT codec may be used to compress columns.
Possible values:
- 0 - Disabled
- 1 - Enabled
Type: Bool
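A minimal sketch of using this setting together with the codec it gates (table and column are illustrative; the level argument is assumed to work like `ZSTD`'s):

```sql
SET enable_zstd_qat_codec = 1;

-- ZSTD_QAT offloads ZSTD compression to Intel QAT hardware when available
CREATE TABLE test_qat
(
    url String CODEC(ZSTD_QAT(1))
)
ENGINE = MergeTree
ORDER BY url;
```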
## output_format_compression_level
Default compression level if query output is compressed. The setting is applied when `SELECT` query has `INTO OUTFILE` or when writing to table functions `file`, `url`, `hdfs`, `s3`, or `azureBlobStorage`.
Possible values: from `1` to `22`
Default: `3`
## output_format_compression_zstd_window_log
Can be used when the output compression method is `zstd`. If greater than `0`, this setting explicitly sets compression window size (power of `2`) and enables a long-range mode for zstd compression. This can help to achieve a better compression ratio.
Possible values: non-negative numbers. Note that if the value is too small or too big, `zstdlib` will throw an exception. Typical values are from `20` (window size = `1MB`) to `30` (window size = `1GB`).
Default: `0`
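For illustration, both output-compression settings can be combined with `INTO OUTFILE` (file name and values are arbitrary; the compression method is inferred from the `.zst` extension):

```sql
SELECT number
FROM numbers(1000000)
INTO OUTFILE 'numbers.tsv.zst'
SETTINGS output_format_compression_level = 10,          -- higher level, smaller file
         output_format_compression_zstd_window_log = 27 -- 2^27 = 128MB window, long-range mode
```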
## rewrite_count_distinct_if_with_count_distinct_implementation

Allows you to rewrite `countDistinctIf` with [count_distinct_implementation](#count_distinct_implementation) setting.
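A hedged sketch of the effect (the `events` table and its columns are hypothetical):

```sql
SET count_distinct_implementation = 'uniqCombined';
SET rewrite_count_distinct_if_with_count_distinct_implementation = 1;

-- with the rewrite enabled, this runs as uniqCombinedIf(user_id, event = 'click')
SELECT countDistinctIf(user_id, event = 'click') FROM events;
```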
@@ -5157,4 +5196,4 @@ The value 0 means that you can delete all tables without any restrictions.

:::note
This query setting overwrites its server setting equivalent, see [max_table_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-table-size-to-drop)
:::

docs/en/operations/system-tables/dropped_tables_parts.md

@@ -0,0 +1,14 @@
---
slug: /en/operations/system-tables/dropped_tables_parts
---
# dropped_tables_parts {#system_tables-dropped_tables_parts}
Contains information about parts of dropped [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables, i.e. tables listed in [system.dropped_tables](./dropped_tables.md).

The schema of this table is the same as [system.parts](./parts.md).
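For example (a sketch; since the schema mirrors `system.parts`, the usual columns are available):

```sql
-- which dropped tables still hold the most data on disk
SELECT database, table, name, rows, bytes_on_disk
FROM system.dropped_tables_parts
ORDER BY bytes_on_disk DESC
LIMIT 10;
```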
**See Also**
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
- [system.parts](./parts.md)
- [system.dropped_tables](./dropped_tables.md)

docs/en/operations/system-tables/query_log.md

@@ -42,7 +42,7 @@ Columns:

- `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Query starting date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Query starting time.
-- `event_time_microseconds` ([DateTime](../../sql-reference/data-types/datetime.md)) — Query starting time with microseconds precision.
+- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Query starting time with microseconds precision.
- `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time of query execution.
- `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Start time of query execution with microsecond precision.
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.

docs/en/operations/system-tables/text_log.md

@@ -10,7 +10,7 @@ Columns:

- `hostname` ([LowCardinality(String)](../../sql-reference/data-types/string.md)) — Hostname of the server executing the query.
- `event_date` (Date) — Date of the entry.
- `event_time` (DateTime) — Time of the entry.
-- `event_time_microseconds` (DateTime) — Time of the entry with microseconds precision.
+- `event_time_microseconds` (DateTime64) — Time of the entry with microseconds precision.
- `microseconds` (UInt32) — Microseconds of the entry.
- `thread_name` (String) — Name of the thread from which the logging was done.
- `thread_id` (UInt64) — OS thread ID.

docs/en/operations/utilities/clickhouse-format.md

@@ -11,6 +11,8 @@ Keys:

- `--query` — Format queries of any length and complexity.
- `--hilite` — Add syntax highlight with ANSI terminal escape sequences.
- `--oneline` — Format in single line.
- `--max_line_length` — Format queries whose length is less than the specified value as a single line.
- `--comments` — Keep comments in the output.
- `--quiet` or `-q` — Just check syntax, no output on success.
- `--multiquery` or `-n` — Allow multiple queries in the same file.
- `--obfuscate` — Obfuscate instead of formatting.

docs/en/operations/utilities/clickhouse-keeper-client.md

@@ -24,7 +24,7 @@ A client application to interact with clickhouse-keeper by its native protocol.

## Example {#clickhouse-keeper-client-example}

```bash
-./clickhouse-keeper-client -h localhost:9181 --connection-timeout 30 --session-timeout 30 --operation-timeout 30
+./clickhouse-keeper-client -h localhost -p 9181 --connection-timeout 30 --session-timeout 30 --operation-timeout 30
Connected to ZooKeeper at [::1]:9181 with session_id 137
/ :) ls
keeper foo bar

docs/en/sql-reference/data-types/datetime.md

@@ -18,6 +18,12 @@ Supported range of values: \[1970-01-01 00:00:00, 2106-02-07 06:28:15\].

Resolution: 1 second.
## Speed
The `Date` datatype is faster than `DateTime` under _most_ conditions.
The `Date` type requires 2 bytes of storage, while `DateTime` requires 4. However, when the data is compressed, this difference is amplified, because the minutes and seconds in `DateTime` are less compressible. Filtering and aggregating on `Date` instead of `DateTime` is also faster.
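As an illustration (the `visits` table is hypothetical), a filter on a `Date` column reads half the bytes of the equivalent `DateTime` filter:

```sql
-- 2 bytes per value, compresses well
SELECT count() FROM visits WHERE visit_date = '2024-01-01';

-- 4 bytes per value; minutes and seconds compress poorly
SELECT count() FROM visits WHERE toDate(visit_time) = '2024-01-01';
```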
## Usage Remarks

The point in time is saved as a [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time), regardless of the time zone or daylight saving time. The time zone affects how the values of the `DateTime` type values are displayed in text format and how the values specified as strings are parsed (2020-01-01 05:00:01).
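For instance, the same stored Unix timestamp renders differently per time zone:

```sql
SELECT
    toDateTime(1704067200, 'UTC') AS utc,                  -- 2024-01-01 00:00:00
    toDateTime(1704067200, 'America/New_York') AS new_york -- 2023-12-31 19:00:00
```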

docs/en/sql-reference/functions/array-functions.md

@@ -6,7 +6,7 @@ sidebar_label: Arrays

# Array Functions

-## empty
+## empty {#empty}

Checks whether the input array is empty.
@@ -50,7 +50,7 @@ Result:

└────────────────┘
```

-## notEmpty
+## notEmpty {#notempty}

Checks whether the input array is non-empty.
@@ -221,7 +221,7 @@ SELECT has([1, 2, NULL], NULL)

└─────────────────────────┘
```

-## hasAll
+## hasAll {#hasall}

Checks whether one array is a subset of another.
@@ -261,7 +261,7 @@ Raises an exception `NO_COMMON_TYPE` if the set and subset elements do not share

`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` returns 0.

-## hasAny
+## hasAny {#hasany}

Checks whether two arrays have intersection by some elements.


@@ -1777,34 +1777,67 @@ Result:

└────────────────────────────────────────────────────────────────────────┘
```
-## sqid
+## sqidEncode

-Transforms numbers into a [Sqid](https://sqids.org/) which is a YouTube-like ID string.
+Encodes numbers as a [Sqid](https://sqids.org/) which is a YouTube-like ID string.
The output alphabet is `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`.
-Do not use this function for hashing - the generated IDs can be decoded back into numbers.
+Do not use this function for hashing - the generated IDs can be decoded back into the original numbers.

**Syntax**

```sql
-sqid(number1, ...)
+sqidEncode(number1, ...)
```

Alias: `sqid`

**Arguments**

- A variable number of UInt8, UInt16, UInt32 or UInt64 numbers.

**Returned Value**

-A hash id [String](/docs/en/sql-reference/data-types/string.md).
+A sqid [String](/docs/en/sql-reference/data-types/string.md).

**Example**

```sql
-SELECT sqid(1, 2, 3, 4, 5);
+SELECT sqidEncode(1, 2, 3, 4, 5);
```

```response
-┌─sqid(1, 2, 3, 4, 5)─┐
-│ gXHfJ1C6dN          │
-└─────────────────────┘
+┌─sqidEncode(1, 2, 3, 4, 5)─┐
+│ gXHfJ1C6dN                │
+└───────────────────────────┘
```
## sqidDecode
Decodes a [Sqid](https://sqids.org/) back into its original numbers.
Returns an empty array if the input string is not a valid sqid.
**Syntax**
```sql
sqidDecode(sqid)
```
**Arguments**
- A sqid - [String](/docs/en/sql-reference/data-types/string.md)
**Returned Value**
The sqid decoded back into its original numbers [Array(UInt64)](/docs/en/sql-reference/data-types/array.md).
**Example**
```sql
SELECT sqidDecode('gXHfJ1C6dN');
```
```response
┌─sqidDecode('gXHfJ1C6dN')─┐
│ [1,2,3,4,5] │
└──────────────────────────┘
``` ```

View File

@ -731,7 +731,7 @@ Alias: `FROM_BASE64`.
Like `base64Decode` but returns an empty string in case of error. Like `base64Decode` but returns an empty string in case of error.
## endsWith ## endsWith {#endswith}
Returns whether string `str` ends with `suffix`. Returns whether string `str` ends with `suffix`.
@ -765,7 +765,7 @@ Result:
└──────────────────────────┴──────────────────────┘ └──────────────────────────┴──────────────────────┘
``` ```
## startsWith ## startsWith {#startswith}
Returns whether string `str` starts with `prefix`. Returns whether string `str` starts with `prefix`.
@ -1383,6 +1383,148 @@ Result:
└──────────────────┘ └──────────────────┘
``` ```
## punycodeEncode
Returns the [Punycode](https://en.wikipedia.org/wiki/Punycode) representation of a string.
The string must be UTF8-encoded, otherwise the behavior is undefined.
**Syntax**
``` sql
punycodeEncode(val)
```
**Arguments**
- `val` - Input value. [String](../data-types/string.md)
**Returned value**
- A Punycode representation of the input value. [String](../data-types/string.md)
**Example**
``` sql
SELECT punycodeEncode('München');
```
Result:
```result
┌─punycodeEncode('München')─┐
│ Mnchen-3ya │
└───────────────────────────┘
```
## punycodeDecode
Returns the UTF8-encoded plaintext of a [Punycode](https://en.wikipedia.org/wiki/Punycode)-encoded string.
If no valid Punycode-encoded string is given, an exception is thrown.
**Syntax**
``` sql
punycodeDecode(val)
```
**Arguments**
- `val` - Punycode-encoded string. [String](../data-types/string.md)
**Returned value**
- The plaintext of the input value. [String](../data-types/string.md)
**Example**
``` sql
SELECT punycodeDecode('Mnchen-3ya');
```
Result:
```result
┌─punycodeDecode('Mnchen-3ya')─┐
│ München │
└──────────────────────────────┘
```
## tryPunycodeDecode
Like `punycodeDecode` but returns an empty string if no valid Punycode-encoded string is given.
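A short sketch of the difference between the throwing and non-throwing variants (the second input is assumed to be invalid Punycode because it contains non-ASCII characters):
```sql
SELECT
    tryPunycodeDecode('Mnchen-3ya') AS valid,  -- 'München'
    tryPunycodeDecode('München') AS invalid;   -- '' (not a valid Punycode string)
```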
## idnaEncode
Returns the ASCII representation (ToASCII algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
The input string must be UTF8-encoded and translatable to an ASCII string, otherwise an exception is thrown.
Note: No percent decoding or trimming of tabs, spaces or control characters is performed.
**Syntax**
```sql
idnaEncode(val)
```
**Arguments**
- `val` - Input value. [String](../data-types/string.md)
**Returned value**
- An ASCII representation according to the IDNA mechanism of the input value. [String](../data-types/string.md)
**Example**
``` sql
SELECT idnaEncode('straße.münchen.de');
```
Result:
```result
┌─idnaEncode('straße.münchen.de')─────┐
│ xn--strae-oqa.xn--mnchen-3ya.de │
└─────────────────────────────────────┘
```
## tryIdnaEncode
Like `idnaEncode` but returns an empty string in case of an error instead of throwing an exception.
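A short sketch, mirroring the `idnaEncode` example above (the second input is assumed to be untranslatable to ASCII and therefore yields an empty string instead of an exception):
```sql
SELECT
    tryIdnaEncode('straße.münchen.de') AS ok,  -- 'xn--strae-oqa.xn--mnchen-3ya.de'
    tryIdnaEncode('xn--') AS failed;           -- '' (assumed invalid input)
```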
## idnaDecode
Returns the Unicode (UTF-8) representation (ToUnicode algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
In case of an error (e.g. because the input is invalid), the input string is returned.
Note that repeated application of `idnaEncode()` and `idnaDecode()` does not necessarily return the original string due to case normalization.
**Syntax**
```sql
idnaDecode(val)
```
**Arguments**
- `val` - Input value. [String](../data-types/string.md)
**Returned value**
- A Unicode (UTF-8) representation according to the IDNA mechanism of the input value. [String](../data-types/string.md)
**Example**
``` sql
SELECT idnaDecode('xn--strae-oqa.xn--mnchen-3ya.de');
```
Result:
```result
┌─idnaDecode('xn--strae-oqa.xn--mnchen-3ya.de')─┐
│ straße.münchen.de │
└───────────────────────────────────────────────┘
```
## byteHammingDistance ## byteHammingDistance
Calculates the [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) between two byte strings. Calculates the [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) between two byte strings.
@ -1463,6 +1605,78 @@ Result:
Alias: levenshteinDistance Alias: levenshteinDistance
## damerauLevenshteinDistance
Calculates the [Damerau-Levenshtein distance](https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance) between two byte strings.
**Syntax**
```sql
damerauLevenshteinDistance(string1, string2)
```
**Examples**
``` sql
SELECT damerauLevenshteinDistance('clickhouse', 'mouse');
```
Result:
``` text
┌─damerauLevenshteinDistance('clickhouse', 'mouse')─┐
│ 6 │
└───────────────────────────────────────────────────┘
```
## jaroSimilarity
Calculates the [Jaro similarity](https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance#Jaro_similarity) between two byte strings.
**Syntax**
```sql
jaroSimilarity(string1, string2)
```
**Examples**
``` sql
SELECT jaroSimilarity('clickhouse', 'click');
```
Result:
``` text
┌─jaroSimilarity('clickhouse', 'click')─┐
│ 0.8333333333333333 │
└───────────────────────────────────────┘
```
## jaroWinklerSimilarity
Calculates the [Jaro-Winkler similarity](https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance#Jaro%E2%80%93Winkler_similarity) between two byte strings.
**Syntax**
```sql
jaroWinklerSimilarity(string1, string2)
```
**Examples**
``` sql
SELECT jaroWinklerSimilarity('clickhouse', 'click');
```
Result:
``` text
┌─jaroWinklerSimilarity('clickhouse', 'click')─┐
│ 0.8999999999999999 │
└──────────────────────────────────────────────┘
```
## initcap ## initcap
Converts the first letter of each word to upper case and the rest to lower case. Words are sequences of alphanumeric characters separated by non-alphanumeric characters. Converts the first letter of each word to upper case and the rest to lower case. Words are sequences of alphanumeric characters separated by non-alphanumeric characters.

View File

@ -207,7 +207,7 @@ Functions `multiSearchFirstIndexCaseInsensitive`, `multiSearchFirstIndexUTF8` an
multiSearchFirstIndex(haystack, \[needle<sub>1</sub>, needle<sub>2</sub>, …, needle<sub>n</sub>\]) multiSearchFirstIndex(haystack, \[needle<sub>1</sub>, needle<sub>2</sub>, …, needle<sub>n</sub>\])
``` ```
## multiSearchAny ## multiSearchAny {#multisearchany}
Returns 1, if at least one string needle<sub>i</sub> matches the string `haystack` and 0 otherwise. Returns 1, if at least one string needle<sub>i</sub> matches the string `haystack` and 0 otherwise.
@ -219,7 +219,7 @@ Functions `multiSearchAnyCaseInsensitive`, `multiSearchAnyUTF8` and `multiSearch
multiSearchAny(haystack, \[needle<sub>1</sub>, needle<sub>2</sub>, …, needle<sub>n</sub>\]) multiSearchAny(haystack, \[needle<sub>1</sub>, needle<sub>2</sub>, …, needle<sub>n</sub>\])
``` ```
## match ## match {#match}
Returns whether string `haystack` matches the regular expression `pattern` in [re2 regular syntax](https://github.com/google/re2/wiki/Syntax). Returns whether string `haystack` matches the regular expression `pattern` in [re2 regular syntax](https://github.com/google/re2/wiki/Syntax).
@ -414,7 +414,7 @@ Result:
└────────────────────────────────────────────────────────────────────────────────────────┘ └────────────────────────────────────────────────────────────────────────────────────────┘
``` ```
## like ## like {#like}
Returns whether string `haystack` matches the LIKE expression `pattern`. Returns whether string `haystack` matches the LIKE expression `pattern`.
@ -445,7 +445,7 @@ like(haystack, pattern)
Alias: `haystack LIKE pattern` (operator) Alias: `haystack LIKE pattern` (operator)
## notLike ## notLike {#notlike}
Like `like` but negates the result. Like `like` but negates the result.

View File

@ -57,3 +57,56 @@ Result:
│ 6 │ │ 6 │
└─────────┘ └─────────┘
``` ```
## seriesDecomposeSTL
Decomposes a time series using STL [(Seasonal-Trend Decomposition Procedure Based on Loess)](https://www.wessa.net/download/stl.pdf) into a seasonal, a trend, and a residual component.
**Syntax**
``` sql
seriesDecomposeSTL(series, period);
```
**Arguments**
- `series` - An array of numeric values
- `period` - A positive integer
The number of data points in `series` should be at least twice the value of `period`.
**Returned value**
- An array of three arrays, where the first array contains the seasonal component, the second the trend, and the third the residual component.
Type: [Array](../../sql-reference/data-types/array.md).
**Examples**
Query:
``` sql
SELECT seriesDecomposeSTL([10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34], 3) AS print_0;
```
Result:
``` text
┌───────────print_0──────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ [[
-13.529999, -3.1799996, 16.71, -13.53, -3.1799996, 16.71, -13.53, -3.1799996,
16.71, -13.530001, -3.18, 16.710001, -13.530001, -3.1800003, 16.710001, -13.530001,
-3.1800003, 16.710001, -13.530001, -3.1799994, 16.71, -13.529999, -3.1799994, 16.709997
],
[
23.63, 23.63, 23.630003, 23.630001, 23.630001, 23.630001, 23.630001, 23.630001,
23.630001, 23.630001, 23.630001, 23.63, 23.630001, 23.630001, 23.63, 23.630001,
23.630001, 23.63, 23.630001, 23.630001, 23.630001, 23.630001, 23.630001, 23.630003
],
[
0, 0.0000019073486, -0.0000019073486, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0000019073486, 0,
0
]] │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

View File

@ -293,6 +293,8 @@ You can't combine both ways in one query.
Along with column descriptions, constraints can be defined: Along with column descriptions, constraints can be defined:
### CONSTRAINT
``` sql ``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
( (
@ -307,6 +309,30 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
Adding a large number of constraints can negatively affect the performance of big `INSERT` queries. Adding a large number of constraints can negatively affect the performance of big `INSERT` queries.
### ASSUME
The `ASSUME` clause is used to define a `CONSTRAINT` on a table that is assumed to be true. This constraint can then be used by the optimizer to enhance the performance of SQL queries.
Take this example where `ASSUME CONSTRAINT` is used in the creation of the `users_a` table:
```sql
CREATE TABLE users_a (
uid Int16,
name String,
age Int16,
name_len UInt8 MATERIALIZED length(name),
CONSTRAINT c1 ASSUME length(name) = name_len
)
ENGINE=MergeTree
ORDER BY (name_len, name);
```
Here, `ASSUME CONSTRAINT` is used to assert that the `length(name)` function always equals the value of the `name_len` column. This means that whenever `length(name)` is called in a query, ClickHouse can replace it with `name_len`, which should be faster because it avoids calling the `length()` function.
Then, when executing the query `SELECT name FROM users_a WHERE length(name) < 5;`, ClickHouse can optimize it to `SELECT name FROM users_a WHERE name_len < 5;` because of the `ASSUME CONSTRAINT`. This can make the query run faster because it avoids calculating the length of `name` for each row.
`ASSUME CONSTRAINT` **does not enforce the constraint**; it merely informs the optimizer that the constraint holds true. If the constraint is not actually true, the results of the queries may be incorrect. Therefore, you should only use `ASSUME CONSTRAINT` if you are sure that the constraint is true.
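To verify that the substitution actually happens, here is a minimal sketch using `EXPLAIN SYNTAX` (it assumes the constraint-optimization settings `convert_query_to_cnf`, `optimize_using_constraints`, and `optimize_substitute_columns` are available and enabled in your version):
```sql
SET convert_query_to_cnf = 1,
    optimize_using_constraints = 1,
    optimize_substitute_columns = 1;

-- If the optimization applies, the rewritten query filters on name_len instead of length(name):
EXPLAIN SYNTAX SELECT name FROM users_a WHERE length(name) < 5;
```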
## TTL Expression ## TTL Expression
Defines storage time for values. Can be specified only for MergeTree-family tables. For the detailed description, see [TTL for columns and tables](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl). Defines storage time for values. Can be specified only for MergeTree-family tables. For the detailed description, see [TTL for columns and tables](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
@ -372,15 +398,23 @@ ClickHouse supports general purpose codecs and specialized codecs.
#### ZSTD #### ZSTD
`ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: \[1, 22\]. Default value: 1. `ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: \[1, 22\]. Default level: 1.
High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage. High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage.
#### ZSTD_QAT
`ZSTD_QAT[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable level, implemented by [Intel® QATlib](https://github.com/intel/qatlib) and [Intel® QAT ZSTD Plugin](https://github.com/intel/QAT-ZSTD-Plugin). Possible levels: \[1, 12\]. Default level: 1. Recommended level range: \[6, 12\]. Some limitations apply:
- ZSTD_QAT is disabled by default and can only be used after enabling configuration setting [enable_zstd_qat_codec](../../../operations/settings/settings.md#enable_zstd_qat_codec) (see the sketch after this list).
- For compression, ZSTD_QAT tries to use an Intel® QAT offloading device ([QuickAssist Technology](https://www.intel.com/content/www/us/en/developer/topic-technology/open/quick-assist-technology/overview.html)). If no such device is found, it falls back to ZSTD compression in software.
- Decompression is always performed in software.
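A minimal usage sketch (the table and column are hypothetical; level 6 is taken from the recommended range):
```sql
SET enable_zstd_qat_codec = 1;

CREATE TABLE logs
(
    message String CODEC(ZSTD_QAT(6))  -- recommended level range: [6, 12]
)
ENGINE = MergeTree
ORDER BY tuple();
```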
#### DEFLATE_QPL #### DEFLATE_QPL
`DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply: `DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply:
- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_deflate_qpl_codec = 1`. - DEFLATE_QPL is disabled by default and can only be used after enabling configuration setting [enable_deflate_qpl_codec](../../../operations/settings/settings.md#enable_deflate_qpl_codec) (see the sketch after this list).
- DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details. - DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details.
- DEFLATE_QPL works best if the system has an Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details. - DEFLATE_QPL works best if the system has an Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details.
- DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled. - DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled.
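Analogously, a minimal usage sketch (hypothetical table; it assumes a build and cluster that satisfy the limitations above):
```sql
SET enable_deflate_qpl_codec = 1;

CREATE TABLE logs_qpl
(
    message String CODEC(DEFLATE_QPL)
)
ENGINE = MergeTree
ORDER BY tuple();
```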

View File

@ -11,7 +11,7 @@ Its name comes from the fact that it can be looked at as executing `JOIN` with a
Syntax: Syntax:
``` sql ```sql
SELECT <expr_list> SELECT <expr_list>
FROM <left_subquery> FROM <left_subquery>
[LEFT] ARRAY JOIN <array> [LEFT] ARRAY JOIN <array>
@ -30,7 +30,7 @@ Supported types of `ARRAY JOIN` are listed below:
The examples below demonstrate the usage of the `ARRAY JOIN` and `LEFT ARRAY JOIN` clauses. Lets create a table with an [Array](../../../sql-reference/data-types/array.md) type column and insert values into it: The examples below demonstrate the usage of the `ARRAY JOIN` and `LEFT ARRAY JOIN` clauses. Lets create a table with an [Array](../../../sql-reference/data-types/array.md) type column and insert values into it:
``` sql ```sql
CREATE TABLE arrays_test CREATE TABLE arrays_test
( (
s String, s String,
@ -41,7 +41,7 @@ INSERT INTO arrays_test
VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []); VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []);
``` ```
``` text ```response
┌─s───────────┬─arr─────┐ ┌─s───────────┬─arr─────┐
│ Hello │ [1,2] │ │ Hello │ [1,2] │
│ World │ [3,4,5] │ │ World │ [3,4,5] │
@ -51,13 +51,13 @@ VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []);
The example below uses the `ARRAY JOIN` clause: The example below uses the `ARRAY JOIN` clause:
``` sql ```sql
SELECT s, arr SELECT s, arr
FROM arrays_test FROM arrays_test
ARRAY JOIN arr; ARRAY JOIN arr;
``` ```
``` text ```response
┌─s─────┬─arr─┐ ┌─s─────┬─arr─┐
│ Hello │ 1 │ │ Hello │ 1 │
│ Hello │ 2 │ │ Hello │ 2 │
@ -69,13 +69,13 @@ ARRAY JOIN arr;
The next example uses the `LEFT ARRAY JOIN` clause: The next example uses the `LEFT ARRAY JOIN` clause:
``` sql ```sql
SELECT s, arr SELECT s, arr
FROM arrays_test FROM arrays_test
LEFT ARRAY JOIN arr; LEFT ARRAY JOIN arr;
``` ```
``` text ```response
┌─s───────────┬─arr─┐ ┌─s───────────┬─arr─┐
│ Hello │ 1 │ │ Hello │ 1 │
│ Hello │ 2 │ │ Hello │ 2 │
@ -90,13 +90,13 @@ LEFT ARRAY JOIN arr;
An alias can be specified for an array in the `ARRAY JOIN` clause. In this case, an array item can be accessed by this alias, but the array itself is accessed by the original name. Example: An alias can be specified for an array in the `ARRAY JOIN` clause. In this case, an array item can be accessed by this alias, but the array itself is accessed by the original name. Example:
``` sql ```sql
SELECT s, arr, a SELECT s, arr, a
FROM arrays_test FROM arrays_test
ARRAY JOIN arr AS a; ARRAY JOIN arr AS a;
``` ```
``` text ```response
┌─s─────┬─arr─────┬─a─┐ ┌─s─────┬─arr─────┬─a─┐
│ Hello │ [1,2] │ 1 │ │ Hello │ [1,2] │ 1 │
│ Hello │ [1,2] │ 2 │ │ Hello │ [1,2] │ 2 │
@ -108,13 +108,13 @@ ARRAY JOIN arr AS a;
Using aliases, you can perform `ARRAY JOIN` with an external array. For example: Using aliases, you can perform `ARRAY JOIN` with an external array. For example:
``` sql ```sql
SELECT s, arr_external SELECT s, arr_external
FROM arrays_test FROM arrays_test
ARRAY JOIN [1, 2, 3] AS arr_external; ARRAY JOIN [1, 2, 3] AS arr_external;
``` ```
``` text ```response
┌─s───────────┬─arr_external─┐ ┌─s───────────┬─arr_external─┐
│ Hello │ 1 │ │ Hello │ 1 │
│ Hello │ 2 │ │ Hello │ 2 │
@ -130,13 +130,13 @@ ARRAY JOIN [1, 2, 3] AS arr_external;
Multiple arrays can be comma-separated in the `ARRAY JOIN` clause. In this case, `JOIN` is performed with them simultaneously (the direct sum, not the cartesian product). Note that all the arrays must have the same size by default. Example: Multiple arrays can be comma-separated in the `ARRAY JOIN` clause. In this case, `JOIN` is performed with them simultaneously (the direct sum, not the cartesian product). Note that all the arrays must have the same size by default. Example:
``` sql ```sql
SELECT s, arr, a, num, mapped SELECT s, arr, a, num, mapped
FROM arrays_test FROM arrays_test
ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped; ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped;
``` ```
``` text ```response
┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐ ┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐
│ Hello │ [1,2] │ 1 │ 1 │ 2 │ │ Hello │ [1,2] │ 1 │ 1 │ 2 │
│ Hello │ [1,2] │ 2 │ 2 │ 3 │ │ Hello │ [1,2] │ 2 │ 2 │ 3 │
@ -148,13 +148,13 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS ma
The example below uses the [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) function: The example below uses the [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) function:
``` sql ```sql
SELECT s, arr, a, num, arrayEnumerate(arr) SELECT s, arr, a, num, arrayEnumerate(arr)
FROM arrays_test FROM arrays_test
ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num;
``` ```
``` text ```response
┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐ ┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐
│ Hello │ [1,2] │ 1 │ 1 │ [1,2] │ │ Hello │ [1,2] │ 1 │ 1 │ [1,2] │
│ Hello │ [1,2] │ 2 │ 2 │ [1,2] │ │ Hello │ [1,2] │ 2 │ 2 │ [1,2] │
@ -163,6 +163,7 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num;
│ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │ │ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │
└───────┴─────────┴───┴─────┴─────────────────────┘ └───────┴─────────┴───┴─────┴─────────────────────┘
``` ```
Multiple arrays with different sizes can be joined by using `SETTINGS enable_unaligned_array_join = 1`. Example: Multiple arrays with different sizes can be joined by using `SETTINGS enable_unaligned_array_join = 1`. Example:
```sql ```sql
@ -171,7 +172,7 @@ FROM arrays_test ARRAY JOIN arr as a, [['a','b'],['c']] as b
SETTINGS enable_unaligned_array_join = 1; SETTINGS enable_unaligned_array_join = 1;
``` ```
```text ```response
┌─s───────┬─arr─────┬─a─┬─b─────────┐ ┌─s───────┬─arr─────┬─a─┬─b─────────┐
│ Hello │ [1,2] │ 1 │ ['a','b'] │ │ Hello │ [1,2] │ 1 │ ['a','b'] │
│ Hello │ [1,2] │ 2 │ ['c'] │ │ Hello │ [1,2] │ 2 │ ['c'] │
@ -187,7 +188,7 @@ SETTINGS enable_unaligned_array_join = 1;
`ARRAY JOIN` also works with [nested data structures](../../../sql-reference/data-types/nested-data-structures/index.md): `ARRAY JOIN` also works with [nested data structures](../../../sql-reference/data-types/nested-data-structures/index.md):
``` sql ```sql
CREATE TABLE nested_test CREATE TABLE nested_test
( (
s String, s String,
@ -200,7 +201,7 @@ INSERT INTO nested_test
VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []); VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []);
``` ```
``` text ```response
┌─s───────┬─nest.x──┬─nest.y─────┐ ┌─s───────┬─nest.x──┬─nest.y─────┐
│ Hello │ [1,2] │ [10,20] │ │ Hello │ [1,2] │ [10,20] │
│ World │ [3,4,5] │ [30,40,50] │ │ World │ [3,4,5] │ [30,40,50] │
@ -208,13 +209,13 @@ VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', []
└─────────┴─────────┴────────────┘ └─────────┴─────────┴────────────┘
``` ```
``` sql ```sql
SELECT s, `nest.x`, `nest.y` SELECT s, `nest.x`, `nest.y`
FROM nested_test FROM nested_test
ARRAY JOIN nest; ARRAY JOIN nest;
``` ```
``` text ```response
┌─s─────┬─nest.x─┬─nest.y─┐ ┌─s─────┬─nest.x─┬─nest.y─┐
│ Hello │ 1 │ 10 │ │ Hello │ 1 │ 10 │
│ Hello │ 2 │ 20 │ │ Hello │ 2 │ 20 │
@ -226,13 +227,13 @@ ARRAY JOIN nest;
When specifying names of nested data structures in `ARRAY JOIN`, the meaning is the same as `ARRAY JOIN` with all the array elements that it consists of. Examples are listed below: When specifying names of nested data structures in `ARRAY JOIN`, the meaning is the same as `ARRAY JOIN` with all the array elements that it consists of. Examples are listed below:
``` sql ```sql
SELECT s, `nest.x`, `nest.y` SELECT s, `nest.x`, `nest.y`
FROM nested_test FROM nested_test
ARRAY JOIN `nest.x`, `nest.y`; ARRAY JOIN `nest.x`, `nest.y`;
``` ```
``` text ```response
┌─s─────┬─nest.x─┬─nest.y─┐ ┌─s─────┬─nest.x─┬─nest.y─┐
│ Hello │ 1 │ 10 │ │ Hello │ 1 │ 10 │
│ Hello │ 2 │ 20 │ │ Hello │ 2 │ 20 │
@ -244,13 +245,13 @@ ARRAY JOIN `nest.x`, `nest.y`;
This variation also makes sense: This variation also makes sense:
``` sql ```sql
SELECT s, `nest.x`, `nest.y` SELECT s, `nest.x`, `nest.y`
FROM nested_test FROM nested_test
ARRAY JOIN `nest.x`; ARRAY JOIN `nest.x`;
``` ```
``` text ```response
┌─s─────┬─nest.x─┬─nest.y─────┐ ┌─s─────┬─nest.x─┬─nest.y─────┐
│ Hello │ 1 │ [10,20] │ │ Hello │ 1 │ [10,20] │
│ Hello │ 2 │ [10,20] │ │ Hello │ 2 │ [10,20] │
@ -262,13 +263,13 @@ ARRAY JOIN `nest.x`;
An alias may be used for a nested data structure, in order to select either the `JOIN` result or the source array. Example: An alias may be used for a nested data structure, in order to select either the `JOIN` result or the source array. Example:
``` sql ```sql
SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y` SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`
FROM nested_test FROM nested_test
ARRAY JOIN nest AS n; ARRAY JOIN nest AS n;
``` ```
``` text ```response
┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐ ┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐
│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ │ Hello │ 1 │ 10 │ [1,2] │ [10,20] │
│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ │ Hello │ 2 │ 20 │ [1,2] │ [10,20] │
@ -280,13 +281,13 @@ ARRAY JOIN nest AS n;
Example of using the [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) function: Example of using the [arrayEnumerate](../../../sql-reference/functions/array-functions.md#array_functions-arrayenumerate) function:
``` sql ```sql
SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num
FROM nested_test FROM nested_test
ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;
``` ```
``` text ```response
┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ ┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐
│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ │ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │
│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │ │ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │
@ -300,6 +301,11 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;
The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the [WHERE](../../../sql-reference/statements/select/where.md)/[PREWHERE](../../../sql-reference/statements/select/prewhere.md) clause in a query, technically they can be performed in any order, unless the result of `ARRAY JOIN` is used for filtering. The processing order is controlled by the query optimizer. The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the [WHERE](../../../sql-reference/statements/select/where.md)/[PREWHERE](../../../sql-reference/statements/select/prewhere.md) clause in a query, technically they can be performed in any order, unless the result of `ARRAY JOIN` is used for filtering. The processing order is controlled by the query optimizer.
### Incompatibility with short-circuit function evaluation
[Short-circuit function evaluation](../../../operations/settings/index.md#short-circuit-function-evaluation) is a feature that optimizes the execution of complex expressions in specific functions such as `if`, `multiIf`, `and`, and `or`. It prevents potential exceptions, such as division by zero, from occurring during the execution of these functions.
`arrayJoin` is always executed; it is not supported by short-circuit function evaluation. This is because `arrayJoin` is a unique function that is processed separately from all other functions during query analysis and execution, and it requires additional logic that does not work with short-circuit function execution: the number of rows in the result depends on the `arrayJoin` result, and implementing lazy execution of `arrayJoin` would be too complex and expensive.
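A small sketch of the consequence (the exact output is illustrative): even when the branch containing `arrayJoin` can never be selected, the function is still executed and multiplies the rows:
```sql
-- The condition is always false, yet arrayJoin([1, 2, 3]) is still evaluated,
-- so the query is expected to return three rows of 0 instead of one:
SELECT if(0, arrayJoin([1, 2, 3]), 0) FROM system.one;
```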
## Related content ## Related content

View File

@ -12,7 +12,7 @@ Join produces a new table by combining columns from one or multiple tables by us
``` sql ``` sql
SELECT <expr_list> SELECT <expr_list>
FROM <left_table> FROM <left_table>
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table> [GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ALL|ASOF] JOIN <right_table>
(ON <expr_list>)|(USING <column_list>) ... (ON <expr_list>)|(USING <column_list>) ...
``` ```
@ -296,6 +296,34 @@ PASTE JOIN
│ 1 │ 0 │ │ 1 │ 0 │
└───┴──────┘ └───┴──────┘
``` ```
Note: In this case, the result can be nondeterministic if reading is parallel. Example:
```sql
SELECT *
FROM
(
SELECT number AS a
FROM numbers_mt(5)
) AS t1
PASTE JOIN
(
SELECT number AS a
FROM numbers(10)
ORDER BY a DESC
) AS t2
SETTINGS max_block_size = 2;
┌─a─┬─t2.a─┐
│ 2 │ 9 │
│ 3 │ 8 │
└───┴──────┘
┌─a─┬─t2.a─┐
│ 0 │ 7 │
│ 1 │ 6 │
└───┴──────┘
┌─a─┬─t2.a─┐
│ 4 │ 5 │
└───┴──────┘
```
## Distributed JOIN ## Distributed JOIN

View File

@ -1559,7 +1559,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
QueryPipeline input; QueryPipeline input;
QueryPipeline output; QueryPipeline output;
{ {
BlockIO io_insert = InterpreterFactory::get(query_insert_ast, context_insert)->execute(); BlockIO io_insert = InterpreterFactory::instance().get(query_insert_ast, context_insert)->execute();
InterpreterSelectWithUnionQuery select(query_select_ast, context_select, SelectQueryOptions{}); InterpreterSelectWithUnionQuery select(query_select_ast, context_select, SelectQueryOptions{});
QueryPlan plan; QueryPlan plan;
@ -1944,7 +1944,7 @@ bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts,
auto local_context = Context::createCopy(context); auto local_context = Context::createCopy(context);
local_context->setSettings(task_cluster->settings_pull); local_context->setSettings(task_cluster->settings_pull);
auto pipeline = InterpreterFactory::get(query_ast, local_context)->execute().pipeline; auto pipeline = InterpreterFactory::instance().get(query_ast, local_context)->execute().pipeline;
PullingPipelineExecutor executor(pipeline); PullingPipelineExecutor executor(pipeline);
Block block; Block block;
executor.pull(block); executor.pull(block);
@ -1989,7 +1989,7 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi
auto local_context = Context::createCopy(context); auto local_context = Context::createCopy(context);
local_context->setSettings(task_cluster->settings_pull); local_context->setSettings(task_cluster->settings_pull);
auto pipeline = InterpreterFactory::get(query_ast, local_context)->execute().pipeline; auto pipeline = InterpreterFactory::instance().get(query_ast, local_context)->execute().pipeline;
PullingPipelineExecutor executor(pipeline); PullingPipelineExecutor executor(pipeline);
Block result; Block result;
executor.pull(result); executor.pull(result);

View File

@ -4,6 +4,7 @@
#include <Common/TerminalSize.h> #include <Common/TerminalSize.h>
#include <Databases/registerDatabases.h> #include <Databases/registerDatabases.h>
#include <IO/ConnectionTimeouts.h> #include <IO/ConnectionTimeouts.h>
#include <Interpreters/registerInterpreters.h>
#include <Formats/registerFormats.h> #include <Formats/registerFormats.h>
#include <Common/scope_guard_safe.h> #include <Common/scope_guard_safe.h>
#include <unistd.h> #include <unistd.h>
@ -157,6 +158,7 @@ void ClusterCopierApp::mainImpl()
context->setApplicationType(Context::ApplicationType::LOCAL); context->setApplicationType(Context::ApplicationType::LOCAL);
context->setPath(process_path + "/"); context->setPath(process_path + "/");
registerInterpreters();
registerFunctions(); registerFunctions();
registerAggregateFunctions(); registerAggregateFunctions();
registerTableFunctions(); registerTableFunctions();

View File

@ -17,15 +17,7 @@
#include <Common/Config/ConfigProcessor.h> #include <Common/Config/ConfigProcessor.h>
#include <Common/Exception.h> #include <Common/Exception.h>
#include <Common/parseGlobs.h> #include <Common/parseGlobs.h>
#include <Common/re2.h>
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <re2/re2.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif
static void setupLogging(const std::string & log_level) static void setupLogging(const std::string & log_level)
{ {

View File

@ -3,16 +3,19 @@
#include <string_view> #include <string_view>
#include <boost/program_options.hpp> #include <boost/program_options.hpp>
#include <IO/copyData.h>
#include <IO/ReadBufferFromFileDescriptor.h> #include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/ReadHelpers.h> #include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFileDescriptor.h> #include <IO/WriteBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromOStream.h> #include <IO/WriteBufferFromOStream.h>
#include <Interpreters/registerInterpreters.h>
#include <Parsers/ASTInsertQuery.h> #include <Parsers/ASTInsertQuery.h>
#include <Parsers/ParserQuery.h> #include <Parsers/ParserQuery.h>
#include <Parsers/formatAST.h> #include <Parsers/formatAST.h>
#include <Parsers/obfuscateQueries.h> #include <Parsers/obfuscateQueries.h>
#include <Parsers/parseQuery.h> #include <Parsers/parseQuery.h>
#include <Common/ErrorCodes.h> #include <Common/ErrorCodes.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/TerminalSize.h> #include <Common/TerminalSize.h>
#include <Interpreters/Context.h> #include <Interpreters/Context.h>
@ -29,22 +32,49 @@
#include <DataTypes/DataTypeFactory.h> #include <DataTypes/DataTypeFactory.h>
#include <Formats/FormatFactory.h> #include <Formats/FormatFactory.h>
#include <Formats/registerFormats.h> #include <Formats/registerFormats.h>
#include <Processors/Transforms/getSourceFromASTInsertQuery.h>
namespace DB::ErrorCodes
{
extern const int NOT_IMPLEMENTED;
}
namespace
{
void skipSpacesAndComments(const char*& pos, const char* end, bool print_comments)
{
do
{
/// skip whitespace to avoid throwing an exception after the last query
while (pos != end && std::isspace(*pos))
++pos;
const char * comment_begin = pos;
/// skip a comment after the last query to avoid throwing an exception
if (end - pos > 2 && *pos == '-' && *(pos + 1) == '-')
{
pos += 2;
/// skip until the end of the line
while (pos != end && *pos != '\n')
++pos;
if (print_comments)
std::cout << std::string_view(comment_begin, pos - comment_begin) << "\n";
}
/// not a comment, so the next SQL statement starts here
else
break;
} while (pos != end);
}
}
#pragma GCC diagnostic ignored "-Wunused-function" #pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wmissing-declarations" #pragma GCC diagnostic ignored "-Wmissing-declarations"
extern const char * auto_time_zones[]; extern const char * auto_time_zones[];
namespace DB
{
namespace ErrorCodes
{
extern const int INVALID_FORMAT_INSERT_QUERY_WITH_DATA;
}
}
int mainEntryClickHouseFormat(int argc, char ** argv) int mainEntryClickHouseFormat(int argc, char ** argv)
{ {
using namespace DB; using namespace DB;
@ -55,8 +85,10 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
desc.add_options() desc.add_options()
("query", po::value<std::string>(), "query to format") ("query", po::value<std::string>(), "query to format")
("help,h", "produce help message") ("help,h", "produce help message")
("comments", "keep comments in the output")
("hilite", "add syntax highlight with ANSI terminal escape sequences") ("hilite", "add syntax highlight with ANSI terminal escape sequences")
("oneline", "format in single line") ("oneline", "format in single line")
("max_line_length", po::value<size_t>()->default_value(0), "format in single line queries with length less than specified")
("quiet,q", "just check syntax, no output on success") ("quiet,q", "just check syntax, no output on success")
("multiquery,n", "allow multiple queries in the same file") ("multiquery,n", "allow multiple queries in the same file")
("obfuscate", "obfuscate instead of formatting") ("obfuscate", "obfuscate instead of formatting")
@ -88,6 +120,8 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
bool oneline = options.count("oneline"); bool oneline = options.count("oneline");
bool quiet = options.count("quiet"); bool quiet = options.count("quiet");
bool multiple = options.count("multiquery"); bool multiple = options.count("multiquery");
bool print_comments = options.count("comments");
size_t max_line_length = options["max_line_length"].as<size_t>();
bool obfuscate = options.count("obfuscate"); bool obfuscate = options.count("obfuscate");
bool backslash = options.count("backslash"); bool backslash = options.count("backslash");
bool allow_settings_after_format_in_insert = options.count("allow_settings_after_format_in_insert"); bool allow_settings_after_format_in_insert = options.count("allow_settings_after_format_in_insert");
@ -104,6 +138,19 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
return 2; return 2;
} }
if (oneline && max_line_length)
{
std::cerr << "Options 'oneline' and 'max_line_length' are mutually exclusive." << std::endl;
return 2;
}
if (max_line_length > 255)
{
std::cerr << "Option 'max_line_length' must be less than 256." << std::endl;
return 2;
}
String query; String query;
if (options.count("query")) if (options.count("query"))
@ -124,10 +171,10 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
if (options.count("seed")) if (options.count("seed"))
{ {
std::string seed;
hash_func.update(options["seed"].as<std::string>()); hash_func.update(options["seed"].as<std::string>());
} }
registerInterpreters();
registerFunctions(); registerFunctions();
registerAggregateFunctions(); registerAggregateFunctions();
registerTableFunctions(); registerTableFunctions();
@ -179,30 +226,75 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
{ {
const char * pos = query.data(); const char * pos = query.data();
const char * end = pos + query.size(); const char * end = pos + query.size();
skipSpacesAndComments(pos, end, print_comments);
ParserQuery parser(end, allow_settings_after_format_in_insert); ParserQuery parser(end, allow_settings_after_format_in_insert);
do while (pos != end)
{ {
size_t approx_query_length = multiple ? find_first_symbols<';'>(pos, end) - pos : end - pos;
ASTPtr res = parseQueryAndMovePosition( ASTPtr res = parseQueryAndMovePosition(
parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth); parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth);
/// For insert query with data(INSERT INTO ... VALUES ...), that will lead to the formatting failure, std::unique_ptr<ReadBuffer> insert_query_payload = nullptr;
/// we should throw an exception early, and make exception message more readable. /// If the query is INSERT ... VALUES, then we will try to parse the data.
if (const auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data) if (auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
{ {
throw Exception(DB::ErrorCodes::INVALID_FORMAT_INSERT_QUERY_WITH_DATA, if ("Values" != insert_query->format)
"Can't format ASTInsertQuery with data, since data will be lost"); throw Exception(DB::ErrorCodes::NOT_IMPLEMENTED, "Can't format INSERT query with data format '{}'", insert_query->format);
/// Reset format to default to have `INSERT INTO table VALUES` instead of `INSERT INTO table VALUES FORMAT Values`
insert_query->format = {};
/// We assume that data ends with a newline character (same as client does)
const char * this_query_end = find_first_symbols<'\n'>(insert_query->data, end);
insert_query->end = this_query_end;
pos = this_query_end;
insert_query_payload = getReadBufferFromASTInsertQuery(res);
} }
if (!quiet) if (!quiet)
{ {
if (!backslash) if (!backslash)
{ {
WriteBufferFromOStream res_buf(std::cout, 4096); WriteBufferFromOwnString str_buf;
formatAST(*res, res_buf, hilite, oneline); formatAST(*res, str_buf, hilite, oneline || approx_query_length < max_line_length);
res_buf.finalize();
if (multiple) if (insert_query_payload)
std::cout << "\n;\n"; {
str_buf.write(' ');
copyData(*insert_query_payload, str_buf);
}
String res_string = str_buf.str();
const char * s_pos = res_string.data();
const char * s_end = s_pos + res_string.size();
/// remove trailing spaces
while (s_end > s_pos && isWhitespaceASCIIOneLine(*(s_end - 1)))
--s_end;
WriteBufferFromOStream res_cout(std::cout, 4096);
/// For multiline queries we print ';' on a new line,
/// but for single-line queries we print ';' on the same line
bool has_multiple_lines = false;
while (s_pos != s_end)
{
if (*s_pos == '\n')
has_multiple_lines = true;
res_cout.write(*s_pos++);
}
res_cout.finalize();
if (multiple && !insert_query_payload)
{
if (oneline || !has_multiple_lines)
std::cout << ";\n";
else
std::cout << "\n;\n";
}
else if (multiple && insert_query_payload)
/// No need to add ';' because it's already in the insert_query_payload
std::cout << "\n";
std::cout << std::endl; std::cout << std::endl;
} }
/// add additional '\' at the end of each line; /// add additional '\' at the end of each line;
@ -230,27 +322,10 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
std::cout << std::endl; std::cout << std::endl;
} }
} }
skipSpacesAndComments(pos, end, print_comments);
do if (!multiple)
{ break;
/// skip spaces to avoid throw exception after last query }
while (pos != end && std::isspace(*pos))
++pos;
/// for skip comment after the last query and to not throw exception
if (end - pos > 2 && *pos == '-' && *(pos + 1) == '-')
{
pos += 2;
/// skip until the end of the line
while (pos != end && *pos != '\n')
++pos;
}
/// need to parse next sql
else
break;
} while (pos != end);
} while (multiple && pos != end);
} }
} }
catch (...) catch (...)

View File

@ -16,6 +16,7 @@
#include <Common/SipHash.h> #include <Common/SipHash.h>
#include <Common/StringUtils/StringUtils.h> #include <Common/StringUtils/StringUtils.h>
#include <Common/ShellCommand.h> #include <Common/ShellCommand.h>
#include <Common/re2.h>
#include <base/find_symbols.h> #include <base/find_symbols.h>
#include <IO/copyData.h> #include <IO/copyData.h>
@ -24,15 +25,6 @@
#include <IO/WriteBufferFromFile.h> #include <IO/WriteBufferFromFile.h>
#include <IO/WriteBufferFromFileDescriptor.h> #include <IO/WriteBufferFromFileDescriptor.h>
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <re2/re2.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif
static constexpr auto documentation = R"( static constexpr auto documentation = R"(
A tool to extract information from Git repository for analytics. A tool to extract information from Git repository for analytics.

View File

@ -95,6 +95,7 @@ if (BUILD_STANDALONE_KEEPER)
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/CurrentThread.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/CurrentThread.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/NamedCollections/NamedCollections.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/NamedCollections/NamedCollections.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/NamedCollections/NamedCollectionConfiguration.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/NamedCollections/NamedCollectionConfiguration.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/Jemalloc.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/IKeeper.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/IKeeper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/TestKeeper.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Common/ZooKeeper/TestKeeper.cpp

View File

@ -2,6 +2,7 @@
#include "CatBoostLibraryHandler.h" #include "CatBoostLibraryHandler.h"
#include "CatBoostLibraryHandlerFactory.h" #include "CatBoostLibraryHandlerFactory.h"
#include "Common/ProfileEvents.h"
#include "ExternalDictionaryLibraryHandler.h" #include "ExternalDictionaryLibraryHandler.h"
#include "ExternalDictionaryLibraryHandlerFactory.h" #include "ExternalDictionaryLibraryHandlerFactory.h"
@ -44,7 +45,7 @@ namespace
response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent()) if (!response.sent())
*response.send() << message << std::endl; *response.send() << message << '\n';
LOG_WARNING(&Poco::Logger::get("LibraryBridge"), fmt::runtime(message)); LOG_WARNING(&Poco::Logger::get("LibraryBridge"), fmt::runtime(message));
} }
@ -96,7 +97,7 @@ ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRe
} }
void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{ {
LOG_TRACE(log, "Request URI: {}", request.getURI()); LOG_TRACE(log, "Request URI: {}", request.getURI());
HTMLForm params(getContext()->getSettingsRef(), request); HTMLForm params(getContext()->getSettingsRef(), request);
@ -384,7 +385,7 @@ ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExi
} }
void ExternalDictionaryLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) void ExternalDictionaryLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{ {
try try
{ {
@ -423,7 +424,7 @@ CatBoostLibraryBridgeRequestHandler::CatBoostLibraryBridgeRequestHandler(
} }
void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{ {
LOG_TRACE(log, "Request URI: {}", request.getURI()); LOG_TRACE(log, "Request URI: {}", request.getURI());
HTMLForm params(getContext()->getSettingsRef(), request); HTMLForm params(getContext()->getSettingsRef(), request);
@ -463,6 +464,9 @@ void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & requ
{ {
if (method == "catboost_list") if (method == "catboost_list")
{ {
auto & read_buf = request.getStream();
params.read(read_buf);
ExternalModelInfos model_infos = CatBoostLibraryHandlerFactory::instance().getModelInfos(); ExternalModelInfos model_infos = CatBoostLibraryHandlerFactory::instance().getModelInfos();
writeIntBinary(static_cast<UInt64>(model_infos.size()), out); writeIntBinary(static_cast<UInt64>(model_infos.size()), out);
@ -500,6 +504,9 @@ void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & requ
} }
else if (method == "catboost_removeAllModels") else if (method == "catboost_removeAllModels")
{ {
auto & read_buf = request.getStream();
params.read(read_buf);
CatBoostLibraryHandlerFactory::instance().removeAllModels(); CatBoostLibraryHandlerFactory::instance().removeAllModels();
String res = "1"; String res = "1";
@ -621,7 +628,7 @@ CatBoostLibraryBridgeExistsHandler::CatBoostLibraryBridgeExistsHandler(size_t ke
} }
void CatBoostLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) void CatBoostLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{ {
try try
{ {

View File

@ -20,7 +20,7 @@ class ExternalDictionaryLibraryBridgeRequestHandler : public HTTPRequestHandler,
public: public:
ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_); ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_);
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private: private:
static constexpr inline auto FORMAT = "RowBinary"; static constexpr inline auto FORMAT = "RowBinary";
@ -36,7 +36,7 @@ class ExternalDictionaryLibraryBridgeExistsHandler : public HTTPRequestHandler,
public: public:
ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_); ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_);
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private: private:
const size_t keep_alive_timeout; const size_t keep_alive_timeout;
@ -65,7 +65,7 @@ class CatBoostLibraryBridgeRequestHandler : public HTTPRequestHandler, WithConte
public: public:
CatBoostLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_); CatBoostLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_);
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private: private:
const size_t keep_alive_timeout; const size_t keep_alive_timeout;
@ -79,7 +79,7 @@ class CatBoostLibraryBridgeExistsHandler : public HTTPRequestHandler, WithContex
public: public:
CatBoostLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_); CatBoostLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_);
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private: private:
const size_t keep_alive_timeout; const size_t keep_alive_timeout;

View File

@ -20,6 +20,7 @@
#include <Interpreters/JIT/CompiledExpressionCache.h> #include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Interpreters/ProcessList.h> #include <Interpreters/ProcessList.h>
#include <Interpreters/loadMetadata.h> #include <Interpreters/loadMetadata.h>
#include <Interpreters/registerInterpreters.h>
#include <base/getFQDNOrHostName.h> #include <base/getFQDNOrHostName.h>
#include <Common/scope_guard_safe.h> #include <Common/scope_guard_safe.h>
#include <Interpreters/Session.h> #include <Interpreters/Session.h>
@ -486,6 +487,7 @@ try
Poco::ErrorHandler::set(&error_handler); Poco::ErrorHandler::set(&error_handler);
} }
registerInterpreters();
/// Don't initialize DateLUT /// Don't initialize DateLUT
registerFunctions(); registerFunctions();
registerAggregateFunctions(); registerAggregateFunctions();

View File

@ -69,7 +69,7 @@ namespace
} }
void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{ {
HTMLForm params(getContext()->getSettingsRef(), request, request.getStream()); HTMLForm params(getContext()->getSettingsRef(), request, request.getStream());
LOG_TRACE(log, "Request URI: {}", request.getURI()); LOG_TRACE(log, "Request URI: {}", request.getURI());
@ -78,7 +78,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
{ {
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent()) if (!response.sent())
*response.send() << message << std::endl; *response.send() << message << '\n';
LOG_WARNING(log, fmt::runtime(message)); LOG_WARNING(log, fmt::runtime(message));
}; };

View File

@ -23,7 +23,7 @@ public:
{ {
} }
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private: private:
Poco::Logger * log; Poco::Logger * log;

View File

@ -21,7 +21,7 @@
namespace DB namespace DB
{ {
void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{ {
HTMLForm params(getContext()->getSettingsRef(), request, request.getStream()); HTMLForm params(getContext()->getSettingsRef(), request, request.getStream());
LOG_TRACE(log, "Request URI: {}", request.getURI()); LOG_TRACE(log, "Request URI: {}", request.getURI());
@ -30,7 +30,7 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ
{ {
response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent()) if (!response.sent())
*response.send() << message << std::endl; response.send()->writeln(message);
LOG_WARNING(log, fmt::runtime(message)); LOG_WARNING(log, fmt::runtime(message));
}; };

View File

@ -21,7 +21,7 @@ public:
{ {
} }
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private: private:
Poco::Logger * log; Poco::Logger * log;

View File

@ -46,12 +46,12 @@ void ODBCHandler::processError(HTTPServerResponse & response, const std::string
{ {
response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
if (!response.sent()) if (!response.sent())
*response.send() << message << std::endl; *response.send() << message << '\n';
LOG_WARNING(log, fmt::runtime(message)); LOG_WARNING(log, fmt::runtime(message));
} }
void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{ {
HTMLForm params(getContext()->getSettingsRef(), request); HTMLForm params(getContext()->getSettingsRef(), request);
LOG_TRACE(log, "Request URI: {}", request.getURI()); LOG_TRACE(log, "Request URI: {}", request.getURI());

View File

@ -30,7 +30,7 @@ public:
{ {
} }
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
private: private:
Poco::Logger * log; Poco::Logger * log;

View File

@ -6,7 +6,7 @@
namespace DB namespace DB
{ {
void PingHandler::handleRequest(HTTPServerRequest & /* request */, HTTPServerResponse & response) void PingHandler::handleRequest(HTTPServerRequest & /* request */, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{ {
try try
{ {

View File

@@ -10,7 +10,7 @@ class PingHandler : public HTTPRequestHandler
 {
 public:
     explicit PingHandler(size_t keep_alive_timeout_) : keep_alive_timeout(keep_alive_timeout_) {}
-    void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
+    void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
 private:
     size_t keep_alive_timeout;


@@ -29,7 +29,7 @@ namespace
 }
-void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
+void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
 {
     HTMLForm params(getContext()->getSettingsRef(), request, request.getStream());
     LOG_TRACE(log, "Request URI: {}", request.getURI());
@@ -38,7 +38,7 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer
     {
         response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);
         if (!response.sent())
-            *response.send() << message << std::endl;
+            *response.send() << message << '\n';
         LOG_WARNING(log, fmt::runtime(message));
     };


@@ -24,7 +24,7 @@ public:
     {
     }
-    void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
+    void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override;
 private:
     Poco::Logger * log;
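All of the bridge-handler diffs above make the same mechanical change: handleRequest() now also receives the ProfileEvents counter under which bytes written to the response are accounted. A minimal, hypothetical subclass showing only the widened override (EchoHandler is illustrative, not part of this commit):

#include <Server/HTTP/HTTPRequestHandler.h>

namespace DB
{

/// Hypothetical handler demonstrating the new three-argument signature.
class EchoHandler : public HTTPRequestHandler
{
public:
    void handleRequest(HTTPServerRequest &, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) override
    {
        /// Same call style as IdentifierQuoteHandler above; the unused
        /// write_event is still part of the contract.
        response.send()->writeln("Ok.");
    }
};

}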


@@ -58,6 +58,7 @@
 #include <Interpreters/ExternalDictionariesLoader.h>
 #include <Interpreters/ProcessList.h>
 #include <Interpreters/loadMetadata.h>
+#include <Interpreters/registerInterpreters.h>
 #include <Interpreters/JIT/CompiledExpressionCache.h>
 #include <Access/AccessControl.h>
 #include <Storages/StorageReplicatedMergeTree.h>
@@ -152,6 +153,18 @@ namespace ProfileEvents
 {
     extern const Event MainConfigLoads;
     extern const Event ServerStartupMilliseconds;
+    extern const Event InterfaceNativeSendBytes;
+    extern const Event InterfaceNativeReceiveBytes;
+    extern const Event InterfaceHTTPSendBytes;
+    extern const Event InterfaceHTTPReceiveBytes;
+    extern const Event InterfacePrometheusSendBytes;
+    extern const Event InterfacePrometheusReceiveBytes;
+    extern const Event InterfaceInterserverSendBytes;
+    extern const Event InterfaceInterserverReceiveBytes;
+    extern const Event InterfaceMySQLSendBytes;
+    extern const Event InterfaceMySQLReceiveBytes;
+    extern const Event InterfacePostgreSQLSendBytes;
+    extern const Event InterfacePostgreSQLReceiveBytes;
 }
 namespace fs = std::filesystem;
@@ -646,6 +659,7 @@ try
     }
 #endif
+    registerInterpreters();
     registerFunctions();
     registerAggregateFunctions();
     registerTableFunctions();
@@ -2047,7 +2061,7 @@ std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
     auto create_factory = [&](const std::string & type, const std::string & conf_name) -> TCPServerConnectionFactory::Ptr
     {
         if (type == "tcp")
-            return TCPServerConnectionFactory::Ptr(new TCPHandlerFactory(*this, false, false));
+            return TCPServerConnectionFactory::Ptr(new TCPHandlerFactory(*this, false, false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes));
         if (type == "tls")
 #if USE_SSL
@@ -2059,20 +2073,20 @@
         if (type == "proxy1")
             return TCPServerConnectionFactory::Ptr(new ProxyV1HandlerFactory(*this, conf_name));
         if (type == "mysql")
-            return TCPServerConnectionFactory::Ptr(new MySQLHandlerFactory(*this));
+            return TCPServerConnectionFactory::Ptr(new MySQLHandlerFactory(*this, ProfileEvents::InterfaceMySQLReceiveBytes, ProfileEvents::InterfaceMySQLSendBytes));
         if (type == "postgres")
-            return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this));
+            return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this, ProfileEvents::InterfacePostgreSQLReceiveBytes, ProfileEvents::InterfacePostgreSQLSendBytes));
         if (type == "http")
             return TCPServerConnectionFactory::Ptr(
-                new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"))
+                new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes)
             );
         if (type == "prometheus")
             return TCPServerConnectionFactory::Ptr(
-                new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"))
+                new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), ProfileEvents::InterfacePrometheusReceiveBytes, ProfileEvents::InterfacePrometheusSendBytes)
             );
         if (type == "interserver")
             return TCPServerConnectionFactory::Ptr(
-                new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"))
+                new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"), ProfileEvents::InterfaceInterserverReceiveBytes, ProfileEvents::InterfaceInterserverSendBytes)
             );
         throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol configuration error, unknown protocol name '{}'", type);
@@ -2205,7 +2219,7 @@ void Server::createServers(
             port_name,
             "http://" + address.toString(),
             std::make_unique<HTTPServer>(
-                httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params));
+                httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes));
         });
     }
@@ -2225,7 +2239,7 @@ void Server::createServers(
             port_name,
             "https://" + address.toString(),
             std::make_unique<HTTPServer>(
-                httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
+                httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes));
 #else
             UNUSED(port);
             throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "HTTPS protocol is disabled because Poco library was built without NetSSL support.");
@@ -2248,7 +2262,7 @@ void Server::createServers(
             port_name,
             "native protocol (tcp): " + address.toString(),
             std::make_unique<TCPServer>(
-                new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ false),
+                new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes),
                 server_pool,
                 socket,
                 new Poco::Net::TCPServerParams));
@@ -2270,7 +2284,7 @@ void Server::createServers(
             port_name,
             "native protocol (tcp) with PROXY: " + address.toString(),
             std::make_unique<TCPServer>(
-                new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ true),
+                new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ true, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes),
                 server_pool,
                 socket,
                 new Poco::Net::TCPServerParams));
@@ -2293,7 +2307,7 @@ void Server::createServers(
             port_name,
             "secure native protocol (tcp_secure): " + address.toString(),
             std::make_unique<TCPServer>(
-                new TCPHandlerFactory(*this, /* secure */ true, /* proxy protocol */ false),
+                new TCPHandlerFactory(*this, /* secure */ true, /* proxy protocol */ false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes),
                 server_pool,
                 socket,
                 new Poco::Net::TCPServerParams));
@@ -2317,7 +2331,7 @@ void Server::createServers(
             listen_host,
             port_name,
             "MySQL compatibility protocol: " + address.toString(),
-            std::make_unique<TCPServer>(new MySQLHandlerFactory(*this), server_pool, socket, new Poco::Net::TCPServerParams));
+            std::make_unique<TCPServer>(new MySQLHandlerFactory(*this, ProfileEvents::InterfaceMySQLReceiveBytes, ProfileEvents::InterfaceMySQLSendBytes), server_pool, socket, new Poco::Net::TCPServerParams));
         });
     }
@@ -2334,7 +2348,7 @@ void Server::createServers(
             listen_host,
             port_name,
             "PostgreSQL compatibility protocol: " + address.toString(),
-            std::make_unique<TCPServer>(new PostgreSQLHandlerFactory(*this), server_pool, socket, new Poco::Net::TCPServerParams));
+            std::make_unique<TCPServer>(new PostgreSQLHandlerFactory(*this, ProfileEvents::InterfacePostgreSQLReceiveBytes, ProfileEvents::InterfacePostgreSQLSendBytes), server_pool, socket, new Poco::Net::TCPServerParams));
         });
     }
@@ -2368,7 +2382,7 @@ void Server::createServers(
             port_name,
             "Prometheus: http://" + address.toString(),
             std::make_unique<HTTPServer>(
-                httpContext(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
+                httpContext(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfacePrometheusReceiveBytes, ProfileEvents::InterfacePrometheusSendBytes));
         });
     }
 }
@@ -2414,7 +2428,9 @@ void Server::createInterserverServers(
                 createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"),
                 server_pool,
                 socket,
-                http_params));
+                http_params,
+                ProfileEvents::InterfaceInterserverReceiveBytes,
+                ProfileEvents::InterfaceInterserverSendBytes));
         });
     }
@@ -2437,7 +2453,9 @@ void Server::createInterserverServers(
                 createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPSHandler-factory"),
                 server_pool,
                 socket,
-                http_params));
+                http_params,
+                ProfileEvents::InterfaceInterserverReceiveBytes,
+                ProfileEvents::InterfaceInterserverSendBytes));
 #else
         UNUSED(port);
         throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
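Server.cpp now threads a (receive, send) ProfileEvents pair through every listener, so per-protocol traffic can be accounted separately. A hedged sketch of the accounting idea (accountSentBytes is hypothetical; the real counting happens inside the server's connection and buffer classes):

#include <Common/ProfileEvents.h>

namespace ProfileEvents
{
    /// Declared like the externs above; defined centrally in ProfileEvents.cpp.
    extern const Event InterfaceHTTPSendBytes;
}

/// Hypothetical helper: credit bytes just written to a socket against the
/// interface-specific counter the factory was constructed with.
void accountSentBytes(const ProfileEvents::Event & send_event, size_t bytes_written)
{
    ProfileEvents::increment(send_event, bytes_written);
}

/// Usage sketch: accountSentBytes(ProfileEvents::InterfaceHTTPSendBytes, n);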


@@ -713,11 +713,11 @@
          For example, if there two users A, B and a row policy is defined only for A, then
          if this setting is true the user B will see all rows, and if this setting is false the user B will see no rows.
          By default this setting is false for compatibility with earlier access configurations. -->
-    <users_without_row_policies_can_read_rows>false</users_without_row_policies_can_read_rows>
+    <users_without_row_policies_can_read_rows>true</users_without_row_policies_can_read_rows>
     <!-- By default, for backward compatibility ON CLUSTER queries ignore CLUSTER grant,
          however you can change this behaviour by setting this to true -->
-    <on_cluster_queries_require_cluster_grant>false</on_cluster_queries_require_cluster_grant>
+    <on_cluster_queries_require_cluster_grant>true</on_cluster_queries_require_cluster_grant>
     <!-- By default, for backward compatibility "SELECT * FROM system.<table>" doesn't require any grants and can be executed
          by any user. You can change this behaviour by setting this to true.
@@ -725,19 +725,19 @@
          Exceptions: a few system tables ("tables", "columns", "databases", and some constant tables like "one", "contributors")
          are still accessible for everyone; and if there is a SHOW privilege (e.g. "SHOW USERS") granted the corresponding system
          table (i.e. "system.users") will be accessible. -->
-    <select_from_system_db_requires_grant>false</select_from_system_db_requires_grant>
+    <select_from_system_db_requires_grant>true</select_from_system_db_requires_grant>
     <!-- By default, for backward compatibility "SELECT * FROM information_schema.<table>" doesn't require any grants and can be
          executed by any user. You can change this behaviour by setting this to true.
          If it's set to true then this query requires "GRANT SELECT ON information_schema.<table>" just like as for ordinary tables. -->
-    <select_from_information_schema_requires_grant>false</select_from_information_schema_requires_grant>
+    <select_from_information_schema_requires_grant>true</select_from_information_schema_requires_grant>
     <!-- By default, for backward compatibility a settings profile constraint for a specific setting inherit every not set field from
          previous profile. You can change this behaviour by setting this to true.
          If it's set to true then if settings profile has a constraint for a specific setting, then this constraint completely cancels all
          actions of previous constraint (defined in other profiles) for the same specific setting, including fields that are not set by new constraint.
          It also enables 'changeable_in_readonly' constraint type -->
-    <settings_constraints_replace_previous>false</settings_constraints_replace_previous>
+    <settings_constraints_replace_previous>true</settings_constraints_replace_previous>
     <!-- Number of seconds since last access a role is stored in the Role Cache -->
     <role_cache_expiration_time_seconds>600</role_cache_expiration_time_seconds>
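With these defaults flipped, reading most system tables now needs an explicit grant; per the comment above, something like GRANT SELECT ON system.processes TO alice (alice being a hypothetical user), while the constant tables ("one", "contributors", ...) and any table matching a granted SHOW privilege remain readable.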


@@ -1,5 +1,6 @@
 #include <Common/Exception.h>
 #include <Common/TerminalSize.h>
+#include <Common/re2.h>
 #include <IO/ReadHelpers.h>
 #include <IO/ReadBufferFromFile.h>
@@ -12,15 +13,6 @@
 #include <boost/program_options.hpp>
 #include <filesystem>
-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
-#endif
-#include <re2/re2.h>
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif
 namespace fs = std::filesystem;
 #define EXTRACT_PATH_PATTERN ".*\\/store/(.*)"
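Several files in this commit replace the same eight-line warning-suppression block with a single #include <Common/re2.h>. Presumably that header centralizes exactly the wrapper being deleted; a sketch of its likely contents:

#pragma once

/// Presumed contents of Common/re2.h (an assumption based on the removed
/// blocks): the pragma wrapper each file previously repeated inline.
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <re2/re2.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif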


@@ -24,20 +24,12 @@
 #include <Storages/MergeTree/MergeTreeSettings.h>
 #include <base/defines.h>
 #include <IO/Operators.h>
+#include <Common/re2.h>
 #include <Poco/AccessExpireCache.h>
 #include <boost/algorithm/string/join.hpp>
 #include <filesystem>
 #include <mutex>
-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
-#endif
-#include <re2/re2.h>
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif
 namespace DB
 {
 namespace ErrorCodes


@@ -200,6 +200,7 @@ enum class AccessType
     M(SYSTEM_UNFREEZE, "SYSTEM UNFREEZE", GLOBAL, SYSTEM) \
     M(SYSTEM_FAILPOINT, "SYSTEM ENABLE FAILPOINT, SYSTEM DISABLE FAILPOINT", GLOBAL, SYSTEM) \
     M(SYSTEM_LISTEN, "SYSTEM START LISTEN, SYSTEM STOP LISTEN", GLOBAL, SYSTEM) \
+    M(SYSTEM_JEMALLOC, "SYSTEM JEMALLOC PURGE, SYSTEM JEMALLOC ENABLE PROFILE, SYSTEM JEMALLOC DISABLE PROFILE, SYSTEM JEMALLOC FLUSH PROFILE", GLOBAL, SYSTEM) \
     M(SYSTEM, "", GROUP, ALL) /* allows to execute SYSTEM {SHUTDOWN|RELOAD CONFIG|...} */ \
     \
     M(dictGet, "dictHas, dictGetHierarchy, dictIsIn", DICTIONARY, ALL) /* allows to execute functions dictGet(), dictHas(), dictGetHierarchy(), dictIsIn() */\
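The new entry gates the jemalloc maintenance statements behind their own grant; presumably GRANT SYSTEM JEMALLOC ON *.* TO role (syntax assumed from the neighboring SYSTEM entries) rather than requiring the whole SYSTEM group.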


@@ -3,15 +3,7 @@
 #include <Analyzer/Identifier.h>
 #include <Analyzer/IQueryTreeNode.h>
 #include <Analyzer/ListNode.h>
+#include <Common/re2.h>
-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
-#endif
-#include <re2/re2.h>
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif
 namespace DB
 {


@@ -4,15 +4,7 @@
 #include <Analyzer/IQueryTreeNode.h>
 #include <Analyzer/ColumnTransformers.h>
 #include <Parsers/ASTAsterisk.h>
+#include <Common/re2.h>
-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
-#endif
-#include <re2/re2.h>
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif
 namespace DB
 {


@@ -64,39 +64,43 @@ public:
         auto lhs_argument_node_type = lhs_argument->getNodeType();
         auto rhs_argument_node_type = rhs_argument->getNodeType();
+        QueryTreeNodePtr candidate;
         if (lhs_argument_node_type == QueryTreeNodeType::FUNCTION && rhs_argument_node_type == QueryTreeNodeType::FUNCTION)
-            tryOptimizeComparisonTupleFunctions(node, lhs_argument, rhs_argument, comparison_function_name);
+            candidate = tryOptimizeComparisonTupleFunctions(lhs_argument, rhs_argument, comparison_function_name);
         else if (lhs_argument_node_type == QueryTreeNodeType::FUNCTION && rhs_argument_node_type == QueryTreeNodeType::CONSTANT)
-            tryOptimizeComparisonTupleFunctionAndConstant(node, lhs_argument, rhs_argument, comparison_function_name);
+            candidate = tryOptimizeComparisonTupleFunctionAndConstant(lhs_argument, rhs_argument, comparison_function_name);
         else if (lhs_argument_node_type == QueryTreeNodeType::CONSTANT && rhs_argument_node_type == QueryTreeNodeType::FUNCTION)
-            tryOptimizeComparisonTupleFunctionAndConstant(node, rhs_argument, lhs_argument, comparison_function_name);
+            candidate = tryOptimizeComparisonTupleFunctionAndConstant(rhs_argument, lhs_argument, comparison_function_name);
+        if (candidate != nullptr && node->getResultType()->equals(*candidate->getResultType()))
+            node = candidate;
     }
 private:
-    void tryOptimizeComparisonTupleFunctions(QueryTreeNodePtr & node,
+    QueryTreeNodePtr tryOptimizeComparisonTupleFunctions(
         const QueryTreeNodePtr & lhs_function_node,
         const QueryTreeNodePtr & rhs_function_node,
         const std::string & comparison_function_name) const
     {
         const auto & lhs_function_node_typed = lhs_function_node->as<FunctionNode &>();
         if (lhs_function_node_typed.getFunctionName() != "tuple")
-            return;
+            return {};
         const auto & rhs_function_node_typed = rhs_function_node->as<FunctionNode &>();
         if (rhs_function_node_typed.getFunctionName() != "tuple")
-            return;
+            return {};
         const auto & lhs_tuple_function_arguments_nodes = lhs_function_node_typed.getArguments().getNodes();
         size_t lhs_tuple_function_arguments_nodes_size = lhs_tuple_function_arguments_nodes.size();
         const auto & rhs_tuple_function_arguments_nodes = rhs_function_node_typed.getArguments().getNodes();
         if (lhs_tuple_function_arguments_nodes_size != rhs_tuple_function_arguments_nodes.size())
-            return;
+            return {};
         if (lhs_tuple_function_arguments_nodes_size == 1)
         {
-            node = makeComparisonFunction(lhs_tuple_function_arguments_nodes[0], rhs_tuple_function_arguments_nodes[0], comparison_function_name);
-            return;
+            return makeComparisonFunction(lhs_tuple_function_arguments_nodes[0], rhs_tuple_function_arguments_nodes[0], comparison_function_name);
         }
         QueryTreeNodes tuple_arguments_equals_functions;
@@ -108,45 +112,44 @@ private:
             tuple_arguments_equals_functions.push_back(std::move(equals_function));
         }
-        node = makeEquivalentTupleComparisonFunction(std::move(tuple_arguments_equals_functions), comparison_function_name);
+        return makeEquivalentTupleComparisonFunction(std::move(tuple_arguments_equals_functions), comparison_function_name);
     }
-    void tryOptimizeComparisonTupleFunctionAndConstant(QueryTreeNodePtr & node,
+    QueryTreeNodePtr tryOptimizeComparisonTupleFunctionAndConstant(
        const QueryTreeNodePtr & function_node,
        const QueryTreeNodePtr & constant_node,
        const std::string & comparison_function_name) const
     {
         const auto & function_node_typed = function_node->as<FunctionNode &>();
         if (function_node_typed.getFunctionName() != "tuple")
-            return;
+            return {};
         auto & constant_node_typed = constant_node->as<ConstantNode &>();
         const auto & constant_node_value = constant_node_typed.getValue();
         if (constant_node_value.getType() != Field::Types::Which::Tuple)
-            return;
+            return {};
         const auto & constant_tuple = constant_node_value.get<const Tuple &>();
         const auto & function_arguments_nodes = function_node_typed.getArguments().getNodes();
         size_t function_arguments_nodes_size = function_arguments_nodes.size();
         if (function_arguments_nodes_size != constant_tuple.size())
-            return;
+            return {};
         auto constant_node_result_type = constant_node_typed.getResultType();
         const auto * tuple_data_type = typeid_cast<const DataTypeTuple *>(constant_node_result_type.get());
         if (!tuple_data_type)
-            return;
+            return {};
         const auto & tuple_data_type_elements = tuple_data_type->getElements();
         if (tuple_data_type_elements.size() != function_arguments_nodes_size)
-            return;
+            return {};
         if (function_arguments_nodes_size == 1)
         {
             auto comparison_argument_constant_value = std::make_shared<ConstantValue>(constant_tuple[0], tuple_data_type_elements[0]);
             auto comparison_argument_constant_node = std::make_shared<ConstantNode>(std::move(comparison_argument_constant_value));
-            node = makeComparisonFunction(function_arguments_nodes[0], std::move(comparison_argument_constant_node), comparison_function_name);
-            return;
+            return makeComparisonFunction(function_arguments_nodes[0], std::move(comparison_argument_constant_node), comparison_function_name);
         }
         QueryTreeNodes tuple_arguments_equals_functions;
@@ -160,7 +163,7 @@ private:
             tuple_arguments_equals_functions.push_back(std::move(equals_function));
         }
-        node = makeEquivalentTupleComparisonFunction(std::move(tuple_arguments_equals_functions), comparison_function_name);
+        return makeEquivalentTupleComparisonFunction(std::move(tuple_arguments_equals_functions), comparison_function_name);
     }
     QueryTreeNodePtr makeEquivalentTupleComparisonFunction(QueryTreeNodes tuple_arguments_equals_functions,
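The shape of this refactor: the helpers no longer mutate node in place but return a candidate (or nullptr), and the visitor commits the candidate only when the result type is preserved. A self-contained toy of that guard pattern (none of these types are the real Analyzer classes):

#include <memory>
#include <string>

/// Hypothetical miniature of the pass's new control flow.
struct Node
{
    std::string result_type;
};
using NodePtr = std::shared_ptr<Node>;

/// Stand-in for tryOptimizeComparisonTupleFunctions: returns nullptr on the
/// `return {};` paths, a replacement node otherwise.
NodePtr tryRewrite(const NodePtr & lhs, const NodePtr & /*rhs*/)
{
    if (lhs->result_type != "Tuple")
        return {};
    return std::make_shared<Node>(Node{lhs->result_type});
}

void visit(NodePtr & node, const NodePtr & lhs, const NodePtr & rhs)
{
    /// Commit only if the rewrite preserves the expression's result type,
    /// mirroring the equals(*candidate->getResultType()) guard above.
    NodePtr candidate = tryRewrite(lhs, rhs);
    if (candidate != nullptr && node->result_type == candidate->result_type)
        node = candidate;
}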


@@ -61,6 +61,8 @@ public:
             return;
         auto & count_distinct_argument_column = count_distinct_arguments_nodes[0];
+        if (count_distinct_argument_column->getNodeType() != QueryTreeNodeType::COLUMN)
+            return;
         auto & count_distinct_argument_column_typed = count_distinct_argument_column->as<ColumnNode &>();
         /// Build subquery SELECT count_distinct_argument_column FROM table_expression GROUP BY count_distinct_argument_column
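The added guard limits the rewrite to bare column arguments: assuming this is the countDistinct-to-subquery optimization the comment describes, SELECT countDistinct(x) FROM t can still become a count over SELECT x FROM t GROUP BY x, whereas an expression argument such as countDistinct(x + 1) is now left untouched.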


@@ -396,7 +396,7 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context
     String backup_name_for_logging = backup_info.toStringForLogging();
     String base_backup_name;
     if (backup_settings.base_backup_info)
-        base_backup_name = backup_settings.base_backup_info->toString();
+        base_backup_name = backup_settings.base_backup_info->toStringForLogging();
     try
     {
@@ -750,7 +750,7 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt
     String backup_name_for_logging = backup_info.toStringForLogging();
     String base_backup_name;
     if (restore_settings.base_backup_info)
-        base_backup_name = restore_settings.base_backup_info->toString();
+        base_backup_name = restore_settings.base_backup_info->toStringForLogging();
     addInfo(restore_id, backup_name_for_logging, base_backup_name, restore_settings.internal, BackupStatus::RESTORING);


@@ -551,13 +551,18 @@ endif ()
 target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::lz4)
 if (TARGET ch_contrib::qpl)
     dbms_target_link_libraries(PUBLIC ch_contrib::qpl)
 endif ()
 if (TARGET ch_contrib::accel-config)
     dbms_target_link_libraries(PUBLIC ch_contrib::accel-config)
 endif ()
+if (TARGET ch_contrib::qatzstd_plugin)
+    dbms_target_link_libraries(PUBLIC ch_contrib::qatzstd_plugin)
+    target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::qatzstd_plugin)
+endif ()
 target_link_libraries(clickhouse_common_io PUBLIC boost::context)
 dbms_target_link_libraries(PUBLIC boost::context)


@@ -651,7 +651,13 @@ void Connection::sendQuery(
             if (method == "ZSTD")
                 level = settings->network_zstd_compression_level;
-            CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_deflate_qpl_codec);
+            CompressionCodecFactory::instance().validateCodec(
+                method,
+                level,
+                !settings->allow_suspicious_codecs,
+                settings->allow_experimental_codecs,
+                settings->enable_deflate_qpl_codec,
+                settings->enable_zstd_qat_codec);
             compression_codec = CompressionCodecFactory::instance().get(method, level);
         }
         else


@@ -118,18 +118,18 @@ ConnectionPoolWithFailover::Status ConnectionPoolWithFailover::getStatus() const
     return result;
 }
-std::vector<IConnectionPool::Entry> ConnectionPoolWithFailover::getMany(const ConnectionTimeouts & timeouts,
-                                                                        const Settings & settings,
-                                                                        PoolMode pool_mode,
-                                                                        AsyncCallback async_callback,
-                                                                        std::optional<bool> skip_unavailable_endpoints)
+std::vector<IConnectionPool::Entry> ConnectionPoolWithFailover::getMany(
+    const ConnectionTimeouts & timeouts,
+    const Settings & settings,
+    PoolMode pool_mode,
+    AsyncCallback async_callback,
+    std::optional<bool> skip_unavailable_endpoints,
+    GetPriorityForLoadBalancing::Func priority_func)
 {
     TryGetEntryFunc try_get_entry = [&](NestedPool & pool, std::string & fail_message)
-    {
-        return tryGetEntry(pool, timeouts, fail_message, settings, nullptr, async_callback);
-    };
+    { return tryGetEntry(pool, timeouts, fail_message, settings, nullptr, async_callback); };
-    std::vector<TryResult> results = getManyImpl(settings, pool_mode, try_get_entry, skip_unavailable_endpoints);
+    std::vector<TryResult> results = getManyImpl(settings, pool_mode, try_get_entry, skip_unavailable_endpoints, priority_func);
     std::vector<Entry> entries;
     entries.reserve(results.size());
@@ -153,17 +153,17 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
 std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::getManyChecked(
     const ConnectionTimeouts & timeouts,
-    const Settings & settings, PoolMode pool_mode,
+    const Settings & settings,
+    PoolMode pool_mode,
     const QualifiedTableName & table_to_check,
     AsyncCallback async_callback,
-    std::optional<bool> skip_unavailable_endpoints)
+    std::optional<bool> skip_unavailable_endpoints,
+    GetPriorityForLoadBalancing::Func priority_func)
 {
     TryGetEntryFunc try_get_entry = [&](NestedPool & pool, std::string & fail_message)
-    {
-        return tryGetEntry(pool, timeouts, fail_message, settings, &table_to_check, async_callback);
-    };
+    { return tryGetEntry(pool, timeouts, fail_message, settings, &table_to_check, async_callback); };
-    return getManyImpl(settings, pool_mode, try_get_entry, skip_unavailable_endpoints);
+    return getManyImpl(settings, pool_mode, try_get_entry, skip_unavailable_endpoints, priority_func);
 }
 ConnectionPoolWithFailover::Base::GetPriorityFunc ConnectionPoolWithFailover::makeGetPriorityFunc(const Settings & settings)
@@ -175,14 +175,16 @@ ConnectionPoolWithFailover::Base::GetPriorityFunc ConnectionPoolWithFailover::ma
 }
 std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::getManyImpl(
     const Settings & settings,
     PoolMode pool_mode,
     const TryGetEntryFunc & try_get_entry,
-    std::optional<bool> skip_unavailable_endpoints)
+    std::optional<bool> skip_unavailable_endpoints,
+    GetPriorityForLoadBalancing::Func priority_func)
 {
     if (nested_pools.empty())
-        throw DB::Exception(DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED,
-                            "Cannot get connection from ConnectionPoolWithFailover cause nested pools are empty");
+        throw DB::Exception(
+            DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED,
+            "Cannot get connection from ConnectionPoolWithFailover cause nested pools are empty");
     if (!skip_unavailable_endpoints.has_value())
         skip_unavailable_endpoints = settings.skip_unavailable_shards;
@@ -203,14 +205,13 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
     else
         throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown pool allocation mode");
-    GetPriorityFunc get_priority = makeGetPriorityFunc(settings);
+    if (!priority_func)
+        priority_func = makeGetPriorityFunc(settings);
     UInt64 max_ignored_errors = settings.distributed_replica_max_ignored_errors.value;
     bool fallback_to_stale_replicas = settings.fallback_to_stale_replicas_for_distributed_queries.value;
-    return Base::getMany(min_entries, max_entries, max_tries,
-                         max_ignored_errors, fallback_to_stale_replicas,
-                         try_get_entry, get_priority);
+    return Base::getMany(min_entries, max_entries, max_tries, max_ignored_errors, fallback_to_stale_replicas, try_get_entry, priority_func);
 }
 ConnectionPoolWithFailover::TryResult
@@ -251,11 +252,14 @@ ConnectionPoolWithFailover::tryGetEntry(
     return result;
 }
-std::vector<ConnectionPoolWithFailover::Base::ShuffledPool> ConnectionPoolWithFailover::getShuffledPools(const Settings & settings)
+std::vector<ConnectionPoolWithFailover::Base::ShuffledPool>
+ConnectionPoolWithFailover::getShuffledPools(const Settings & settings, GetPriorityForLoadBalancing::Func priority_func)
 {
-    GetPriorityFunc get_priority = makeGetPriorityFunc(settings);
+    if (!priority_func)
+        priority_func = makeGetPriorityFunc(settings);
     UInt64 max_ignored_errors = settings.distributed_replica_max_ignored_errors.value;
-    return Base::getShuffledPools(max_ignored_errors, get_priority);
+    return Base::getShuffledPools(max_ignored_errors, priority_func);
 }
 }
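With priority_func plumbed through, callers can override replica ordering per request instead of always falling back to makeGetPriorityFunc(settings). A hedged sketch, assuming the Func is invocable with a pool index and returns a priority where lower wins (the real types live in GetPriorityForLoadBalancing):

#include <functional>

/// Hypothetical stand-ins for Priority and GetPriorityForLoadBalancing::Func.
struct Priority
{
    long value = 0; /// Lower value = tried first.
};
using PriorityFunc = std::function<Priority(size_t index)>;

/// Prefer one fixed replica; demote all others.
PriorityFunc makePinnedReplicaPriority(size_t pinned_index)
{
    return [pinned_index](size_t index) { return Priority{index == pinned_index ? 0L : 1L}; };
}

/// Usage sketch:
/// pool.getManyChecked(timeouts, settings, pool_mode, table, {}, {}, makePinnedReplicaPriority(0));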


@@ -54,10 +54,13 @@ public:
     /** Allocates up to the specified number of connections to work.
       * Connections provide access to different replicas of one shard.
       */
-    std::vector<Entry> getMany(const ConnectionTimeouts & timeouts,
-                               const Settings & settings, PoolMode pool_mode,
-                               AsyncCallback async_callback = {},
-                               std::optional<bool> skip_unavailable_endpoints = std::nullopt);
+    std::vector<Entry> getMany(
+        const ConnectionTimeouts & timeouts,
+        const Settings & settings,
+        PoolMode pool_mode,
+        AsyncCallback async_callback = {},
+        std::optional<bool> skip_unavailable_endpoints = std::nullopt,
+        GetPriorityForLoadBalancing::Func priority_func = {});
     /// The same as getMany(), but return std::vector<TryResult>.
     std::vector<TryResult> getManyForTableFunction(const ConnectionTimeouts & timeouts,
@@ -69,12 +72,13 @@ public:
     /// The same as getMany(), but check that replication delay for table_to_check is acceptable.
     /// Delay threshold is taken from settings.
     std::vector<TryResult> getManyChecked(
         const ConnectionTimeouts & timeouts,
         const Settings & settings,
         PoolMode pool_mode,
         const QualifiedTableName & table_to_check,
         AsyncCallback async_callback = {},
-        std::optional<bool> skip_unavailable_endpoints = std::nullopt);
+        std::optional<bool> skip_unavailable_endpoints = std::nullopt,
+        GetPriorityForLoadBalancing::Func priority_func = {});
     struct NestedPoolStatus
     {
@@ -87,7 +91,7 @@ public:
     using Status = std::vector<NestedPoolStatus>;
     Status getStatus() const;
-    std::vector<Base::ShuffledPool> getShuffledPools(const Settings & settings);
+    std::vector<Base::ShuffledPool> getShuffledPools(const Settings & settings, GetPriorityFunc priority_func = {});
     size_t getMaxErrorCup() const { return Base::max_error_cap; }
@@ -96,13 +100,16 @@ public:
         Base::updateSharedErrorCounts(shuffled_pools);
     }
+    size_t getPoolSize() const { return Base::getPoolSize(); }
 private:
     /// Get the values of relevant settings and call Base::getMany()
     std::vector<TryResult> getManyImpl(
         const Settings & settings,
         PoolMode pool_mode,
         const TryGetEntryFunc & try_get_entry,
-        std::optional<bool> skip_unavailable_endpoints = std::nullopt);
+        std::optional<bool> skip_unavailable_endpoints = std::nullopt,
+        GetPriorityForLoadBalancing::Func priority_func = {});
     /// Try to get a connection from the pool and check that it is good.
     /// If table_to_check is not null and the check is enabled in settings, check that replication delay
@@ -115,7 +122,7 @@ private:
         const QualifiedTableName * table_to_check = nullptr,
         AsyncCallback async_callback = {});
-    GetPriorityFunc makeGetPriorityFunc(const Settings & settings);
+    GetPriorityForLoadBalancing::Func makeGetPriorityFunc(const Settings & settings);
     GetPriorityForLoadBalancing get_priority_load_balancing;
 };


@@ -28,16 +28,18 @@ HedgedConnections::HedgedConnections(
     const ThrottlerPtr & throttler_,
     PoolMode pool_mode,
     std::shared_ptr<QualifiedTableName> table_to_check_,
-    AsyncCallback async_callback)
+    AsyncCallback async_callback,
+    GetPriorityForLoadBalancing::Func priority_func)
     : hedged_connections_factory(
         pool_,
         context_->getSettingsRef(),
         timeouts_,
         context_->getSettingsRef().connections_with_failover_max_tries.value,
        context_->getSettingsRef().fallback_to_stale_replicas_for_distributed_queries.value,
        context_->getSettingsRef().max_parallel_replicas.value,
        context_->getSettingsRef().skip_unavailable_shards.value,
-        table_to_check_)
+        table_to_check_,
+        priority_func)
     , context(std::move(context_))
     , settings(context->getSettingsRef())
     , throttler(throttler_)


@@ -70,13 +70,15 @@ public:
         size_t index;
     };
-    HedgedConnections(const ConnectionPoolWithFailoverPtr & pool_,
-                      ContextPtr context_,
-                      const ConnectionTimeouts & timeouts_,
-                      const ThrottlerPtr & throttler,
-                      PoolMode pool_mode,
-                      std::shared_ptr<QualifiedTableName> table_to_check_ = nullptr,
-                      AsyncCallback async_callback = {});
+    HedgedConnections(
+        const ConnectionPoolWithFailoverPtr & pool_,
+        ContextPtr context_,
+        const ConnectionTimeouts & timeouts_,
+        const ThrottlerPtr & throttler,
+        PoolMode pool_mode,
+        std::shared_ptr<QualifiedTableName> table_to_check_ = nullptr,
+        AsyncCallback async_callback = {},
+        GetPriorityForLoadBalancing::Func priority_func = {});
     void sendScalarsData(Scalars & data) override;


@@ -29,7 +29,8 @@ HedgedConnectionsFactory::HedgedConnectionsFactory(
     bool fallback_to_stale_replicas_,
     UInt64 max_parallel_replicas_,
     bool skip_unavailable_shards_,
-    std::shared_ptr<QualifiedTableName> table_to_check_)
+    std::shared_ptr<QualifiedTableName> table_to_check_,
+    GetPriorityForLoadBalancing::Func priority_func)
     : pool(pool_)
     , timeouts(timeouts_)
     , table_to_check(table_to_check_)
@@ -39,7 +40,7 @@ HedgedConnectionsFactory::HedgedConnectionsFactory(
     , max_parallel_replicas(max_parallel_replicas_)
     , skip_unavailable_shards(skip_unavailable_shards_)
 {
-    shuffled_pools = pool->getShuffledPools(settings_);
+    shuffled_pools = pool->getShuffledPools(settings_, priority_func);
     for (auto shuffled_pool : shuffled_pools)
         replicas.emplace_back(std::make_unique<ConnectionEstablisherAsync>(shuffled_pool.pool, &timeouts, settings_, log, table_to_check.get()));
 }
@@ -323,8 +324,7 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::processFinishedConnect
     else
     {
         ShuffledPool & shuffled_pool = shuffled_pools[index];
-        LOG_WARNING(
-            log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);
+        LOG_INFO(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);
         ProfileEvents::increment(ProfileEvents::DistributedConnectionFailTry);
         shuffled_pool.error_count = std::min(pool->getMaxErrorCup(), shuffled_pool.error_count + 1);


@@ -53,7 +53,8 @@ public:
     bool fallback_to_stale_replicas_,
     UInt64 max_parallel_replicas_,
     bool skip_unavailable_shards_,
-    std::shared_ptr<QualifiedTableName> table_to_check_ = nullptr);
+    std::shared_ptr<QualifiedTableName> table_to_check_ = nullptr,
+    GetPriorityForLoadBalancing::Func priority_func = {});
     /// Create and return active connections according to pool_mode.
     std::vector<Connection *> getManyConnections(PoolMode pool_mode, AsyncCallback async_callback = {});


@@ -48,11 +48,11 @@ void prefaultPages([[maybe_unused]] void * buf_, [[maybe_unused]] size_t len_)
         return;
     auto [buf, len] = adjustToPageSize(buf_, len_, page_size);
-    if (auto res = ::madvise(buf, len, MADV_POPULATE_WRITE); res < 0)
+    if (::madvise(buf, len, MADV_POPULATE_WRITE) < 0)
         LOG_TRACE(
             LogFrequencyLimiter(&Poco::Logger::get("Allocator"), 1),
             "Attempt to populate pages failed: {} (EINVAL is expected for kernels < 5.14)",
-            errnoToString(res));
+            errnoToString(errno));
 #endif
 }
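The fix matters because madvise() signals failure with a bare -1 and stores the reason in errno; the old code formatted the return value instead of errno. A standalone sketch of the corrected pattern:

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <sys/mman.h>

/// Minimal sketch: on failure, report errno, never the -1 return code.
void prefault(void * buf, size_t len)
{
#if defined(MADV_POPULATE_WRITE)
    if (::madvise(buf, len, MADV_POPULATE_WRITE) < 0)
        std::fprintf(stderr, "madvise failed: %s (EINVAL is expected for kernels < 5.14)\n", std::strerror(errno));
#else
    (void)buf;
    (void)len;
#endif
}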


@@ -43,6 +43,19 @@ void logAboutProgress(Poco::Logger * log, size_t processed, size_t total, Atomic
     }
 }
+void cancelOnDependencyFailure(const LoadJobPtr & self, const LoadJobPtr & dependency, std::exception_ptr & cancel)
+{
+    cancel = std::make_exception_ptr(Exception(ErrorCodes::ASYNC_LOAD_CANCELED,
+        "Load job '{}' -> {}",
+        self->name,
+        getExceptionMessage(dependency->exception(), /* with_stacktrace = */ false)));
+}
+void ignoreDependencyFailure(const LoadJobPtr &, const LoadJobPtr &, std::exception_ptr &)
+{
+    // No-op
+}
 LoadStatus LoadJob::status() const
 {
     std::unique_lock lock{mutex};
@@ -96,7 +109,10 @@ size_t LoadJob::canceled(const std::exception_ptr & ptr)
 size_t LoadJob::finish()
 {
-    func = {}; // To ensure job function is destructed before `AsyncLoader::wait()` return
+    // To ensure functions are destructed before `AsyncLoader::wait()` return
+    func = {};
+    dependency_failure = {};
     finish_time = std::chrono::system_clock::now();
     if (waiters > 0)
         finished.notify_all();
@@ -327,17 +343,19 @@ void AsyncLoader::schedule(const LoadJobSet & jobs_to_schedule)
         if (dep_status == LoadStatus::FAILED || dep_status == LoadStatus::CANCELED)
         {
-            // Dependency on already failed or canceled job -- it's okay. Cancel all dependent jobs.
-            std::exception_ptr e;
+            // Dependency on already failed or canceled job -- it's okay.
+            // Process as usual (may lead to cancel of all dependent jobs).
+            std::exception_ptr cancel;
             NOEXCEPT_SCOPE({
                 ALLOW_ALLOCATIONS_IN_SCOPE;
-                e = std::make_exception_ptr(Exception(ErrorCodes::ASYNC_LOAD_CANCELED,
-                    "Load job '{}' -> {}",
-                    job->name,
-                    getExceptionMessage(dep->exception(), /* with_stacktrace = */ false)));
+                if (job->dependency_failure)
+                    job->dependency_failure(job, dep, cancel);
             });
-            finish(job, LoadStatus::CANCELED, e, lock);
-            break; // This job is now finished, stop its dependencies processing
+            if (cancel)
+            {
+                finish(job, LoadStatus::CANCELED, cancel, lock);
+                break; // This job is now finished, stop its dependencies processing
+            }
         }
     }
 }
@@ -515,63 +533,76 @@ String AsyncLoader::checkCycle(const LoadJobPtr & job, LoadJobSet & left, LoadJo
     return {};
 }
-void AsyncLoader::finish(const LoadJobPtr & job, LoadStatus status, std::exception_ptr exception_from_job, std::unique_lock<std::mutex> & lock)
-{
-    chassert(scheduled_jobs.contains(job)); // Job was pending
-    size_t resumed_workers = 0; // Number of workers resumed in the execution pool of the job
-    if (status == LoadStatus::OK)
-    {
-        // Notify waiters
-        resumed_workers += job->ok();
-        // Update dependent jobs and enqueue if ready
-        for (const auto & dep : scheduled_jobs[job].dependent_jobs)
-        {
-            chassert(scheduled_jobs.contains(dep)); // All depended jobs must be pending
-            Info & dep_info = scheduled_jobs[dep];
-            dep_info.dependencies_left--;
-            if (!dep_info.isBlocked())
-                enqueue(dep_info, dep, lock);
-        }
-    }
-    else
-    {
-        // Notify waiters
-        if (status == LoadStatus::FAILED)
-            resumed_workers += job->failed(exception_from_job);
-        else if (status == LoadStatus::CANCELED)
-            resumed_workers += job->canceled(exception_from_job);
-        Info & info = scheduled_jobs[job];
-        if (info.isReady())
-        {
-            pools[job->pool_id].ready_queue.erase(info.ready_seqno);
-            info.ready_seqno = 0;
-        }
-        // Recurse into all dependent jobs
-        LoadJobSet dependent;
-        dependent.swap(info.dependent_jobs); // To avoid container modification during recursion
-        for (const auto & dep : dependent)
-        {
-            if (!scheduled_jobs.contains(dep))
-                continue; // Job has already been canceled
-            std::exception_ptr e;
-            NOEXCEPT_SCOPE({
-                ALLOW_ALLOCATIONS_IN_SCOPE;
-                e = std::make_exception_ptr(
-                    Exception(ErrorCodes::ASYNC_LOAD_CANCELED,
-                        "Load job '{}' -> {}",
-                        dep->name,
-                        getExceptionMessage(exception_from_job, /* with_stacktrace = */ false)));
-            });
-            finish(dep, LoadStatus::CANCELED, e, lock);
-        }
-        // Clean dependency graph edges pointing to canceled jobs
-        for (const auto & dep : job->dependencies)
-            if (auto dep_info = scheduled_jobs.find(dep); dep_info != scheduled_jobs.end())
-                dep_info->second.dependent_jobs.erase(job);
-    }
+void AsyncLoader::finish(const LoadJobPtr & job, LoadStatus status, std::exception_ptr reason, std::unique_lock<std::mutex> & lock)
+{
+    chassert(scheduled_jobs.contains(job)); // Job was pending
+    // Notify waiters
+    size_t resumed_workers = 0; // Number of workers resumed in the execution pool of the job
+    if (status == LoadStatus::OK)
+        resumed_workers = job->ok();
+    else if (status == LoadStatus::FAILED)
+        resumed_workers = job->failed(reason);
+    else if (status == LoadStatus::CANCELED)
+        resumed_workers = job->canceled(reason);
+    // Adjust suspended workers count
+    if (resumed_workers)
+    {
+        Pool & pool = pools[job->executionPool()];
+        pool.suspended_workers -= resumed_workers;
+    }
+    Info & info = scheduled_jobs[job];
+    if (info.isReady())
+    {
+        // Job could be in ready queue (on cancel) -- must be dequeued
+        pools[job->pool_id].ready_queue.erase(info.ready_seqno);
+        info.ready_seqno = 0;
+    }
+    // To avoid container modification during recursion (during clean dependency graph edges below)
+    LoadJobSet dependent;
+    dependent.swap(info.dependent_jobs);
+    // Update dependent jobs
+    for (const auto & dpt : dependent)
+    {
+        if (auto dpt_info = scheduled_jobs.find(dpt); dpt_info != scheduled_jobs.end())
+        {
+            dpt_info->second.dependencies_left--;
+            if (!dpt_info->second.isBlocked())
+                enqueue(dpt_info->second, dpt, lock);
+            if (status != LoadStatus::OK)
+            {
+                std::exception_ptr cancel;
+                NOEXCEPT_SCOPE({
+                    ALLOW_ALLOCATIONS_IN_SCOPE;
+                    if (dpt->dependency_failure)
+                        dpt->dependency_failure(dpt, job, cancel);
+                });
+                // Recurse into dependent job if it should be canceled
+                if (cancel)
+                    finish(dpt, LoadStatus::CANCELED, cancel, lock);
+            }
+        }
+        else
+        {
+            // Job has already been canceled. Do not enter twice into the same job during finish recursion.
+            // This happens in {A<-B; A<-C; B<-D; C<-D} graph for D if A is failed or canceled.
+            chassert(status == LoadStatus::CANCELED);
+        }
+    }
+    // Clean dependency graph edges pointing to canceled jobs
+    if (status != LoadStatus::OK)
+    {
+        for (const auto & dep : job->dependencies)
+            if (auto dep_info = scheduled_jobs.find(dep); dep_info != scheduled_jobs.end())
+                dep_info->second.dependent_jobs.erase(job);
+    }
     // Job became finished
@@ -582,12 +613,6 @@ void AsyncLoader::finish(const LoadJobPtr & job, LoadStatus status, std::excepti
         if (log_progress)
             logAboutProgress(log, finished_jobs.size() - old_jobs, finished_jobs.size() + scheduled_jobs.size() - old_jobs, stopwatch);
     });
-    if (resumed_workers)
-    {
-        Pool & pool = pools[job->executionPool()];
-        pool.suspended_workers -= resumed_workers;
-    }
 }
 void AsyncLoader::prioritize(const LoadJobPtr & job, size_t new_pool_id, std::unique_lock<std::mutex> & lock)
@@ -612,6 +637,9 @@ void AsyncLoader::prioritize(const LoadJobPtr & job, size_t new_pool_id, std::un
     }
     job->pool_id.store(new_pool_id);
+    // TODO(serxa): we should adjust suspended_workers and suspended_waiters here.
+    // Otherwise suspended_workers we be left inconsistent. Fix it and add a test.
+    // Scenario: schedule a job A, wait for it from a job B in the same pool, prioritize A
     // Recurse into dependencies
     for (const auto & dep : job->dependencies)
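The dependency_failure hook replaces the hard-coded cancellation: each job now picks its policy when it is created. A hedged usage sketch, assuming only the declarations from AsyncLoader.h shown in the next file:

#include <Common/AsyncLoader.h>

using namespace DB;

void scheduleExample(AsyncLoader & loader)
{
    auto parent = makeLoadJob(LoadJobSet{}, "parent",
        ignoreDependencyFailure,   /// Never canceled because of a dependency.
        [](AsyncLoader &, const LoadJobPtr &) { /* work */ });

    auto child = makeLoadJob(LoadJobSet{parent}, "child",
        cancelOnDependencyFailure, /// Canceled (ASYNC_LOAD_CANCELED) if parent fails.
        [](AsyncLoader &, const LoadJobPtr &) { /* work */ });

    loader.schedule(LoadJobSet{parent, child});
}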


@@ -1,6 +1,7 @@
#pragma once

#include <condition_variable>
#include <concepts>
#include <exception>
#include <memory>
#include <map>

@@ -57,12 +58,13 @@ enum class LoadStatus
class LoadJob : private boost::noncopyable
{
public:
-    template <class Func, class LoadJobSetType>
-    LoadJob(LoadJobSetType && dependencies_, String name_, size_t pool_id_, Func && func_)
    template <class LoadJobSetType, class Func, class DFFunc>
    LoadJob(LoadJobSetType && dependencies_, String name_, size_t pool_id_, DFFunc && dependency_failure_, Func && func_)
        : dependencies(std::forward<LoadJobSetType>(dependencies_))
        , name(std::move(name_))
        , execution_pool_id(pool_id_)
        , pool_id(pool_id_)
        , dependency_failure(std::forward<DFFunc>(dependency_failure_))
        , func(std::forward<Func>(func_))
    {}
@@ -108,6 +110,14 @@ private:
    std::atomic<UInt64> job_id{0};
    std::atomic<size_t> execution_pool_id;
    std::atomic<size_t> pool_id;

    // Handler for failed or canceled dependencies.
    // If the job needs to be canceled on `dependency` failure, the function should set `cancel` to a specific reason.
    // Note that the implementation should be fast and cannot use AsyncLoader, because it is called under `AsyncLoader::mutex`.
    // Note that `dependency_failure` is called only on pending jobs.
    std::function<void(const LoadJobPtr & self, const LoadJobPtr & dependency, std::exception_ptr & cancel)> dependency_failure;

    // Function to be called to execute the job.
    std::function<void(AsyncLoader & loader, const LoadJobPtr & self)> func;

    mutable std::mutex mutex;
@@ -123,35 +133,54 @@ private:
    std::atomic<TimePoint> finish_time{TimePoint{}};
};
-struct EmptyJobFunc
-{
-    void operator()(AsyncLoader &, const LoadJobPtr &) {}
-};
-
-template <class Func = EmptyJobFunc>
-LoadJobPtr makeLoadJob(LoadJobSet && dependencies, String name, Func && func = EmptyJobFunc())
-{
-    return std::make_shared<LoadJob>(std::move(dependencies), std::move(name), 0, std::forward<Func>(func));
-}
-
-template <class Func = EmptyJobFunc>
-LoadJobPtr makeLoadJob(const LoadJobSet & dependencies, String name, Func && func = EmptyJobFunc())
-{
-    return std::make_shared<LoadJob>(dependencies, std::move(name), 0, std::forward<Func>(func));
-}
-
-template <class Func = EmptyJobFunc>
-LoadJobPtr makeLoadJob(LoadJobSet && dependencies, size_t pool_id, String name, Func && func = EmptyJobFunc())
-{
-    return std::make_shared<LoadJob>(std::move(dependencies), std::move(name), pool_id, std::forward<Func>(func));
-}
-
-template <class Func = EmptyJobFunc>
-LoadJobPtr makeLoadJob(const LoadJobSet & dependencies, size_t pool_id, String name, Func && func = EmptyJobFunc())
-{
-    return std::make_shared<LoadJob>(dependencies, std::move(name), pool_id, std::forward<Func>(func));
-}

// For LoadJob::dependency_failure. Cancels the job on the first dependency failure or cancel.
void cancelOnDependencyFailure(const LoadJobPtr & self, const LoadJobPtr & dependency, std::exception_ptr & cancel);

// For LoadJob::dependency_failure. Never cancels the job due to dependency failure or cancel.
void ignoreDependencyFailure(const LoadJobPtr & self, const LoadJobPtr & dependency, std::exception_ptr & cancel);

template <class F> concept LoadJobDependencyFailure = std::invocable<F, const LoadJobPtr &, const LoadJobPtr &, std::exception_ptr &>;
template <class F> concept LoadJobFunc = std::invocable<F, AsyncLoader &, const LoadJobPtr &>;

LoadJobPtr makeLoadJob(LoadJobSet && dependencies, String name, LoadJobDependencyFailure auto && dependency_failure, LoadJobFunc auto && func)
{
    return std::make_shared<LoadJob>(std::move(dependencies), std::move(name), 0, std::forward<decltype(dependency_failure)>(dependency_failure), std::forward<decltype(func)>(func));
}

LoadJobPtr makeLoadJob(const LoadJobSet & dependencies, String name, LoadJobDependencyFailure auto && dependency_failure, LoadJobFunc auto && func)
{
    return std::make_shared<LoadJob>(dependencies, std::move(name), 0, std::forward<decltype(dependency_failure)>(dependency_failure), std::forward<decltype(func)>(func));
}

LoadJobPtr makeLoadJob(LoadJobSet && dependencies, size_t pool_id, String name, LoadJobDependencyFailure auto && dependency_failure, LoadJobFunc auto && func)
{
    return std::make_shared<LoadJob>(std::move(dependencies), std::move(name), pool_id, std::forward<decltype(dependency_failure)>(dependency_failure), std::forward<decltype(func)>(func));
}

LoadJobPtr makeLoadJob(const LoadJobSet & dependencies, size_t pool_id, String name, LoadJobDependencyFailure auto && dependency_failure, LoadJobFunc auto && func)
{
    return std::make_shared<LoadJob>(dependencies, std::move(name), pool_id, std::forward<decltype(dependency_failure)>(dependency_failure), std::forward<decltype(func)>(func));
}

LoadJobPtr makeLoadJob(LoadJobSet && dependencies, String name, LoadJobFunc auto && func)
{
    return std::make_shared<LoadJob>(std::move(dependencies), std::move(name), 0, cancelOnDependencyFailure, std::forward<decltype(func)>(func));
}

LoadJobPtr makeLoadJob(const LoadJobSet & dependencies, String name, LoadJobFunc auto && func)
{
    return std::make_shared<LoadJob>(dependencies, std::move(name), 0, cancelOnDependencyFailure, std::forward<decltype(func)>(func));
}

LoadJobPtr makeLoadJob(LoadJobSet && dependencies, size_t pool_id, String name, LoadJobFunc auto && func)
{
    return std::make_shared<LoadJob>(std::move(dependencies), std::move(name), pool_id, cancelOnDependencyFailure, std::forward<decltype(func)>(func));
}

LoadJobPtr makeLoadJob(const LoadJobSet & dependencies, size_t pool_id, String name, LoadJobFunc auto && func)
{
    return std::make_shared<LoadJob>(dependencies, std::move(name), pool_id, cancelOnDependencyFailure, std::forward<decltype(func)>(func));
}
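For illustration, a hedged sketch of how the overloads combine (job names and bodies below are hypothetical, not part of this diff):

    // The overloads without an explicit handler default to cancelOnDependencyFailure.
    auto base = makeLoadJob({}, "hypothetical-base", [] (AsyncLoader &, const LoadJobPtr &)
    {
        // ... load something ...
    });

    // Canceled automatically if `base` fails or is canceled (default policy).
    auto strict = makeLoadJob({base}, "hypothetical-strict", [] (AsyncLoader &, const LoadJobPtr &) {});

    // Still runs even if `base` fails, because it opts into ignoreDependencyFailure.
    auto lenient = makeLoadJob({base}, "hypothetical-lenient", ignoreDependencyFailure, [] (AsyncLoader &, const LoadJobPtr &) {});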
// Represents a logically connected set of LoadJobs required to achieve some goals (final LoadJob in the set).
class LoadTask : private boost::noncopyable

@@ -277,7 +306,7 @@ private:
    {
        size_t dependencies_left = 0; // Current number of dependencies on pending jobs.
        UInt64 ready_seqno = 0; // Zero means that job is not in ready queue.
-        LoadJobSet dependent_jobs; // Set of jobs dependent on this job.
        LoadJobSet dependent_jobs; // Set of jobs dependent on this job. Contains only scheduled jobs.

        // Three independent states of a scheduled job.
        bool isBlocked() const { return dependencies_left > 0; }
@@ -371,7 +400,7 @@ public:
private:
    void checkCycle(const LoadJobSet & jobs, std::unique_lock<std::mutex> & lock);
    String checkCycle(const LoadJobPtr & job, LoadJobSet & left, LoadJobSet & visited, std::unique_lock<std::mutex> & lock);
-    void finish(const LoadJobPtr & job, LoadStatus status, std::exception_ptr exception_from_job, std::unique_lock<std::mutex> & lock);
    void finish(const LoadJobPtr & job, LoadStatus status, std::exception_ptr reason, std::unique_lock<std::mutex> & lock);
    void gatherNotScheduled(const LoadJobPtr & job, LoadJobSet & jobs, std::unique_lock<std::mutex> & lock);
    void prioritize(const LoadJobPtr & job, size_t new_pool_id, std::unique_lock<std::mutex> & lock);
    void enqueue(Info & info, const LoadJobPtr & job, std::unique_lock<std::mutex> & lock);


@@ -5,15 +5,15 @@
#include <Common/LRUCachePolicy.h>
#include <Common/SLRUCachePolicy.h>

#include <base/UUID.h>
#include <base/defines.h>

#include <atomic>
#include <cassert>
#include <chrono>
#include <memory>
#include <mutex>
#include <optional>
#include <unordered_map>
-#include <base/defines.h>

namespace DB
{

@@ -227,10 +227,10 @@ public:
        cache_policy->setMaxSizeInBytes(max_size_in_bytes);
    }

-    void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries)
    void setQuotaForUser(const UUID & user_id, size_t max_size_in_bytes, size_t max_entries)
    {
        std::lock_guard lock(mutex);
-        cache_policy->setQuotaForUser(user_name, max_size_in_bytes, max_entries);
        cache_policy->setQuotaForUser(user_id, max_size_in_bytes, max_entries);
    }

    virtual ~CacheBase() = default;


@@ -1,10 +1,11 @@
#pragma once

#include <list>
#include <memory>
#include <mutex>
#include <optional>

#include <base/types.h>
#include <boost/core/noncopyable.hpp>
-#include <mutex>
-#include <memory>
-#include <list>

namespace DB


@@ -93,7 +93,10 @@ inline bool cpuid(UInt32 op, UInt32 * res) noexcept /// NOLINT
    OP(CLFLUSHOPT) \
    OP(CLWB) \
    OP(XSAVE) \
-    OP(OSXSAVE)
    OP(OSXSAVE) \
    OP(AMXBF16) \
    OP(AMXTILE) \
    OP(AMXINT8)

union CpuInfo
{

@@ -313,6 +316,35 @@ bool haveRDRAND() noexcept
    return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x1).registers.ecx >> 30) & 1u);
}
inline bool haveAMX() noexcept
{
#if defined(__x86_64__) || defined(__i386__)
    // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
    return haveOSXSAVE() // implies haveXSAVE()
        && ((our_xgetbv(0) >> 17) & 0x3) == 0x3; // AMX state is enabled by the OS
#else
    return false;
#endif
}
bool haveAMXBF16() noexcept
{
return haveAMX()
&& ((CpuInfo(0x7, 0).registers.edx >> 22) & 1u); // AMX-BF16 bit
}
bool haveAMXTILE() noexcept
{
return haveAMX()
&& ((CpuInfo(0x7, 0).registers.edx >> 24) & 1u); // AMX-TILE bit
}
bool haveAMXINT8() noexcept
{
return haveAMX()
&& ((CpuInfo(0x7, 0).registers.edx >> 25) & 1u); // AMX-INT8 bit
}
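A hedged sketch of a runtime check against the new flags (the helper is hypothetical, and namespace qualification is omitted; CpuFlagsCache is defined just below):

    // Hypothetical call site: prefer an AMX int8 kernel when available.
    // have_AMXINT8 is generated by DEF_NAME from the OP list above, and
    // haveAMXINT8() already folds in the haveAMX() OS-enablement check.
    bool canUseAmxInt8Kernel()
    {
        return CpuFlagsCache::have_AMXINT8;
    }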
struct CpuFlagsCache
{
#define DEF_NAME(X) static inline bool have_##X = have##X();


@@ -242,7 +242,7 @@
    M(FilesystemCacheDelayedCleanupElements, "Filesystem cache elements in background cleanup queue") \
    M(FilesystemCacheHoldFileSegments, "Filesystem cache file segments which are currently held as unreleasable") \
    M(AsyncInsertCacheSize, "Number of async insert hash id in cache") \
-    M(S3Requests, "S3 requests") \
    M(S3Requests, "S3 requests count") \
    M(KeeperAliveConnections, "Number of alive connections") \
    M(KeeperOutstandingRequets, "Number of outstanding requests") \
    M(ThreadsInOvercommitTracker, "Number of waiting threads inside of OvercommitTracker") \


@@ -2,6 +2,7 @@
#include <Common/DateLUT.h>
#include <Common/Exception.h>
#include <Common/re2.h>

#include <chrono>
#include <filesystem>

@@ -11,15 +12,6 @@
#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/replace.hpp>

-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
-#endif
-#include <re2/re2.h>
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif

namespace fs = std::filesystem;

namespace DB


@@ -9,7 +9,8 @@ namespace ErrorCodes
    extern const int LOGICAL_ERROR;
}

-std::function<Priority(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const
GetPriorityForLoadBalancing::Func
GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const
{
    std::function<Priority(size_t index)> get_priority;
    switch (load_balance)

@@ -33,19 +34,26 @@ std::function<Priority(size_t index)> GetPriorityForLoadBalancingFu
            get_priority = [offset](size_t i) { return i != offset ? Priority{1} : Priority{0}; };
            break;
        case LoadBalancing::ROUND_ROBIN:
-            if (last_used >= pool_size)
-                last_used = 0;
            auto local_last_used = last_used % pool_size;
            ++last_used;

-            /* Consider pool_size equals to 5
-             * last_used = 1 -> get_priority: 0 1 2 3 4
-             * last_used = 2 -> get_priority: 4 0 1 2 3
-             * last_used = 3 -> get_priority: 4 3 0 1 2
-             * ...
-             */
            // Example: pool_size = 5
            // | local_last_used | i=0 | i=1 | i=2 | i=3 | i=4 |
            // | 0               | 4   | 0   | 1   | 2   | 3   |
            // | 1               | 3   | 4   | 0   | 1   | 2   |
            // | 2               | 2   | 3   | 4   | 0   | 1   |
            // | 3               | 1   | 2   | 3   | 4   | 0   |
            // | 4               | 0   | 1   | 2   | 3   | 4   |
-            get_priority = [this, pool_size](size_t i)
            get_priority = [pool_size, local_last_used](size_t i)
            {
-                ++i; // To make `i` indexing start with 1 instead of 0 as `last_used` does
-                return Priority{static_cast<Int64>(i < last_used ? pool_size - i : i - last_used)};
                size_t priority = pool_size - 1;
                if (i < local_last_used)
                    priority = pool_size - 1 - (local_last_used - i);
                if (i > local_last_used)
                    priority = i - local_last_used - 1;
                return Priority{static_cast<Int64>(priority)};
            };
            break;
    }
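The new branch-based formula can be checked standalone against the comment table above; a minimal self-contained sketch (plain C++, outside ClickHouse):

    #include <cstddef>
    #include <cstdio>

    // Recomputes the priority table for pool_size = 5 with the new formula.
    int main()
    {
        const size_t pool_size = 5;
        for (size_t local_last_used = 0; local_last_used < pool_size; ++local_last_used)
        {
            for (size_t i = 0; i < pool_size; ++i)
            {
                size_t priority = pool_size - 1; // case i == local_last_used
                if (i < local_last_used)
                    priority = pool_size - 1 - (local_last_used - i);
                if (i > local_last_used)
                    priority = i - local_last_used - 1;
                std::printf("%zu ", priority);
            }
            std::printf("\n"); // prints rows: 4 0 1 2 3 / 3 4 0 1 2 / ... as in the table
        }
    }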


@@ -8,7 +8,12 @@ namespace DB
class GetPriorityForLoadBalancing
{
public:
    using Func = std::function<Priority(size_t index)>;

-    explicit GetPriorityForLoadBalancing(LoadBalancing load_balancing_) : load_balancing(load_balancing_) {}
    explicit GetPriorityForLoadBalancing(LoadBalancing load_balancing_, size_t last_used_ = 0)
        : load_balancing(load_balancing_), last_used(last_used_)
    {
    }
    GetPriorityForLoadBalancing() = default;

    bool operator == (const GetPriorityForLoadBalancing & other) const

@@ -23,7 +28,7 @@ public:
        return !(*this == other);
    }

-    std::function<Priority(size_t index)> getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const;
    Func getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const;

    std::vector<size_t> hostname_prefix_distance; /// Prefix distances from name of this host to the names of hosts of pools.
    std::vector<size_t> hostname_levenshtein_distance; /// Levenshtein distances from name of this host to the names of hosts of pools.


@@ -1,15 +1,7 @@
#include <Common/HTTPHeaderFilter.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/Exception.h>
#include <Common/re2.h>

-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
-#endif
-#include <re2/re2.h>
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif

namespace DB
{
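The pragma dance removed here (and in the files above) is presumably what the new Common/re2.h centralizes; a sketch of such a wrapper, inferred from the code it replaces rather than shown in this diff:

    // Hypothetical content of src/Common/re2.h, inferred from the removed blocks.
    #pragma once

    #ifdef __clang__
    # pragma clang diagnostic push
    # pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
    #endif
    #include <re2/re2.h>
    #ifdef __clang__
    # pragma clang diagnostic pop
    #endif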


@@ -2,10 +2,11 @@
#include <Common/Exception.h>
#include <Common/ICachePolicyUserQuota.h>
#include <base/UUID.h>

#include <functional>
#include <memory>
-#include <mutex>
#include <optional>

namespace DB
{

@@ -43,7 +44,7 @@ public:
    virtual void setMaxCount(size_t /*max_count*/) = 0;
    virtual void setMaxSizeInBytes(size_t /*max_size_in_bytes*/) = 0;
-    virtual void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries) { user_quotas->setQuotaForUser(user_name, max_size_in_bytes, max_entries); }
    virtual void setQuotaForUser(const UUID & user_id, size_t max_size_in_bytes, size_t max_entries) { user_quotas->setQuotaForUser(user_id, max_size_in_bytes, max_entries); }

    /// HashFunction usually hashes the entire key and the found key will be equal the provided key. In such cases, use get(). It is also
    /// possible to store other, non-hashed data in the key. In that case, the found key is potentially different from the provided key.


@@ -1,5 +1,6 @@
#pragma once

#include <base/UUID.h>
#include <base/types.h>

namespace DB

@@ -15,14 +16,14 @@ class ICachePolicyUserQuota
{
public:
    /// Register or update the user's quota for the given resource.
-    virtual void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries) = 0;
    virtual void setQuotaForUser(const UUID & user_id, size_t max_size_in_bytes, size_t max_entries) = 0;

    /// Update the actual resource usage for the given user.
-    virtual void increaseActual(const String & user_name, size_t entry_size_in_bytes) = 0;
    virtual void increaseActual(const UUID & user_id, size_t entry_size_in_bytes) = 0;
-    virtual void decreaseActual(const String & user_name, size_t entry_size_in_bytes) = 0;
    virtual void decreaseActual(const UUID & user_id, size_t entry_size_in_bytes) = 0;

    /// Is the user allowed to write a new entry into the cache?
-    virtual bool approveWrite(const String & user_name, size_t entry_size_in_bytes) const = 0;
    virtual bool approveWrite(const UUID & user_id, size_t entry_size_in_bytes) const = 0;

    virtual ~ICachePolicyUserQuota() = default;
};

@@ -33,10 +34,10 @@ using CachePolicyUserQuotaPtr = std::unique_ptr<ICachePolicyUserQuota>;
class NoCachePolicyUserQuota : public ICachePolicyUserQuota
{
public:
-    void setQuotaForUser(const String & /*user_name*/, size_t /*max_size_in_bytes*/, size_t /*max_entries*/) override {}
    void setQuotaForUser(const UUID & /*user_id*/, size_t /*max_size_in_bytes*/, size_t /*max_entries*/) override {}
-    void increaseActual(const String & /*user_name*/, size_t /*entry_size_in_bytes*/) override {}
    void increaseActual(const UUID & /*user_id*/, size_t /*entry_size_in_bytes*/) override {}
-    void decreaseActual(const String & /*user_name*/, size_t /*entry_size_in_bytes*/) override {}
    void decreaseActual(const UUID & /*user_id*/, size_t /*entry_size_in_bytes*/) override {}
-    bool approveWrite(const String & /*user_name*/, size_t /*entry_size_in_bytes*/) const override { return true; }
    bool approveWrite(const UUID & /*user_id*/, size_t /*entry_size_in_bytes*/) const override { return true; }
};
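For context, a concrete policy would presumably consult these methods around each insert; a hedged sketch with a hypothetical helper:

    // Hypothetical helper: admit a cache write for `user_id` only if the quota allows it.
    bool tryAccountWrite(DB::ICachePolicyUserQuota & quotas, const DB::UUID & user_id, size_t entry_size_in_bytes)
    {
        if (!quotas.approveWrite(user_id, entry_size_in_bytes))
            return false; // over quota, the caller should skip caching this entry
        quotas.increaseActual(user_id, entry_size_in_bytes);
        return true;
    }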

src/Common/Jemalloc.cpp (new file, 88 lines)

@@ -0,0 +1,88 @@
#include <Common/Jemalloc.h>
#if USE_JEMALLOC
#include <Common/Stopwatch.h>
#include <Common/logger_useful.h>
#include <jemalloc/jemalloc.h>
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
namespace ProfileEvents
{
extern const Event MemoryAllocatorPurge;
extern const Event MemoryAllocatorPurgeTimeMicroseconds;
}
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}
void purgeJemallocArenas()
{
LOG_TRACE(&Poco::Logger::get("SystemJemalloc"), "Purging unused memory");
Stopwatch watch;
mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0);
ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge);
ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurgeTimeMicroseconds, watch.elapsedMicroseconds());
}
void checkJemallocProfilingEnabled()
{
bool active = true;
size_t active_size = sizeof(active);
mallctl("opt.prof", &active, &active_size, nullptr, 0);
if (!active)
        throw Exception(
            ErrorCodes::BAD_ARGUMENTS,
            "ClickHouse was started without enabling profiling for jemalloc. To use jemalloc's profiler, the following env variable should "
            "be set: MALLOC_CONF=background_thread:true,prof:true");
}
void setJemallocProfileActive(bool value)
{
checkJemallocProfilingEnabled();
bool active = true;
size_t active_size = sizeof(active);
mallctl("prof.active", &active, &active_size, nullptr, 0);
if (active == value)
{
LOG_TRACE(&Poco::Logger::get("SystemJemalloc"), "Profiling is already {}", active ? "enabled" : "disabled");
return;
}
mallctl("prof.active", nullptr, nullptr, &value, sizeof(bool));
LOG_TRACE(&Poco::Logger::get("SystemJemalloc"), "Profiling is {}", value ? "enabled" : "disabled");
}
std::string flushJemallocProfile(const std::string & file_prefix)
{
checkJemallocProfilingEnabled();
char * prefix_buffer;
size_t prefix_size = sizeof(prefix_buffer);
int n = mallctl("opt.prof_prefix", &prefix_buffer, &prefix_size, nullptr, 0);
if (!n && std::string_view(prefix_buffer) != "jeprof")
{
LOG_TRACE(&Poco::Logger::get("SystemJemalloc"), "Flushing memory profile with prefix {}", prefix_buffer);
mallctl("prof.dump", nullptr, nullptr, nullptr, 0);
return prefix_buffer;
}
static std::atomic<size_t> profile_counter{0};
std::string profile_dump_path = fmt::format("{}.{}.{}.heap", file_prefix, getpid(), profile_counter.fetch_add(1));
const auto * profile_dump_path_str = profile_dump_path.c_str();
LOG_TRACE(&Poco::Logger::get("SystemJemalloc"), "Flushing memory profile to {}", profile_dump_path_str);
mallctl("prof.dump", nullptr, nullptr, &profile_dump_path_str, sizeof(profile_dump_path_str));
return profile_dump_path;
}
}
#endif

src/Common/Jemalloc.h (new file, 22 lines)

@@ -0,0 +1,22 @@
#pragma once
#include "config.h"
#if USE_JEMALLOC
#include <string>
namespace DB
{
void purgeJemallocArenas();
void checkJemallocProfilingEnabled();
void setJemallocProfileActive(bool value);
std::string flushJemallocProfile(const std::string & file_prefix);
}
#endif
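A hedged usage sketch of the new helpers (the wrapper function and the file prefix are hypothetical):

    #include <Common/Jemalloc.h>

    #if USE_JEMALLOC
    // Requires starting the server with MALLOC_CONF=background_thread:true,prof:true,
    // otherwise checkJemallocProfilingEnabled() throws BAD_ARGUMENTS.
    void dumpHeapProfileExample()
    {
        DB::setJemallocProfileActive(true);                     // start sampling
        // ... run the workload to be profiled ...
        std::string path = DB::flushJemallocProfile("/tmp/ch"); // writes e.g. /tmp/ch.<pid>.<n>.heap
        DB::purgeJemallocArenas();                              // optionally release unused memory to the OS
    }
    #endif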


@@ -0,0 +1,494 @@
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
# pragma clang diagnostic ignored "-Wgnu-anonymous-struct"
# pragma clang diagnostic ignored "-Wnested-anon-types"
# pragma clang diagnostic ignored "-Wunused-parameter"
# pragma clang diagnostic ignored "-Wshadow-field-in-constructor"
# pragma clang diagnostic ignored "-Wdtor-name"
#endif
#include <re2/re2.h>
#include <re2/regexp.h>
#include <re2/walker-inl.h>
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#ifdef LOG_INFO
#undef LOG_INFO
#undef LOG_WARNING
#undef LOG_ERROR
#undef LOG_FATAL
#endif
#include "MatchGenerator.h"
#include <Common/Exception.h>
#include <Common/thread_local_rng.h>
#include <map>
#include <functional>
#include <magic_enum.hpp>
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int LOGICAL_ERROR;
}
}
namespace re2
{
class RandomStringPrepareWalker : public Regexp::Walker<Regexp *>
{
private:
static constexpr int ImplicitMax = 100;
using Children = std::vector<Regexp *>;
class Generators;
/// These function objects look much prettier than lambda expressions when stack traces are printed
class NodeFunction
{
public:
virtual size_t operator() (char * out, size_t size) = 0;
virtual size_t getRequiredSize() = 0;
virtual ~NodeFunction() = default;
};
using NodeFunctionPtr = std::shared_ptr<NodeFunction>;
using NodeFuncs = std::vector<NodeFunctionPtr>;
static NodeFuncs getFuncs(const Children & children_, const Generators & generators_)
{
NodeFuncs result;
result.reserve(children_.size());
for (auto * child: children_)
{
result.push_back(generators_.at(child));
}
return result;
}
class Generators: public std::map<re2::Regexp *, NodeFunctionPtr> {};
class RegexpConcatFunction : public NodeFunction
{
public:
RegexpConcatFunction(const Children & children_, const Generators & generators_)
: children(getFuncs(children_, generators_))
{
}
size_t operator () (char * out, size_t size) override
{
size_t total_size = 0;
for (auto & child: children)
{
size_t consumed = child->operator()(out, size);
chassert(consumed <= size);
out += consumed;
size -= consumed;
total_size += consumed;
}
return total_size;
}
size_t getRequiredSize() override
{
size_t total_size = 0;
for (auto & child: children)
total_size += child->getRequiredSize();
return total_size;
}
private:
NodeFuncs children;
};
class RegexpAlternateFunction : public NodeFunction
{
public:
RegexpAlternateFunction(const Children & children_, const Generators & generators_)
: children(getFuncs(children_, generators_))
{
}
size_t operator () (char * out, size_t size) override
{
std::uniform_int_distribution<int> distribution(0, static_cast<int>(children.size()-1));
int chosen = distribution(thread_local_rng);
size_t consumed = children[chosen]->operator()(out, size);
chassert(consumed <= size);
return consumed;
}
size_t getRequiredSize() override
{
size_t total_size = 0;
for (auto & child: children)
total_size = std::max(total_size, child->getRequiredSize());
return total_size;
}
private:
NodeFuncs children;
};
class RegexpRepeatFunction : public NodeFunction
{
public:
RegexpRepeatFunction(Regexp * re_, const Generators & generators_, int min_repeat_, int max_repeat_)
: func(generators_.at(re_))
, min_repeat(min_repeat_)
, max_repeat(max_repeat_)
{
}
size_t operator () (char * out, size_t size) override
{
std::uniform_int_distribution<int> distribution(min_repeat, max_repeat);
int ntimes = distribution(thread_local_rng);
size_t total_size = 0;
for (int i = 0; i < ntimes; ++i)
{
size_t consumed = func->operator()(out, size);
chassert(consumed <= size);
out += consumed;
size -= consumed;
total_size += consumed;
}
return total_size;
}
size_t getRequiredSize() override
{
return max_repeat * func->getRequiredSize();
}
private:
NodeFunctionPtr func;
int min_repeat = 0;
int max_repeat = 0;
};
class RegexpCharClassFunction : public NodeFunction
{
using CharRanges = std::vector<std::pair<re2::Rune, re2::Rune>>;
public:
explicit RegexpCharClassFunction(Regexp * re_)
{
CharClass * cc = re_->cc();
chassert(cc);
if (cc->empty())
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "kRegexpCharClass is empty");
char_count = cc->size();
char_ranges.reserve(std::distance(cc->begin(), cc->end()));
for (const auto range: *cc)
{
char_ranges.emplace_back(range.lo, range.hi);
}
}
size_t operator () (char * out, size_t size) override
{
chassert(UTFmax <= size);
std::uniform_int_distribution<int> distribution(1, char_count);
int chosen = distribution(thread_local_rng);
int count_down = chosen;
auto it = char_ranges.begin();
for (; it != char_ranges.end(); ++it)
{
auto [lo, hi] = *it;
auto range_len = hi - lo + 1;
if (count_down <= range_len)
break;
count_down -= range_len;
}
if (it == char_ranges.end())
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR,
"Unable to choose the rune. Runes {}, ranges {}, chosen {}",
char_count, char_ranges.size(), chosen);
auto [lo, _] = *it;
Rune r = lo + count_down - 1;
return re2::runetochar(out, &r);
}
size_t getRequiredSize() override
{
return UTFmax;
}
private:
int char_count = 0;
CharRanges char_ranges;
};
class RegexpLiteralStringFunction : public NodeFunction
{
public:
explicit RegexpLiteralStringFunction(Regexp * re_)
{
if (re_->nrunes() == 0)
return;
char buffer[UTFmax];
for (int i = 0; i < re_->nrunes(); ++i)
{
int n = re2::runetochar(buffer, &re_->runes()[i]);
literal_string += String(buffer, n);
}
}
size_t operator () (char * out, size_t size) override
{
chassert(literal_string.size() <= size);
memcpy(out, literal_string.data(), literal_string.size());
return literal_string.size();
}
size_t getRequiredSize() override
{
return literal_string.size();
}
private:
String literal_string;
};
class RegexpLiteralFunction : public NodeFunction
{
public:
explicit RegexpLiteralFunction(Regexp * re_)
{
char buffer[UTFmax];
Rune r = re_->rune();
int n = re2::runetochar(buffer, &r);
literal = String(buffer, n);
}
size_t operator () (char * out, size_t size) override
{
chassert(literal.size() <= size);
memcpy(out, literal.data(), literal.size());
return literal.size();
}
size_t getRequiredSize() override
{
return literal.size();
}
private:
String literal;
};
class ThrowExceptionFunction : public NodeFunction
{
public:
explicit ThrowExceptionFunction(Regexp * re_)
: operation(magic_enum::enum_name(re_->op()))
{
}
size_t operator () (char *, size_t) override
{
throw DB::Exception(
DB::ErrorCodes::BAD_ARGUMENTS,
"RandomStringPrepareWalker: regexp node '{}' is not supported for generating a random match",
operation);
}
size_t getRequiredSize() override
{
return 0;
}
private:
String operation;
};
public:
std::function<String()> getGenerator()
{
if (root == nullptr)
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "no root has been set");
if (generators.empty())
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "no generators");
auto root_func = generators.at(root);
auto required_buffer_size = root_func->getRequiredSize();
auto generator_func = [=] ()
-> String
{
auto buffer = String(required_buffer_size, '\0');
size_t size = root_func->operator()(buffer.data(), buffer.size());
buffer.resize(size);
return buffer;
};
root = nullptr;
generators = {};
return std::move(generator_func);
}
private:
Children CopyChildrenArgs(Regexp ** children, int nchild)
{
Children result;
result.reserve(nchild);
for (int i = 0; i < nchild; ++i)
result.push_back(Copy(children[i]));
return result;
}
Regexp * ShortVisit(Regexp* /*re*/, Regexp * /*parent_arg*/) override
{
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "ShortVisit should not be called");
}
Regexp * PreVisit(Regexp * re, Regexp * parent_arg, bool* /*stop*/) override /*noexcept*/
{
if (parent_arg == nullptr)
{
chassert(root == nullptr);
chassert(re != nullptr);
root = re;
}
return re;
}
Regexp * PostVisit(Regexp * re, Regexp * /*parent_arg*/, Regexp * pre_arg,
Regexp ** child_args, int nchild_args) override /*noexcept*/
{
switch (re->op())
{
case kRegexpConcat: // Matches concatenation of sub_[0..nsub-1].
generators[re] = std::make_shared<RegexpConcatFunction>(CopyChildrenArgs(child_args, nchild_args), generators);
break;
case kRegexpAlternate: // Matches union of sub_[0..nsub-1].
generators[re] = std::make_shared<RegexpAlternateFunction>(CopyChildrenArgs(child_args, nchild_args), generators);
break;
case kRegexpQuest: // Matches sub_[0] zero or one times.
chassert(nchild_args == 1);
generators[re] = std::make_shared<RegexpRepeatFunction>(child_args[0], generators, 0, 1);
break;
case kRegexpStar: // Matches sub_[0] zero or more times.
chassert(nchild_args == 1);
generators[re] = std::make_shared<RegexpRepeatFunction>(child_args[0], generators, 0, ImplicitMax);
break;
case kRegexpPlus: // Matches sub_[0] one or more times.
chassert(nchild_args == 1);
generators[re] = std::make_shared<RegexpRepeatFunction>(child_args[0], generators, 1, ImplicitMax);
break;
case kRegexpCharClass: // Matches character class given by cc_.
chassert(nchild_args == 0);
generators[re] = std::make_shared<RegexpCharClassFunction>(re);
break;
case kRegexpLiteralString: // Matches runes_.
chassert(nchild_args == 0);
generators[re] = std::make_shared<RegexpLiteralStringFunction>(re);
break;
case kRegexpLiteral: // Matches rune_.
chassert(nchild_args == 0);
generators[re] = std::make_shared<RegexpLiteralFunction>(re);
break;
case kRegexpCapture: // Parenthesized (capturing) subexpression.
chassert(nchild_args == 1);
generators[re] = generators.at(child_args[0]);
break;
case kRegexpNoMatch: // Matches no strings.
case kRegexpEmptyMatch: // Matches empty string.
case kRegexpRepeat: // Matches sub_[0] at least min_ times, at most max_ times.
case kRegexpAnyChar: // Matches any character.
case kRegexpAnyByte: // Matches any byte [sic].
case kRegexpBeginLine: // Matches empty string at beginning of line.
case kRegexpEndLine: // Matches empty string at end of line.
case kRegexpWordBoundary: // Matches word boundary "\b".
case kRegexpNoWordBoundary: // Matches not-a-word boundary "\B".
case kRegexpBeginText: // Matches empty string at beginning of text.
case kRegexpEndText: // Matches empty string at end of text.
case kRegexpHaveMatch: // Forces match of entire expression
generators[re] = std::make_shared<ThrowExceptionFunction>(re);
}
return pre_arg;
}
Regexp * root = nullptr;
Generators generators;
};
}
namespace DB
{
void RandomStringGeneratorByRegexp::RegexpPtrDeleter::operator() (re2::Regexp * re) const noexcept
{
re->Decref();
}
RandomStringGeneratorByRegexp::RandomStringGeneratorByRegexp(const String & re_str)
{
re2::RE2::Options options;
options.set_case_sensitive(true);
options.set_encoding(re2::RE2::Options::EncodingLatin1);
auto flags = static_cast<re2::Regexp::ParseFlags>(options.ParseFlags());
re2::RegexpStatus status;
regexp.reset(re2::Regexp::Parse(re_str, flags, &status));
if (!regexp)
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS,
"Error parsing regexp '{}': {}",
re_str, status.Text());
regexp.reset(regexp->Simplify());
auto walker = re2::RandomStringPrepareWalker();
walker.Walk(regexp.get(), {});
generatorFunc = walker.getGenerator();
{
auto test_check = generate();
auto matched = RE2::FullMatch(test_check, re2::RE2(re_str));
if (!matched)
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS,
"Generator is unable to produce random string for regexp '{}': {}",
re_str, test_check);
}
}
String RandomStringGeneratorByRegexp::generate() const
{
chassert(generatorFunc);
return generatorFunc();
}
}


@@ -0,0 +1,31 @@
#pragma once
#include <base/types.h>
#include <memory>
namespace re2
{
class Regexp;
}
namespace DB
{
class RandomStringGeneratorByRegexp
{
public:
explicit RandomStringGeneratorByRegexp(const String & re_str);
String generate() const;
private:
struct RegexpPtrDeleter
{
void operator()(re2::Regexp * re) const noexcept;
};
using RegexpPtr = std::unique_ptr<re2::Regexp, RegexpPtrDeleter>;
RegexpPtr regexp;
std::function<String()> generatorFunc;
};
}
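Usage is straightforward; a short sketch (the pattern is an arbitrary example):

    #include <Common/MatchGenerator.h>

    // The constructor parses and simplifies the regexp and self-checks one
    // generated sample against it; generate() then returns a fresh match per call.
    DB::RandomStringGeneratorByRegexp generator("prefix-[a-z]{3}/[0-9]{2}");
    std::string s1 = generator.generate(); // e.g. "prefix-abc/07"
    std::string s2 = generator.generate(); // a new random match each call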


@@ -65,4 +65,5 @@ ObjectStorageKey ObjectStorageKey::createAsAbsolute(String key_)
    object_key.is_relative = false;
    return object_key;
}

}


@@ -0,0 +1,94 @@
#include "ObjectStorageKeyGenerator.h"
#include <Common/getRandomASCIIString.h>
#include <Common/MatchGenerator.h>
#include <fmt/format.h>
class GeneratorWithTemplate : public DB::IObjectStorageKeysGenerator
{
public:
explicit GeneratorWithTemplate(String key_template_)
: key_template(std::move(key_template_))
, re_gen(key_template)
{
}
DB::ObjectStorageKey generate(const String &) const override
{
return DB::ObjectStorageKey::createAsAbsolute(re_gen.generate());
}
private:
String key_template;
DB::RandomStringGeneratorByRegexp re_gen;
};
class GeneratorWithPrefix : public DB::IObjectStorageKeysGenerator
{
public:
explicit GeneratorWithPrefix(String key_prefix_)
: key_prefix(std::move(key_prefix_))
{}
DB::ObjectStorageKey generate(const String &) const override
{
        /// Path to store the new S3 object.
        /// Total length is 32 a-z characters for enough randomness.
        /// First 3 characters are used as a prefix for
        /// https://aws.amazon.com/premiumsupport/knowledge-center/s3-object-key-naming-pattern/
        constexpr size_t key_name_total_size = 32;
        constexpr size_t key_name_prefix_size = 3;

        String key = fmt::format("{}/{}",
            DB::getRandomASCIIString(key_name_prefix_size),
            DB::getRandomASCIIString(key_name_total_size - key_name_prefix_size));

        /// Whatever the key_prefix value is, treat the generated key as relative to it.
        return DB::ObjectStorageKey::createAsRelative(key_prefix, key);
}
private:
String key_prefix;
};
class GeneratorAsIsWithPrefix : public DB::IObjectStorageKeysGenerator
{
public:
explicit GeneratorAsIsWithPrefix(String key_prefix_)
: key_prefix(std::move(key_prefix_))
{}
DB::ObjectStorageKey generate(const String & path) const override
{
return DB::ObjectStorageKey::createAsRelative(key_prefix, path);
}
private:
String key_prefix;
};
namespace DB
{
ObjectStorageKeysGeneratorPtr createObjectStorageKeysGeneratorAsIsWithPrefix(String key_prefix)
{
return std::make_shared<GeneratorAsIsWithPrefix>(std::move(key_prefix));
}
ObjectStorageKeysGeneratorPtr createObjectStorageKeysGeneratorByPrefix(String key_prefix)
{
return std::make_shared<GeneratorWithPrefix>(std::move(key_prefix));
}
ObjectStorageKeysGeneratorPtr createObjectStorageKeysGeneratorByTemplate(String key_template)
{
return std::make_shared<GeneratorWithTemplate>(std::move(key_template));
}
}


@@ -0,0 +1,22 @@
#pragma once
#include "ObjectStorageKey.h"
#include <memory>
namespace DB
{
class IObjectStorageKeysGenerator
{
public:
virtual ObjectStorageKey generate(const String & path) const = 0;
virtual ~IObjectStorageKeysGenerator() = default;
};
using ObjectStorageKeysGeneratorPtr = std::shared_ptr<IObjectStorageKeysGenerator>;
ObjectStorageKeysGeneratorPtr createObjectStorageKeysGeneratorAsIsWithPrefix(String key_prefix);
ObjectStorageKeysGeneratorPtr createObjectStorageKeysGeneratorByPrefix(String key_prefix);
ObjectStorageKeysGeneratorPtr createObjectStorageKeysGeneratorByTemplate(String key_template);
}
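A brief sketch of the three factories (prefix and template strings are arbitrary examples):

    // Template-based: every key is an absolute, freshly generated match of the template.
    auto by_template = DB::createObjectStorageKeysGeneratorByTemplate("data/[a-z]{3}/[a-z]{29}");
    DB::ObjectStorageKey k1 = by_template->generate("logical/path"); // argument is ignored

    // Prefix-based: 3 + 29 random a-z characters stored relative to the prefix.
    auto by_prefix = DB::createObjectStorageKeysGeneratorByPrefix("my-disk/");
    DB::ObjectStorageKey k2 = by_prefix->generate("logical/path"); // argument is ignored

    // As-is: the logical path itself is kept, relative to the prefix.
    auto as_is = DB::createObjectStorageKeysGeneratorAsIsWithPrefix("my-disk/");
    DB::ObjectStorageKey k3 = as_is->generate("kept/logical/path");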


@@ -484,7 +484,7 @@ OptimizedRegularExpression::OptimizedRegularExpression(const std::string & regex
    if (!is_trivial)
    {
        /// Compile the re2 regular expression.
-        typename re2::RE2::Options regexp_options;
        re2::RE2::Options regexp_options;

        /// Never write error messages to stderr. It's ignorant to do it from library code.
        regexp_options.set_log_errors(false);


@@ -5,17 +5,9 @@
#include <memory>
#include <optional>
#include <Common/StringSearcher.h>
#include <Common/re2.h>
#include "config.h"

-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
-#endif
-#include <re2/re2.h>
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif

/** Uses two ways to optimize a regular expression:
 * 1. If the regular expression is trivial (reduces to finding a substring in a string),
 *    then replaces the search with strstr or strcasestr.


@@ -124,7 +124,9 @@ public:
        size_t max_ignored_errors,
        bool fallback_to_stale_replicas,
        const TryGetEntryFunc & try_get_entry,
-        const GetPriorityFunc & get_priority = GetPriorityFunc());
        const GetPriorityFunc & get_priority);

    size_t getPoolSize() const { return nested_pools.size(); }

protected:

@@ -147,7 +149,7 @@ protected:
        return std::make_tuple(shared_pool_states, nested_pools, last_error_decrease_time);
    }

-    NestedPools nested_pools;
    const NestedPools nested_pools;

    const time_t decrease_error_period;
    const size_t max_error_cap;


@@ -391,6 +391,9 @@ The server successfully detected this situation and will download merged part fr
    M(DiskS3PutObject, "Number of DiskS3 API PutObject calls.") \
    M(DiskS3GetObject, "Number of DiskS3 API GetObject calls.") \
    \
    M(S3Clients, "Number of created S3 clients.") \
    M(TinyS3Clients, "Number of S3 client copies which reuse an existing auth provider from another client.") \
    \
    M(EngineFileLikeReadFiles, "Number of files read in table engines working with files (like File/S3/URL/HDFS).") \
    \
    M(ReadBufferFromS3Microseconds, "Time spent on reading from S3.") \

@@ -599,6 +602,19 @@ The server successfully detected this situation and will download merged part fr
    M(LogError, "Number of log messages with level Error") \
    M(LogFatal, "Number of log messages with level Fatal") \
    \
    M(InterfaceHTTPSendBytes, "Number of bytes sent through HTTP interfaces") \
    M(InterfaceHTTPReceiveBytes, "Number of bytes received through HTTP interfaces") \
    M(InterfaceNativeSendBytes, "Number of bytes sent through native interfaces") \
    M(InterfaceNativeReceiveBytes, "Number of bytes received through native interfaces") \
    M(InterfacePrometheusSendBytes, "Number of bytes sent through Prometheus interfaces") \
    M(InterfacePrometheusReceiveBytes, "Number of bytes received through Prometheus interfaces") \
    M(InterfaceInterserverSendBytes, "Number of bytes sent through interserver interfaces") \
    M(InterfaceInterserverReceiveBytes, "Number of bytes received through interserver interfaces") \
    M(InterfaceMySQLSendBytes, "Number of bytes sent through MySQL interfaces") \
    M(InterfaceMySQLReceiveBytes, "Number of bytes received through MySQL interfaces") \
    M(InterfacePostgreSQLSendBytes, "Number of bytes sent through PostgreSQL interfaces") \
    M(InterfacePostgreSQLReceiveBytes, "Number of bytes received through PostgreSQL interfaces") \
    \
    M(ParallelReplicasUsedCount, "Number of replicas used to execute a query with task-based parallel replicas") \

#ifdef APPLY_FOR_EXTERNAL_EVENTS
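Call sites account these through the usual ProfileEvents::increment; a hedged sketch (the handler context and byte counters are hypothetical):

    // Hypothetical call site in an HTTP handler: account bytes on the wire.
    ProfileEvents::increment(ProfileEvents::InterfaceHTTPSendBytes, bytes_sent);
    ProfileEvents::increment(ProfileEvents::InterfaceHTTPReceiveBytes, bytes_received);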

Some files were not shown because too many files have changed in this diff.