diff --git a/.gitmodules b/.gitmodules index 1a464ee1170..af90c788012 100644 --- a/.gitmodules +++ b/.gitmodules @@ -354,3 +354,6 @@ [submodule "contrib/aklomp-base64"] path = contrib/aklomp-base64 url = https://github.com/aklomp/base64.git +[submodule "contrib/pocketfft"] + path = contrib/pocketfft + url = https://github.com/mreineck/pocketfft.git diff --git a/CMakeLists.txt b/CMakeLists.txt index 4fe7a1e05e7..063cfc77302 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -21,8 +21,11 @@ include (cmake/clang_tidy.cmake) include (cmake/git.cmake) include (cmake/utils.cmake) +# This is needed to set up the CMAKE_INSTALL_BINDIR variable. +include (GNUInstallDirs) + # Ignore export() since we don't use it, -# but it gets broken with a global targets via link_libraries() +# but it gets broken with global targets via link_libraries() macro (export) endmacro () @@ -460,14 +463,6 @@ endif () message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE}") -include (GNUInstallDirs) - -# When testing for memory leaks with Valgrind, don't link tcmalloc or jemalloc. - -if (TARGET global-group) - install (EXPORT global DESTINATION cmake) -endif () - add_subdirectory (contrib EXCLUDE_FROM_ALL) if (NOT ENABLE_JEMALLOC) diff --git a/README.md b/README.md index d0fd19c0b73..7642cb100ed 100644 --- a/README.md +++ b/README.md @@ -33,8 +33,6 @@ curl https://clickhouse.com/ | sh ## Upcoming Events -* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/296334923/) - Nov 14 -* [**ClickHouse Meetup in Singapore**](https://www.meetup.com/clickhouse-singapore-meetup-group/events/296334976/) - Nov 15 * [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/296488501/) - Nov 30 * [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/296488779/) - Dec 11 * [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/296488840/) - Dec 12 diff --git a/base/glibc-compatibility/CMakeLists.txt b/base/glibc-compatibility/CMakeLists.txt index 65677ed2cf3..c967fa5b11b 100644 --- a/base/glibc-compatibility/CMakeLists.txt +++ b/base/glibc-compatibility/CMakeLists.txt @@ -35,12 +35,6 @@ if (GLIBC_COMPATIBILITY) target_link_libraries(global-libs INTERFACE glibc-compatibility ${MEMCPY_LIBRARY}) - install( - TARGETS glibc-compatibility ${MEMCPY_LIBRARY} - EXPORT global - ARCHIVE DESTINATION lib - ) - message (STATUS "Some symbols from glibc will be replaced for compatibility") elseif (CLICKHOUSE_OFFICIAL_BUILD) diff --git a/base/harmful/CMakeLists.txt b/base/harmful/CMakeLists.txt index 399f6ecc625..c19661875be 100644 --- a/base/harmful/CMakeLists.txt +++ b/base/harmful/CMakeLists.txt @@ -1,2 +1 @@ add_library(harmful harmful.c) -install(TARGETS harmful EXPORT global ARCHIVE DESTINATION lib) diff --git a/base/poco/Net/src/HTTPServerSession.cpp b/base/poco/Net/src/HTTPServerSession.cpp index f6d3c4e5b92..d4f2b24879e 100644 --- a/base/poco/Net/src/HTTPServerSession.cpp +++ b/base/poco/Net/src/HTTPServerSession.cpp @@ -26,7 +26,6 @@ HTTPServerSession::HTTPServerSession(const StreamSocket& socket, HTTPServerParam _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests()) { setTimeout(pParams->getTimeout()); - this->socket().setReceiveTimeout(pParams->getTimeout()); } diff --git a/base/poco/Net/src/HTTPSession.cpp b/base/poco/Net/src/HTTPSession.cpp index d2663baaf9f..8f951b3102c 100644 --- a/base/poco/Net/src/HTTPSession.cpp +++ 
b/base/poco/Net/src/HTTPSession.cpp @@ -93,9 +93,34 @@ void HTTPSession::setTimeout(const Poco::Timespan& timeout) void HTTPSession::setTimeout(const Poco::Timespan& connectionTimeout, const Poco::Timespan& sendTimeout, const Poco::Timespan& receiveTimeout) { - _connectionTimeout = connectionTimeout; - _sendTimeout = sendTimeout; - _receiveTimeout = receiveTimeout; + try + { + _connectionTimeout = connectionTimeout; + + if (_sendTimeout.totalMicroseconds() != sendTimeout.totalMicroseconds()) { + _sendTimeout = sendTimeout; + + if (connected()) + _socket.setSendTimeout(_sendTimeout); + } + + if (_receiveTimeout.totalMicroseconds() != receiveTimeout.totalMicroseconds()) { + _receiveTimeout = receiveTimeout; + + if (connected()) + _socket.setReceiveTimeout(_receiveTimeout); + } + } + catch (NetException &) + { +#ifndef NDEBUG + throw; +#else + // Mute exceptions in release builds, + // in case changing settings on the socket is not allowed; + // this should be acceptable for timeouts. +#endif + } } diff --git a/cmake/ccache.cmake b/cmake/ccache.cmake index e8bf856332a..0df70d82d2c 100644 --- a/cmake/ccache.cmake +++ b/cmake/ccache.cmake @@ -9,10 +9,10 @@ if (CMAKE_CXX_COMPILER_LAUNCHER MATCHES "ccache" OR CMAKE_C_COMPILER_LAUNCHER MA return() endif() -set(COMPILER_CACHE "auto" CACHE STRING "Speedup re-compilations using the caching tools; valid options are 'auto' (ccache, then sccache), 'ccache', 'sccache', or 'disabled'") +set(COMPILER_CACHE "auto" CACHE STRING "Speedup re-compilations using the caching tools; valid options are 'auto' (sccache, then ccache), 'ccache', 'sccache', or 'disabled'") if(COMPILER_CACHE STREQUAL "auto") - find_program (CCACHE_EXECUTABLE NAMES ccache sccache) + find_program (CCACHE_EXECUTABLE NAMES sccache ccache) elseif (COMPILER_CACHE STREQUAL "ccache") find_program (CCACHE_EXECUTABLE ccache) elseif(COMPILER_CACHE STREQUAL "sccache") @@ -21,7 +21,7 @@ elseif(COMPILER_CACHE STREQUAL "disabled") message(STATUS "Using *ccache: no (disabled via configuration)") return() else() - message(${RECONFIGURE_MESSAGE_LEVEL} "The COMPILER_CACHE must be one of (auto|ccache|sccache|disabled), value: '${COMPILER_CACHE}'") + message(${RECONFIGURE_MESSAGE_LEVEL} "The COMPILER_CACHE must be one of (auto|sccache|ccache|disabled), value: '${COMPILER_CACHE}'") endif() diff --git a/cmake/cpu_features.cmake b/cmake/cpu_features.cmake index 765e36403ad..cfa9c314bc0 100644 --- a/cmake/cpu_features.cmake +++ b/cmake/cpu_features.cmake @@ -134,60 +134,52 @@ elseif (ARCH_AMD64) # ClickHouse can be cross-compiled (e.g. on an ARM host for x86) but it is also possible to build ClickHouse on x86 w/o AVX for x86 w/ # AVX. We only assume that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary.
- SET (HAVE_SSSE3 1) - SET (HAVE_SSE41 1) - SET (HAVE_SSE42 1) - SET (HAVE_PCLMULQDQ 1) - SET (HAVE_POPCNT 1) - SET (HAVE_AVX 1) - SET (HAVE_AVX2 1) - SET (HAVE_AVX512 1) - SET (HAVE_AVX512_VBMI 1) - SET (HAVE_BMI 1) - SET (HAVE_BMI2 1) - - if (HAVE_SSSE3 AND ENABLE_SSSE3) + if (ENABLE_SSSE3) set (COMPILER_FLAGS "${COMPILER_FLAGS} -mssse3") endif () - if (HAVE_SSE41 AND ENABLE_SSE41) + + if (ENABLE_SSE41) set (COMPILER_FLAGS "${COMPILER_FLAGS} -msse4.1") endif () - if (HAVE_SSE42 AND ENABLE_SSE42) + + if (ENABLE_SSE42) set (COMPILER_FLAGS "${COMPILER_FLAGS} -msse4.2") endif () - if (HAVE_PCLMULQDQ AND ENABLE_PCLMULQDQ) + + if (ENABLE_PCLMULQDQ) set (COMPILER_FLAGS "${COMPILER_FLAGS} -mpclmul") endif () - if (HAVE_POPCNT AND ENABLE_POPCNT) - set (COMPILER_FLAGS "${COMPILER_FLAGS} -mpopcnt") - endif () - if (HAVE_AVX AND ENABLE_AVX) - set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx") - endif () - if (HAVE_AVX2 AND ENABLE_AVX2) - set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx2") - endif () - if (HAVE_AVX512 AND ENABLE_AVX512) - set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx512f -mavx512bw -mavx512vl") - endif () - if (HAVE_AVX512 AND ENABLE_AVX512 AND HAVE_AVX512_VBMI AND ENABLE_AVX512_VBMI) - set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx512vbmi") - endif () - if (HAVE_BMI AND ENABLE_BMI) + + if (ENABLE_BMI) set (COMPILER_FLAGS "${COMPILER_FLAGS} -mbmi") endif () - if (HAVE_BMI2 AND HAVE_AVX2 AND ENABLE_AVX2 AND ENABLE_BMI2) - set (COMPILER_FLAGS "${COMPILER_FLAGS} -mbmi2") + + if (ENABLE_POPCNT) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mpopcnt") endif () + + if (ENABLE_AVX) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx") + endif () + + if (ENABLE_AVX2) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx2") + if (ENABLE_BMI2) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mbmi2") + endif () + endif () + + if (ENABLE_AVX512) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx512f -mavx512bw -mavx512vl") + if (ENABLE_AVX512_VBMI) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx512vbmi") + endif () + endif () + if (ENABLE_AVX512_FOR_SPEC_OP) - set (X86_INTRINSICS_FLAGS "") - if (HAVE_BMI) - set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi") - endif () - if (HAVE_AVX512) - set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx512f -mavx512bw -mavx512vl -mprefer-vector-width=256") - endif () + set (X86_INTRINSICS_FLAGS "-mbmi -mavx512f -mavx512bw -mavx512vl -mprefer-vector-width=256") endif () + else () # RISC-V + exotic platforms endif () diff --git a/cmake/darwin/default_libs.cmake b/cmake/darwin/default_libs.cmake index 42b8473cb75..cf0210d9b45 100644 --- a/cmake/darwin/default_libs.cmake +++ b/cmake/darwin/default_libs.cmake @@ -22,9 +22,3 @@ link_libraries(global-group) target_link_libraries(global-group INTERFACE $ ) - -# FIXME: remove when all contribs will get custom cmake lists -install( - TARGETS global-group global-libs - EXPORT global -) diff --git a/cmake/darwin/toolchain-aarch64.cmake b/cmake/darwin/toolchain-aarch64.cmake index 569b02bb642..178153c1098 100644 --- a/cmake/darwin/toolchain-aarch64.cmake +++ b/cmake/darwin/toolchain-aarch64.cmake @@ -9,9 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "aarch64-apple-darwin") set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64") set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE 
STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/darwin/toolchain-x86_64.cmake b/cmake/darwin/toolchain-x86_64.cmake index c4527d2fc0d..b9cbe72a2b6 100644 --- a/cmake/darwin/toolchain-x86_64.cmake +++ b/cmake/darwin/toolchain-x86_64.cmake @@ -9,9 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "x86_64-apple-darwin") set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-x86_64") set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/freebsd/default_libs.cmake b/cmake/freebsd/default_libs.cmake index 65bf296ee09..1eeb1a872bd 100644 --- a/cmake/freebsd/default_libs.cmake +++ b/cmake/freebsd/default_libs.cmake @@ -25,9 +25,3 @@ link_libraries(global-group) target_link_libraries(global-group INTERFACE $ ) - -# FIXME: remove when all contribs will get custom cmake lists -install( - TARGETS global-group global-libs - EXPORT global -) diff --git a/cmake/freebsd/toolchain-aarch64.cmake b/cmake/freebsd/toolchain-aarch64.cmake index 8a8da00f3be..53b7856ed03 100644 --- a/cmake/freebsd/toolchain-aarch64.cmake +++ b/cmake/freebsd/toolchain-aarch64.cmake @@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "aarch64-unknown-freebsd12") set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-aarch64") set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake - -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/freebsd/toolchain-ppc64le.cmake b/cmake/freebsd/toolchain-ppc64le.cmake index c3f6594204d..bb23f0fbafc 100644 --- a/cmake/freebsd/toolchain-ppc64le.cmake +++ b/cmake/freebsd/toolchain-ppc64le.cmake @@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "powerpc64le-unknown-freebsd13") set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-ppc64le") set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake - -# Will be changed later, but somehow needed to be set here. 
-set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/freebsd/toolchain-x86_64.cmake b/cmake/freebsd/toolchain-x86_64.cmake index 460de6a7d39..4635880b4a6 100644 --- a/cmake/freebsd/toolchain-x86_64.cmake +++ b/cmake/freebsd/toolchain-x86_64.cmake @@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd11") set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-x86_64") set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake - -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/fuzzer.cmake b/cmake/fuzzer.cmake index 52f301ab8ad..dd0c4b080fe 100644 --- a/cmake/fuzzer.cmake +++ b/cmake/fuzzer.cmake @@ -4,8 +4,8 @@ if (FUZZER) # NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind another possible fuzzer backends. # NOTE: no-link means that all the targets are built with instrumentation for fuzzer, but only some of them # (tests) have entry point for fuzzer and it's not checked. - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1") # NOTE: oss-fuzz can change LIB_FUZZING_ENGINE variable if (NOT LIB_FUZZING_ENGINE) diff --git a/cmake/limit_jobs.cmake b/cmake/limit_jobs.cmake index 28ccb62e10c..8e48fc9b9d8 100644 --- a/cmake/limit_jobs.cmake +++ b/cmake/limit_jobs.cmake @@ -21,7 +21,7 @@ if (NOT PARALLEL_COMPILE_JOBS AND MAX_COMPILER_MEMORY) set (PARALLEL_COMPILE_JOBS 1) endif () if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES) - message(WARNING "The auto-calculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.") + message("The auto-calculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.") endif() endif () @@ -32,7 +32,7 @@ if (NOT PARALLEL_LINK_JOBS AND MAX_LINKER_MEMORY) set (PARALLEL_LINK_JOBS 1) endif () if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES) - message(WARNING "The auto-calculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.") + message("The auto-calculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). 
Set PARALLEL_LINK_JOBS to override.") endif() endif () diff --git a/cmake/linux/default_libs.cmake b/cmake/linux/default_libs.cmake index 56a663a708e..8552097fa57 100644 --- a/cmake/linux/default_libs.cmake +++ b/cmake/linux/default_libs.cmake @@ -50,9 +50,3 @@ target_link_libraries(global-group INTERFACE $ -Wl,--end-group ) - -# FIXME: remove when all contribs will get custom cmake lists -install( - TARGETS global-group global-libs - EXPORT global -) diff --git a/cmake/linux/toolchain-aarch64.cmake b/cmake/linux/toolchain-aarch64.cmake index 2dedef8859f..b80cc01296d 100644 --- a/cmake/linux/toolchain-aarch64.cmake +++ b/cmake/linux/toolchain-aarch64.cmake @@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu") -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-aarch64") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc") @@ -20,9 +16,3 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/linux/toolchain-ppc64le.cmake b/cmake/linux/toolchain-ppc64le.cmake index c46ea954b71..98e8f7e8489 100644 --- a/cmake/linux/toolchain-ppc64le.cmake +++ b/cmake/linux/toolchain-ppc64le.cmake @@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "powerpc64le-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "powerpc64le-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "powerpc64le-linux-gnu") -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-powerpc64le") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc") @@ -20,9 +16,3 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/linux/toolchain-riscv64.cmake b/cmake/linux/toolchain-riscv64.cmake index 7f876f88d72..ae5a38f08eb 100644 --- a/cmake/linux/toolchain-riscv64.cmake +++ b/cmake/linux/toolchain-riscv64.cmake @@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "riscv64-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "riscv64-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "riscv64-linux-gnu") -# Will be changed later, but somehow needed to be set here. 
-set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-riscv64") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}") @@ -27,9 +23,3 @@ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=bfd") # ld.lld: error: section size decrease is too large # But GNU BinUtils work. set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "Linker name" FORCE) - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/linux/toolchain-s390x.cmake b/cmake/linux/toolchain-s390x.cmake index 945eb9affa4..d34329fb3bb 100644 --- a/cmake/linux/toolchain-s390x.cmake +++ b/cmake/linux/toolchain-s390x.cmake @@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "s390x-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "s390x-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "s390x-linux-gnu") -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-s390x") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/s390x-linux-gnu/libc") @@ -23,9 +19,3 @@ set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64") set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64") set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/linux/toolchain-x86_64-musl.cmake b/cmake/linux/toolchain-x86_64-musl.cmake index bc327e5ac25..fa7b3eaf0d1 100644 --- a/cmake/linux/toolchain-x86_64-musl.cmake +++ b/cmake/linux/toolchain-x86_64-musl.cmake @@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "x86_64-linux-musl") set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-musl") set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-musl") -# Will be changed later, but somehow needed to be set here. 
-set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64-musl") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}") @@ -21,11 +17,5 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - set (USE_MUSL 1) add_definitions(-DUSE_MUSL=1) diff --git a/cmake/linux/toolchain-x86_64.cmake b/cmake/linux/toolchain-x86_64.cmake index 55b9df79f70..e341219a7e5 100644 --- a/cmake/linux/toolchain-x86_64.cmake +++ b/cmake/linux/toolchain-x86_64.cmake @@ -19,10 +19,6 @@ set (CMAKE_C_COMPILER_TARGET "x86_64-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-gnu") -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/x86_64-linux-gnu/libc") @@ -32,9 +28,3 @@ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 6b38d16bf63..a8f0705df88 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -44,6 +44,7 @@ else () endif () add_contrib (miniselect-cmake miniselect) add_contrib (pdqsort-cmake pdqsort) +add_contrib (pocketfft-cmake pocketfft) add_contrib (crc32-vpmsum-cmake crc32-vpmsum) add_contrib (sparsehash-c11-cmake sparsehash-c11) add_contrib (abseil-cpp-cmake abseil-cpp) diff --git a/contrib/abseil-cpp-cmake/CMakeLists.txt b/contrib/abseil-cpp-cmake/CMakeLists.txt index 2901daf32db..e6c3268c57a 100644 --- a/contrib/abseil-cpp-cmake/CMakeLists.txt +++ b/contrib/abseil-cpp-cmake/CMakeLists.txt @@ -1,33 +1,3428 @@ set(ABSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp") +set(ABSL_COMMON_INCLUDE_DIRS "${ABSL_ROOT_DIR}") + +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +function(absl_cc_library) + cmake_parse_arguments(ABSL_CC_LIB + "DISABLE_INSTALL;PUBLIC;TESTONLY" + "NAME" + "HDRS;SRCS;COPTS;DEFINES;LINKOPTS;DEPS" + ${ARGN} + ) + + set(_NAME "absl_${ABSL_CC_LIB_NAME}") + + # Check if this is a header-only library + set(ABSL_CC_SRCS "${ABSL_CC_LIB_SRCS}") + foreach(src_file IN LISTS ABSL_CC_SRCS) + if(${src_file} MATCHES ".*\\.(h|inc)") + list(REMOVE_ITEM ABSL_CC_SRCS "${src_file}") + endif() + endforeach() + + if(ABSL_CC_SRCS STREQUAL "") + set(ABSL_CC_LIB_IS_INTERFACE 1) + else() + set(ABSL_CC_LIB_IS_INTERFACE 0) + endif() + + if(NOT ABSL_CC_LIB_IS_INTERFACE) + add_library(${_NAME} "") + target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS}) + target_link_libraries(${_NAME} + PUBLIC ${ABSL_CC_LIB_DEPS} + PRIVATE + ${ABSL_CC_LIB_LINKOPTS} + ${ABSL_DEFAULT_LINKOPTS} + ) + + target_include_directories(${_NAME} + PUBLIC "${ABSL_COMMON_INCLUDE_DIRS}") + target_compile_options(${_NAME} + PRIVATE ${ABSL_CC_LIB_COPTS}) + target_compile_definitions(${_NAME} PUBLIC ${ABSL_CC_LIB_DEFINES}) + + else() + # Generating header-only library + add_library(${_NAME} INTERFACE) + target_include_directories(${_NAME} + INTERFACE "${ABSL_COMMON_INCLUDE_DIRS}") + + target_link_libraries(${_NAME} + INTERFACE + ${ABSL_CC_LIB_DEPS} + ${ABSL_CC_LIB_LINKOPTS} + ${ABSL_DEFAULT_LINKOPTS} + ) + target_compile_definitions(${_NAME} INTERFACE ${ABSL_CC_LIB_DEFINES}) + + endif() + + add_library(absl::${ABSL_CC_LIB_NAME} ALIAS ${_NAME}) +endfunction() + + +set(DIR ${ABSL_ROOT_DIR}/absl/algorithm) + +absl_cc_library( + NAME + algorithm + HDRS + "${DIR}/algorithm.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_library( + NAME + algorithm_container + HDRS + "${DIR}/container.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::meta + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/base) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + atomic_hook + HDRS + "${DIR}/internal/atomic_hook.h" + DEPS + absl::config + absl::core_headers + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + errno_saver + HDRS + "${DIR}/internal/errno_saver.h" + DEPS + absl::config + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + log_severity + HDRS + "${DIR}/log_severity.h" + SRCS + "${DIR}/log_severity.cc" + DEPS + absl::config + absl::core_headers + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + nullability + HDRS + "${DIR}/nullability.h" + SRCS + "${DIR}/internal/nullability_impl.h" + DEPS + absl::core_headers + absl::type_traits + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + raw_logging_internal + HDRS + "${DIR}/internal/raw_logging.h" + SRCS + "${DIR}/internal/raw_logging.cc" + DEPS + absl::atomic_hook + absl::config + absl::core_headers + absl::errno_saver + absl::log_severity + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + spinlock_wait + HDRS + "${DIR}/internal/spinlock_wait.h" + SRCS + "${DIR}/internal/spinlock_akaros.inc" + "${DIR}/internal/spinlock_linux.inc" + "${DIR}/internal/spinlock_posix.inc" + "${DIR}/internal/spinlock_wait.cc" + "${DIR}/internal/spinlock_win32.inc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::core_headers + absl::errno_saver +) + +absl_cc_library( + NAME + config + HDRS + "${DIR}/config.h" + "${DIR}/options.h" + "${DIR}/policy_checks.h" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC +) + +absl_cc_library( + NAME + dynamic_annotations + HDRS + "${DIR}/dynamic_annotations.h" + SRCS + "${DIR}/internal/dynamic_annotations.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_library( + NAME + core_headers + HDRS + "${DIR}/attributes.h" + "${DIR}/const_init.h" + "${DIR}/macros.h" + "${DIR}/optimization.h" + "${DIR}/port.h" + "${DIR}/thread_annotations.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + malloc_internal + HDRS + "${DIR}/internal/direct_mmap.h" + "${DIR}/internal/low_level_alloc.h" + SRCS + "${DIR}/internal/low_level_alloc.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::base_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::raw_logging_internal + Threads::Threads +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + base_internal + HDRS + "${DIR}/internal/hide_ptr.h" + "${DIR}/internal/identity.h" + "${DIR}/internal/inline_variable.h" + "${DIR}/internal/invoke.h" + "${DIR}/internal/scheduling_mode.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::type_traits +) + +absl_cc_library( + NAME + base + HDRS + "${DIR}/call_once.h" + "${DIR}/casts.h" + "${DIR}/internal/cycleclock.h" + "${DIR}/internal/cycleclock_config.h" + "${DIR}/internal/low_level_scheduling.h" + "${DIR}/internal/per_thread_tls.h" + "${DIR}/internal/spinlock.h" + "${DIR}/internal/sysinfo.h" + "${DIR}/internal/thread_identity.h" + "${DIR}/internal/tsan_mutex_interface.h" + "${DIR}/internal/unscaledcycleclock.h" + "${DIR}/internal/unscaledcycleclock_config.h" + SRCS + "${DIR}/internal/cycleclock.cc" + "${DIR}/internal/spinlock.cc" + "${DIR}/internal/sysinfo.cc" + "${DIR}/internal/thread_identity.cc" + "${DIR}/internal/unscaledcycleclock.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::atomic_hook + absl::base_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::log_severity + absl::raw_logging_internal + absl::spinlock_wait + absl::type_traits + Threads::Threads + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + throw_delegate + HDRS + "${DIR}/internal/throw_delegate.h" + SRCS + "${DIR}/internal/throw_delegate.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + pretty_function + HDRS + "${DIR}/internal/pretty_function.h" + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + endian + HDRS + "${DIR}/internal/endian.h" + "${DIR}/internal/unaligned_access.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::core_headers + PUBLIC +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + scoped_set_env + SRCS + "${DIR}/internal/scoped_set_env.cc" + HDRS + "${DIR}/internal/scoped_set_env.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + strerror + SRCS + "${DIR}/internal/strerror.cc" + HDRS + "${DIR}/internal/strerror.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::errno_saver +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + fast_type_id + HDRS + "${DIR}/internal/fast_type_id.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_library( + NAME + prefetch + HDRS + "${DIR}/prefetch.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers +) + +set(DIR ${ABSL_ROOT_DIR}/absl/cleanup) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cleanup_internal + HDRS + "${DIR}/internal/cleanup.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::core_headers + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + cleanup + HDRS + "${DIR}/cleanup.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::cleanup_internal + absl::config + absl::core_headers + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/container) + +absl_cc_library( + NAME + btree + HDRS + "${DIR}/btree_map.h" + "${DIR}/btree_set.h" + "${DIR}/internal/btree.h" + "${DIR}/internal/btree_container.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::container_common + absl::common_policy_traits + absl::compare + absl::compressed_tuple + absl::container_memory + absl::cord + absl::core_headers + absl::layout + absl::memory + absl::raw_logging_internal + absl::strings + absl::throw_delegate + absl::type_traits + absl::utility +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + compressed_tuple + HDRS + "${DIR}/internal/compressed_tuple.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + fixed_array + HDRS + "${DIR}/fixed_array.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compressed_tuple + absl::algorithm + absl::config + absl::core_headers + absl::dynamic_annotations + absl::throw_delegate + absl::memory + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + inlined_vector_internal + HDRS + "${DIR}/internal/inlined_vector.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compressed_tuple + absl::core_headers + absl::memory + absl::span + absl::type_traits + PUBLIC +) + +absl_cc_library( + NAME + inlined_vector + HDRS + "${DIR}/inlined_vector.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::inlined_vector_internal + absl::throw_delegate + absl::memory + absl::type_traits + PUBLIC +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + counting_allocator + HDRS + "${DIR}/internal/counting_allocator.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config +) + +absl_cc_library( + NAME + flat_hash_map + HDRS + "${DIR}/flat_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::core_headers + absl::hash_function_defaults + absl::raw_hash_map + absl::algorithm_container + absl::memory + PUBLIC +) + +absl_cc_library( + NAME + flat_hash_set + HDRS + "${DIR}/flat_hash_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::hash_function_defaults + absl::raw_hash_set + absl::algorithm_container + absl::core_headers + absl::memory + PUBLIC +) + +absl_cc_library( + NAME + node_hash_map + HDRS + "${DIR}/node_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::core_headers + absl::hash_function_defaults + absl::node_slot_policy + absl::raw_hash_map + absl::algorithm_container + absl::memory + PUBLIC +) + +absl_cc_library( + NAME + node_hash_set + HDRS + "${DIR}/node_hash_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::hash_function_defaults + absl::node_slot_policy + absl::raw_hash_set + absl::algorithm_container + absl::memory + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + container_memory + HDRS + "${DIR}/internal/container_memory.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::memory + absl::type_traits + absl::utility + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hash_function_defaults + HDRS + "${DIR}/internal/hash_function_defaults.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::cord + absl::hash + absl::strings + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hash_policy_traits + HDRS + "${DIR}/internal/hash_policy_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::common_policy_traits + absl::meta + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + common_policy_traits + HDRS + "${DIR}/internal/common_policy_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::meta + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hashtablez_sampler + HDRS + "${DIR}/internal/hashtablez_sampler.h" + SRCS + "${DIR}/internal/hashtablez_sampler.cc" + "${DIR}/internal/hashtablez_sampler_force_weak_definition.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::exponential_biased + absl::raw_logging_internal + absl::sample_recorder + absl::synchronization + absl::time +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hashtable_debug + HDRS + "${DIR}/internal/hashtable_debug.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::hashtable_debug_hooks +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hashtable_debug_hooks + HDRS + "${DIR}/internal/hashtable_debug_hooks.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + node_slot_policy + HDRS + "${DIR}/internal/node_slot_policy.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + raw_hash_map + HDRS + "${DIR}/internal/raw_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::raw_hash_set + absl::throw_delegate + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + container_common + HDRS + "${DIR}/internal/common.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + raw_hash_set + HDRS + "${DIR}/internal/raw_hash_set.h" + SRCS + "${DIR}/internal/raw_hash_set.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bits + absl::compressed_tuple + absl::config + absl::container_common + absl::container_memory + absl::core_headers + absl::dynamic_annotations + absl::endian + absl::hash + absl::hash_policy_traits + absl::hashtable_debug_hooks + absl::hashtablez_sampler + absl::memory + absl::meta + absl::optional + absl::prefetch + absl::raw_logging_internal + absl::utility + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + layout + HDRS + "${DIR}/internal/layout.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::meta + absl::strings + absl::span + absl::utility + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/crc) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + crc_cpu_detect + HDRS + "${DIR}/internal/cpu_detect.h" + SRCS + "${DIR}/internal/cpu_detect.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + crc_internal + HDRS + "${DIR}/internal/crc.h" + "${DIR}/internal/crc32_x86_arm_combined_simd.h" + SRCS + "${DIR}/internal/crc.cc" + "${DIR}/internal/crc_internal.h" + "${DIR}/internal/crc_x86_arm_combined.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::crc_cpu_detect + absl::config + absl::core_headers + absl::endian + absl::prefetch + absl::raw_logging_internal + absl::memory + absl::bits +) + +absl_cc_library( + NAME + crc32c + HDRS + "${DIR}/crc32c.h" + "${DIR}/internal/crc32c.h" + "${DIR}/internal/crc_memcpy.h" + SRCS + "${DIR}/crc32c.cc" + "${DIR}/internal/crc32c_inline.h" + "${DIR}/internal/crc_memcpy_fallback.cc" + "${DIR}/internal/crc_memcpy_x86_arm_combined.cc" + "${DIR}/internal/crc_non_temporal_memcpy.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::crc_cpu_detect + absl::crc_internal + absl::non_temporal_memcpy + absl::config + absl::core_headers + absl::endian + absl::prefetch + absl::str_format + absl::strings +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + non_temporal_arm_intrinsics + HDRS + "${DIR}/internal/non_temporal_arm_intrinsics.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + non_temporal_memcpy + HDRS + "${DIR}/internal/non_temporal_memcpy.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::non_temporal_arm_intrinsics + absl::config + absl::core_headers +) + +absl_cc_library( + NAME + crc_cord_state + HDRS + "${DIR}/internal/crc_cord_state.h" + SRCS + "${DIR}/internal/crc_cord_state.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::crc32c + absl::config + absl::strings +) + +set(DIR ${ABSL_ROOT_DIR}/absl/debugging) + +absl_cc_library( + NAME + stacktrace + HDRS + "${DIR}/stacktrace.h" + "${DIR}/internal/stacktrace_aarch64-inl.inc" + "${DIR}/internal/stacktrace_arm-inl.inc" + "${DIR}/internal/stacktrace_config.h" + "${DIR}/internal/stacktrace_emscripten-inl.inc" + "${DIR}/internal/stacktrace_generic-inl.inc" + "${DIR}/internal/stacktrace_powerpc-inl.inc" + "${DIR}/internal/stacktrace_riscv-inl.inc" + "${DIR}/internal/stacktrace_unimplemented-inl.inc" + "${DIR}/internal/stacktrace_win32-inl.inc" + "${DIR}/internal/stacktrace_x86-inl.inc" + SRCS + "${DIR}/stacktrace.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::debugging_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::raw_logging_internal + PUBLIC +) + +absl_cc_library( + NAME + symbolize + HDRS + "${DIR}/symbolize.h" + "${DIR}/internal/symbolize.h" + SRCS + "${DIR}/symbolize.cc" + "${DIR}/symbolize_darwin.inc" + "${DIR}/symbolize_elf.inc" + "${DIR}/symbolize_emscripten.inc" + "${DIR}/symbolize_unimplemented.inc" + "${DIR}/symbolize_win32.inc" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::debugging_internal + absl::demangle_internal + absl::base + absl::config + absl::core_headers + absl::dynamic_annotations + absl::malloc_internal + absl::raw_logging_internal + absl::strings + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + examine_stack + HDRS + "${DIR}/internal/examine_stack.h" + SRCS + "${DIR}/internal/examine_stack.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::stacktrace + absl::symbolize + absl::config + absl::core_headers + absl::raw_logging_internal +) + +absl_cc_library( + NAME + failure_signal_handler + HDRS + "${DIR}/failure_signal_handler.h" + SRCS + "${DIR}/failure_signal_handler.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::examine_stack + absl::stacktrace + absl::base + absl::config + absl::core_headers + absl::raw_logging_internal + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + debugging_internal + HDRS + "${DIR}/internal/address_is_readable.h" + "${DIR}/internal/elf_mem_image.h" + "${DIR}/internal/vdso_support.h" + SRCS + "${DIR}/internal/address_is_readable.cc" + "${DIR}/internal/elf_mem_image.cc" + "${DIR}/internal/vdso_support.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::config + absl::dynamic_annotations + absl::errno_saver + absl::raw_logging_internal +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + demangle_internal + HDRS + "${DIR}/internal/demangle.h" + SRCS + "${DIR}/internal/demangle.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::core_headers + PUBLIC +) + +absl_cc_library( + NAME + leak_check + HDRS + "${DIR}/leak_check.h" + SRCS + "${DIR}/leak_check.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + PUBLIC +) + +# component target +absl_cc_library( + NAME + debugging + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::stacktrace + absl::leak_check + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/flags) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + flags_path_util + HDRS + "${DIR}/internal/path_util.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::strings + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + flags_program_name + SRCS + "${DIR}/internal/program_name.cc" + HDRS + "${DIR}/internal/program_name.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::flags_path_util + absl::strings + absl::synchronization + PUBLIC +) + +absl_cc_library( + NAME + flags_config + SRCS + "${DIR}/usage_config.cc" + HDRS + "${DIR}/config.h" + "${DIR}/usage_config.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::flags_path_util + absl::flags_program_name + absl::core_headers + absl::strings + absl::synchronization +) + +absl_cc_library( + NAME + flags_marshalling + SRCS + "${DIR}/marshalling.cc" + HDRS + "${DIR}/marshalling.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_severity + absl::int128 + absl::optional + absl::strings + absl::str_format +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + flags_commandlineflag_internal + SRCS + "${DIR}/internal/commandlineflag.cc" + HDRS + "${DIR}/internal/commandlineflag.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::dynamic_annotations + absl::fast_type_id +) + +absl_cc_library( + NAME + flags_commandlineflag + SRCS + "${DIR}/commandlineflag.cc" + HDRS + "${DIR}/commandlineflag.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::fast_type_id + absl::flags_commandlineflag_internal + absl::optional + absl::strings +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + flags_private_handle_accessor + SRCS + "${DIR}/internal/private_handle_accessor.cc" + HDRS + "${DIR}/internal/private_handle_accessor.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::flags_commandlineflag + absl::flags_commandlineflag_internal + absl::strings +) + +absl_cc_library( + NAME + flags_reflection + SRCS + "${DIR}/reflection.cc" + HDRS + "${DIR}/reflection.h" + "${DIR}/internal/registry.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::flags_commandlineflag + absl::flags_private_handle_accessor + absl::flags_config + absl::strings + absl::synchronization + absl::flat_hash_map +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + flags_internal + SRCS + "${DIR}/internal/flag.cc" + HDRS + "${DIR}/internal/flag.h" + "${DIR}/internal/sequence_lock.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::flags_commandlineflag + absl::flags_commandlineflag_internal + absl::flags_config + absl::flags_marshalling + absl::synchronization + absl::meta + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + flags + SRCS + "${DIR}/flag.cc" + HDRS + "${DIR}/declare.h" + "${DIR}/flag.h" + "${DIR}/internal/flag_msvc.inc" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::flags_commandlineflag + absl::flags_config + absl::flags_internal + absl::flags_reflection + absl::base + absl::core_headers + absl::strings +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + flags_usage_internal + SRCS + "${DIR}/internal/usage.cc" + HDRS + "${DIR}/internal/usage.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::flags_config + absl::flags + absl::flags_commandlineflag + absl::flags_internal + absl::flags_path_util + absl::flags_private_handle_accessor + absl::flags_program_name + absl::flags_reflection + absl::strings + absl::synchronization +) + +absl_cc_library( + NAME + flags_usage + SRCS + "${DIR}/usage.cc" + HDRS + "${DIR}/usage.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::flags_usage_internal + absl::raw_logging_internal + absl::strings + absl::synchronization +) + +absl_cc_library( + NAME + flags_parse + SRCS + "${DIR}/parse.cc" + HDRS + "${DIR}/internal/parse.h" + "${DIR}/parse.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::algorithm_container + absl::config + absl::core_headers + absl::flags_config + absl::flags + absl::flags_commandlineflag + absl::flags_commandlineflag_internal + absl::flags_internal + absl::flags_private_handle_accessor + absl::flags_program_name + absl::flags_reflection + absl::flags_usage + absl::strings + absl::synchronization +) + +set(DIR ${ABSL_ROOT_DIR}/absl/functional) + +absl_cc_library( + NAME + any_invocable + SRCS + "${DIR}/internal/any_invocable.h" + HDRS + "${DIR}/any_invocable.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::config + absl::core_headers + absl::type_traits + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + bind_front + SRCS + "${DIR}/internal/front_binder.h" + HDRS + "${DIR}/bind_front.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::compressed_tuple + PUBLIC +) + +absl_cc_library( + NAME + function_ref + SRCS + "${DIR}/internal/function_ref.h" + HDRS + "${DIR}/function_ref.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::core_headers + absl::any_invocable + absl::meta + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/hash) + +absl_cc_library( + NAME + hash + HDRS + "${DIR}/hash.h" + SRCS + "${DIR}/internal/hash.cc" + "${DIR}/internal/hash.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bits + absl::city + absl::config + absl::core_headers + absl::endian + absl::fixed_array + absl::function_ref + absl::meta + absl::int128 + absl::strings + absl::optional + absl::variant + absl::utility + absl::low_level_hash + PUBLIC +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + city + HDRS + "${DIR}/internal/city.h" + SRCS + "${DIR}/internal/city.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::endian +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + low_level_hash + HDRS + "${DIR}/internal/low_level_hash.h" + SRCS + "${DIR}/internal/low_level_hash.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::endian + absl::int128 + absl::prefetch +) + +set(DIR ${ABSL_ROOT_DIR}/absl/log) + +# Internal targets +absl_cc_library( + NAME + log_internal_check_impl + SRCS + HDRS + "${DIR}/internal/check_impl.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::log_internal_check_op + absl::log_internal_conditions + absl::log_internal_message + absl::log_internal_strip +) + +absl_cc_library( + NAME + log_internal_check_op + SRCS + "${DIR}/internal/check_op.cc" + HDRS + "${DIR}/internal/check_op.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_internal_nullguard + absl::log_internal_nullstream + absl::log_internal_strip + absl::strings +) + +absl_cc_library( + NAME + log_internal_conditions + SRCS + "${DIR}/internal/conditions.cc" + HDRS + "${DIR}/internal/conditions.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::log_internal_voidify +) + +absl_cc_library( + NAME + log_internal_config + SRCS + HDRS + "${DIR}/internal/config.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_library( + NAME + log_internal_flags + SRCS + HDRS + "${DIR}/internal/flags.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::flags +) + +absl_cc_library( + NAME + log_internal_format + SRCS + "${DIR}/internal/log_format.cc" + HDRS + "${DIR}/internal/log_format.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_internal_append_truncated + absl::log_internal_config + absl::log_internal_globals + absl::log_severity + absl::strings + absl::str_format + absl::time + absl::span +) + +absl_cc_library( + NAME + log_internal_globals + SRCS + "${DIR}/internal/globals.cc" + HDRS + "${DIR}/internal/globals.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_severity + absl::raw_logging_internal + absl::strings + absl::time +) + +absl_cc_library( + NAME + log_internal_log_impl + SRCS + HDRS + "${DIR}/internal/log_impl.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_conditions + absl::log_internal_message + absl::log_internal_strip +) + +absl_cc_library( + NAME + log_internal_proto + SRCS + "${DIR}/internal/proto.cc" + HDRS + "${DIR}/internal/proto.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::strings + absl::span +) + +absl_cc_library( + NAME + log_internal_message + SRCS + "${DIR}/internal/log_message.cc" + HDRS + "${DIR}/internal/log_message.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::errno_saver + absl::inlined_vector + absl::examine_stack + absl::log_internal_append_truncated + 
absl::log_internal_format + absl::log_internal_globals + absl::log_internal_proto + absl::log_internal_log_sink_set + absl::log_internal_nullguard + absl::log_globals + absl::log_entry + absl::log_severity + absl::log_sink + absl::log_sink_registry + absl::memory + absl::raw_logging_internal + absl::strings + absl::strerror + absl::time + absl::span +) + +absl_cc_library( + NAME + log_internal_log_sink_set + SRCS + "${DIR}/internal/log_sink_set.cc" + HDRS + "${DIR}/internal/log_sink_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + $<$:-llog> + DEPS + absl::base + absl::cleanup + absl::config + absl::core_headers + absl::log_internal_config + absl::log_internal_globals + absl::log_globals + absl::log_entry + absl::log_severity + absl::log_sink + absl::raw_logging_internal + absl::synchronization + absl::span + absl::strings +) + +absl_cc_library( + NAME + log_internal_nullguard + SRCS + "${DIR}/internal/nullguard.cc" + HDRS + "${DIR}/internal/nullguard.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_library( + NAME + log_internal_nullstream + SRCS + HDRS + "${DIR}/internal/nullstream.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_severity + absl::strings +) + +absl_cc_library( + NAME + log_internal_strip + SRCS + HDRS + "${DIR}/internal/strip.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_message + absl::log_internal_nullstream + absl::log_severity +) + +absl_cc_library( + NAME + log_internal_voidify + SRCS + HDRS + "${DIR}/internal/voidify.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_library( + NAME + log_internal_append_truncated + SRCS + HDRS + "${DIR}/internal/append_truncated.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::strings + absl::span +) + +# Public targets +absl_cc_library( + NAME + absl_check + SRCS + HDRS + "${DIR}/absl_check.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_check_impl + PUBLIC +) + +absl_cc_library( + NAME + absl_log + SRCS + HDRS + "${DIR}/absl_log.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_log_impl + PUBLIC +) + +absl_cc_library( + NAME + check + SRCS + HDRS + "${DIR}/check.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_check_impl + absl::core_headers + absl::log_internal_check_op + absl::log_internal_conditions + absl::log_internal_message + absl::log_internal_strip + PUBLIC +) + +absl_cc_library( + NAME + die_if_null + SRCS + "${DIR}/die_if_null.cc" + HDRS + "${DIR}/die_if_null.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log + absl::strings + PUBLIC +) + +absl_cc_library( + NAME + log_flags + SRCS + "${DIR}/flags.cc" + HDRS + "${DIR}/flags.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_globals + absl::log_severity + absl::log_internal_config + absl::log_internal_flags + absl::flags + absl::flags_marshalling + absl::strings + PUBLIC +) + +absl_cc_library( + NAME + log_globals + SRCS + "${DIR}/globals.cc" + HDRS + "${DIR}/globals.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + 
${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::atomic_hook + absl::config + absl::core_headers + absl::hash + absl::log_severity + absl::raw_logging_internal + absl::strings +) + +absl_cc_library( + NAME + log_initialize + SRCS + "${DIR}/initialize.cc" + HDRS + "${DIR}/initialize.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_globals + absl::log_internal_globals + absl::time + PUBLIC +) + +absl_cc_library( + NAME + log + SRCS + HDRS + "${DIR}/log.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_log_impl + PUBLIC +) + +absl_cc_library( + NAME + log_entry + SRCS + "${DIR}/log_entry.cc" + HDRS + "${DIR}/log_entry.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_internal_config + absl::log_severity + absl::span + absl::strings + absl::time + PUBLIC +) + +absl_cc_library( + NAME + log_sink + SRCS + "${DIR}/log_sink.cc" + HDRS + "${DIR}/log_sink.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_entry + PUBLIC +) + +absl_cc_library( + NAME + log_sink_registry + SRCS + HDRS + "${DIR}/log_sink_registry.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_sink + absl::log_internal_log_sink_set + PUBLIC +) + +absl_cc_library( + NAME + log_streamer + SRCS + HDRS + "${DIR}/log_streamer.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::absl_log + absl::log_severity + absl::optional + absl::strings + absl::strings_internal + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + log_internal_structured + HDRS + "${DIR}/internal/structured.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_internal_message + absl::strings +) + +absl_cc_library( + NAME + log_structured + HDRS + "${DIR}/structured.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_internal_structured + absl::strings + PUBLIC +) + +absl_cc_library( + NAME + log_internal_fnmatch + SRCS + "${DIR}/internal/fnmatch.cc" + HDRS + "${DIR}/internal/fnmatch.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::strings +) + +set(DIR ${ABSL_ROOT_DIR}/absl/memory) + +absl_cc_library( + NAME + memory + HDRS + "${DIR}/memory.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::meta + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/meta) + +absl_cc_library( + NAME + type_traits + HDRS + "${DIR}/type_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + PUBLIC +) + +# component target +absl_cc_library( + NAME + meta + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::type_traits + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/numeric) + +absl_cc_library( + NAME + bits + HDRS + "${DIR}/bits.h" + "${DIR}/internal/bits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + PUBLIC +) + +absl_cc_library( + NAME + int128 + HDRS + "${DIR}/int128.h" + SRCS + "${DIR}/int128.cc" + "${DIR}/int128_have_intrinsic.inc" + "${DIR}/int128_no_intrinsic.inc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::bits + PUBLIC +) + +# component target +absl_cc_library( + NAME + numeric + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::int128 + PUBLIC +) + +absl_cc_library( + NAME + numeric_representation + 
HDRS + "${DIR}/internal/representation.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_library( + NAME + sample_recorder + HDRS + "${DIR}/internal/sample_recorder.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::synchronization +) + +set(DIR ${ABSL_ROOT_DIR}/absl/profiling) + +absl_cc_library( + NAME + exponential_biased + SRCS + "${DIR}/internal/exponential_biased.cc" + HDRS + "${DIR}/internal/exponential_biased.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_library( + NAME + periodic_sampler + SRCS + "${DIR}/internal/periodic_sampler.cc" + HDRS + "${DIR}/internal/periodic_sampler.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::exponential_biased +) + +set(DIR ${ABSL_ROOT_DIR}/absl/random) + +absl_cc_library( + NAME + random_random + HDRS + "${DIR}/random.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::random_distributions + absl::random_internal_nonsecure_base + absl::random_internal_pcg_engine + absl::random_internal_pool_urbg + absl::random_internal_randen_engine + absl::random_seed_sequences +) + +absl_cc_library( + NAME + random_bit_gen_ref + HDRS + "${DIR}/bit_gen_ref.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::random_internal_distribution_caller + absl::random_internal_fast_uniform_bits + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_mock_helpers + HDRS + "${DIR}/internal/mock_helpers.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::fast_type_id + absl::optional +) + +absl_cc_library( + NAME + random_distributions + SRCS + "${DIR}/discrete_distribution.cc" + "${DIR}/gaussian_distribution.cc" + HDRS + "${DIR}/bernoulli_distribution.h" + "${DIR}/beta_distribution.h" + "${DIR}/discrete_distribution.h" + "${DIR}/distributions.h" + "${DIR}/exponential_distribution.h" + "${DIR}/gaussian_distribution.h" + "${DIR}/log_uniform_int_distribution.h" + "${DIR}/poisson_distribution.h" + "${DIR}/uniform_int_distribution.h" + "${DIR}/uniform_real_distribution.h" + "${DIR}/zipf_distribution.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base_internal + absl::config + absl::core_headers + absl::random_internal_generate_real + absl::random_internal_distribution_caller + absl::random_internal_fast_uniform_bits + absl::random_internal_fastmath + absl::random_internal_iostream_state_saver + absl::random_internal_traits + absl::random_internal_uniform_helper + absl::random_internal_wide_multiply + absl::strings + absl::type_traits +) + +absl_cc_library( + NAME + random_seed_gen_exception + SRCS + "${DIR}/seed_gen_exception.cc" + HDRS + "${DIR}/seed_gen_exception.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_library( + NAME + random_seed_sequences + SRCS + "${DIR}/seed_sequences.cc" + HDRS + "${DIR}/seed_sequences.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::inlined_vector + absl::random_internal_pool_urbg + absl::random_internal_salted_seed_seq + absl::random_internal_seed_material + absl::random_seed_gen_exception + absl::span +) + +# Internal-only target, do not depend on directly. 
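These absl_cc_library() calls rely on a helper function in the style of Abseil's AbseilHelpers.cmake, which turns each NAME/SRCS/HDRS/COPTS/LINKOPTS/DEPS description into an absl_<name> target plus an absl::<name> alias, with header-only entries becoming INTERFACE libraries. As a rough hand-written sketch (not the helper itself), the header-only `memory` target declared above corresponds to something like:

    # Rough equivalent of absl_cc_library(NAME memory ...) above; the real helper
    # also applies COPTS/LINKOPTS and the PUBLIC/TESTONLY flags.
    add_library(absl_memory INTERFACE)
    target_include_directories(absl_memory INTERFACE "${ABSL_ROOT_DIR}")
    target_link_libraries(absl_memory INTERFACE absl::core_headers absl::meta)
    add_library(absl::memory ALIAS absl_memory)
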
+absl_cc_library( + NAME + random_internal_traits + HDRS + "${DIR}/internal/traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_distribution_caller + HDRS + "${DIR}/internal/distribution_caller.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::utility + absl::fast_type_id +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_fast_uniform_bits + HDRS + "${DIR}/internal/fast_uniform_bits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_seed_material + SRCS + "${DIR}/internal/seed_material.cc" + HDRS + "${DIR}/internal/seed_material.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::optional + absl::random_internal_fast_uniform_bits + absl::raw_logging_internal + absl::span + absl::strings +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_pool_urbg + SRCS + "${DIR}/internal/pool_urbg.cc" + HDRS + "${DIR}/internal/pool_urbg.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::endian + absl::random_internal_randen + absl::random_internal_seed_material + absl::random_internal_traits + absl::random_seed_gen_exception + absl::raw_logging_internal + absl::span +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_salted_seed_seq + HDRS + "${DIR}/internal/salted_seed_seq.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::inlined_vector + absl::optional + absl::span + absl::random_internal_seed_material + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_iostream_state_saver + HDRS + "${DIR}/internal/iostream_state_saver.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::int128 + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_generate_real + HDRS + "${DIR}/internal/generate_real.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::bits + absl::random_internal_fastmath + absl::random_internal_traits + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_wide_multiply + HDRS + "${DIR}/internal/wide_multiply.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::bits + absl::config + absl::int128 +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_fastmath + HDRS + "${DIR}/internal/fastmath.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::bits +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + random_internal_nonsecure_base + HDRS + "${DIR}/internal/nonsecure_base.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::inlined_vector + absl::random_internal_pool_urbg + absl::random_internal_salted_seed_seq + absl::random_internal_seed_material + absl::span + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_pcg_engine + HDRS + "${DIR}/internal/pcg_engine.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::int128 + absl::random_internal_fastmath + absl::random_internal_iostream_state_saver + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_randen_engine + HDRS + "${DIR}/internal/randen_engine.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::endian + absl::random_internal_iostream_state_saver + absl::random_internal_randen + absl::raw_logging_internal + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_platform + HDRS + "${DIR}/internal/randen_traits.h" + "${DIR}/internal/platform.h" + SRCS + "${DIR}/internal/randen_round_keys.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_randen + SRCS + "${DIR}/internal/randen.cc" + HDRS + "${DIR}/internal/randen.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::random_internal_platform + absl::random_internal_randen_hwaes + absl::random_internal_randen_slow +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_randen_slow + SRCS + "${DIR}/internal/randen_slow.cc" + HDRS + "${DIR}/internal/randen_slow.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::random_internal_platform + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_randen_hwaes + SRCS + "${DIR}/internal/randen_detect.cc" + HDRS + "${DIR}/internal/randen_detect.h" + "${DIR}/internal/randen_hwaes.h" + COPTS + ${ABSL_DEFAULT_COPTS} + ${ABSL_RANDOM_RANDEN_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::random_internal_platform + absl::random_internal_randen_hwaes_impl + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_randen_hwaes_impl + SRCS + "${DIR}/internal/randen_hwaes.cc" + "${DIR}/internal/randen_hwaes.h" + COPTS + ${ABSL_DEFAULT_COPTS} + ${ABSL_RANDOM_RANDEN_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::random_internal_platform + absl::config +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + random_internal_uniform_helper + HDRS + "${DIR}/internal/uniform_helper.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::random_internal_traits + absl::type_traits +) + +set(DIR ${ABSL_ROOT_DIR}/absl/status) + +absl_cc_library( + NAME + status + HDRS + "${DIR}/status.h" + SRCS + "${DIR}/internal/status_internal.h" + "${DIR}/internal/status_internal.cc" + "${DIR}/status.cc" + "${DIR}/status_payload_printer.h" + "${DIR}/status_payload_printer.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEFINES + "$<$:_LINUX_SOURCE_COMPAT>" + DEPS + absl::atomic_hook + absl::config + absl::cord + absl::core_headers + absl::function_ref + absl::inlined_vector + absl::memory + absl::optional + absl::raw_logging_internal + absl::span + absl::stacktrace + absl::strerror + absl::str_format + absl::strings + absl::symbolize + PUBLIC +) + +absl_cc_library( + NAME + statusor + HDRS + "${DIR}/statusor.h" + SRCS + "${DIR}/statusor.cc" + "${DIR}/internal/statusor_internal.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::raw_logging_internal + absl::status + absl::strings + absl::type_traits + absl::utility + absl::variant + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/strings) + +absl_cc_library( + NAME + string_view + HDRS + "${DIR}/string_view.h" + SRCS + "${DIR}/string_view.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::throw_delegate + PUBLIC +) + +absl_cc_library( + NAME + strings + HDRS + "${DIR}/ascii.h" + "${DIR}/charconv.h" + "${DIR}/escaping.h" + "${DIR}/has_absl_stringify.h" + "${DIR}/internal/damerau_levenshtein_distance.h" + "${DIR}/internal/string_constant.h" + "${DIR}/match.h" + "${DIR}/numbers.h" + "${DIR}/str_cat.h" + "${DIR}/str_join.h" + "${DIR}/str_replace.h" + "${DIR}/str_split.h" + "${DIR}/strip.h" + "${DIR}/substitute.h" + SRCS + "${DIR}/ascii.cc" + "${DIR}/charconv.cc" + "${DIR}/escaping.cc" + "${DIR}/internal/charconv_bigint.cc" + "${DIR}/internal/charconv_bigint.h" + "${DIR}/internal/charconv_parse.cc" + "${DIR}/internal/charconv_parse.h" + "${DIR}/internal/damerau_levenshtein_distance.cc" + "${DIR}/internal/memutil.cc" + "${DIR}/internal/memutil.h" + "${DIR}/internal/stringify_sink.h" + "${DIR}/internal/stringify_sink.cc" + "${DIR}/internal/stl_type_traits.h" + "${DIR}/internal/str_join_internal.h" + "${DIR}/internal/str_split_internal.h" + "${DIR}/match.cc" + "${DIR}/numbers.cc" + "${DIR}/str_cat.cc" + "${DIR}/str_replace.cc" + "${DIR}/str_split.cc" + "${DIR}/substitute.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::string_view + absl::strings_internal + absl::base + absl::bits + absl::charset + absl::config + absl::core_headers + absl::endian + absl::int128 + absl::memory + absl::raw_logging_internal + absl::throw_delegate + absl::type_traits + PUBLIC +) + +absl_cc_library( + NAME + charset + HDRS + charset.h + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::string_view + PUBLIC +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + strings_internal + HDRS + "${DIR}/internal/escaping.cc" + "${DIR}/internal/escaping.h" + "${DIR}/internal/ostringstream.h" + "${DIR}/internal/resize_uninitialized.h" + "${DIR}/internal/utf8.h" + SRCS + "${DIR}/internal/ostringstream.cc" + "${DIR}/internal/utf8.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::endian + absl::raw_logging_internal + absl::type_traits +) + +absl_cc_library( + NAME + str_format + HDRS + "${DIR}/str_format.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::str_format_internal + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + str_format_internal + HDRS + "${DIR}/internal/str_format/arg.h" + "${DIR}/internal/str_format/bind.h" + "${DIR}/internal/str_format/checker.h" + "${DIR}/internal/str_format/constexpr_parser.h" + "${DIR}/internal/str_format/extension.h" + "${DIR}/internal/str_format/float_conversion.h" + "${DIR}/internal/str_format/output.h" + "${DIR}/internal/str_format/parser.h" + SRCS + "${DIR}/internal/str_format/arg.cc" + "${DIR}/internal/str_format/bind.cc" + "${DIR}/internal/str_format/extension.cc" + "${DIR}/internal/str_format/float_conversion.cc" + "${DIR}/internal/str_format/output.cc" + "${DIR}/internal/str_format/parser.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bits + absl::strings + absl::config + absl::core_headers + absl::inlined_vector + absl::numeric_representation + absl::type_traits + absl::utility + absl::int128 + absl::span +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cord_internal + HDRS + "${DIR}/internal/cord_data_edge.h" + "${DIR}/internal/cord_internal.h" + "${DIR}/internal/cord_rep_btree.h" + "${DIR}/internal/cord_rep_btree_navigator.h" + "${DIR}/internal/cord_rep_btree_reader.h" + "${DIR}/internal/cord_rep_crc.h" + "${DIR}/internal/cord_rep_consume.h" + "${DIR}/internal/cord_rep_flat.h" + SRCS + "${DIR}/internal/cord_internal.cc" + "${DIR}/internal/cord_rep_btree.cc" + "${DIR}/internal/cord_rep_btree_navigator.cc" + "${DIR}/internal/cord_rep_btree_reader.cc" + "${DIR}/internal/cord_rep_crc.cc" + "${DIR}/internal/cord_rep_consume.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::compressed_tuple + absl::config + absl::container_memory + absl::core_headers + absl::crc_cord_state + absl::endian + absl::inlined_vector + absl::layout + absl::raw_logging_internal + absl::strings + absl::throw_delegate + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_update_tracker + HDRS + "${DIR}/internal/cordz_update_tracker.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_functions + HDRS + "${DIR}/internal/cordz_functions.h" + SRCS + "${DIR}/internal/cordz_functions.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::exponential_biased + absl::raw_logging_internal +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_statistics + HDRS + "${DIR}/internal/cordz_statistics.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::cordz_update_tracker + absl::synchronization +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + cordz_handle + HDRS + "${DIR}/internal/cordz_handle.h" + SRCS + "${DIR}/internal/cordz_handle.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::raw_logging_internal + absl::synchronization +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_info + HDRS + "${DIR}/internal/cordz_info.h" + SRCS + "${DIR}/internal/cordz_info.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::cord_internal + absl::cordz_functions + absl::cordz_handle + absl::cordz_statistics + absl::cordz_update_tracker + absl::core_headers + absl::inlined_vector + absl::span + absl::raw_logging_internal + absl::stacktrace + absl::synchronization + absl::time +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_sample_token + HDRS + "${DIR}/internal/cordz_sample_token.h" + SRCS + "${DIR}/internal/cordz_sample_token.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::cordz_handle + absl::cordz_info +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_update_scope + HDRS + "${DIR}/internal/cordz_update_scope.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::cord_internal + absl::cordz_info + absl::cordz_update_tracker + absl::core_headers +) + +absl_cc_library( + NAME + cord + HDRS + "${DIR}/cord.h" + "${DIR}/cord_buffer.h" + SRCS + "${DIR}/cord.cc" + "${DIR}/cord_analysis.cc" + "${DIR}/cord_analysis.h" + "${DIR}/cord_buffer.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::cord_internal + absl::cordz_functions + absl::cordz_info + absl::cordz_update_scope + absl::cordz_update_tracker + absl::core_headers + absl::crc32c + absl::crc_cord_state + absl::endian + absl::function_ref + absl::inlined_vector + absl::optional + absl::raw_logging_internal + absl::span + absl::strings + absl::type_traits + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/synchronization) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + graphcycles_internal + HDRS + "${DIR}/internal/graphcycles.h" + SRCS + "${DIR}/internal/graphcycles.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::base_internal + absl::config + absl::core_headers + absl::malloc_internal + absl::raw_logging_internal +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + kernel_timeout_internal + HDRS + "${DIR}/internal/kernel_timeout.h" + SRCS + "${DIR}/internal/kernel_timeout.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::raw_logging_internal + absl::time +) + +absl_cc_library( + NAME + synchronization + HDRS + "${DIR}/barrier.h" + "${DIR}/blocking_counter.h" + "${DIR}/internal/create_thread_identity.h" + "${DIR}/internal/futex.h" + "${DIR}/internal/futex_waiter.h" + "${DIR}/internal/per_thread_sem.h" + "${DIR}/internal/pthread_waiter.h" + "${DIR}/internal/sem_waiter.h" + "${DIR}/internal/stdcpp_waiter.h" + "${DIR}/internal/waiter.h" + "${DIR}/internal/waiter_base.h" + "${DIR}/internal/win32_waiter.h" + "${DIR}/mutex.h" + "${DIR}/notification.h" + SRCS + "${DIR}/barrier.cc" + "${DIR}/blocking_counter.cc" + "${DIR}/internal/create_thread_identity.cc" + "${DIR}/internal/futex_waiter.cc" + "${DIR}/internal/per_thread_sem.cc" + "${DIR}/internal/pthread_waiter.cc" + "${DIR}/internal/sem_waiter.cc" + "${DIR}/internal/stdcpp_waiter.cc" + "${DIR}/internal/waiter_base.cc" + "${DIR}/internal/win32_waiter.cc" + "${DIR}/notification.cc" + "${DIR}/mutex.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::graphcycles_internal + absl::kernel_timeout_internal + absl::atomic_hook + absl::base + absl::base_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::malloc_internal + absl::raw_logging_internal + absl::stacktrace + absl::symbolize + absl::time + Threads::Threads + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/time) + +absl_cc_library( + NAME + time + HDRS + "${DIR}/civil_time.h" + "${DIR}/clock.h" + "${DIR}/time.h" + SRCS + "${DIR}/civil_time.cc" + "${DIR}/clock.cc" + "${DIR}/duration.cc" + "${DIR}/format.cc" + "${DIR}/internal/get_current_time_chrono.inc" + "${DIR}/internal/get_current_time_posix.inc" + "${DIR}/time.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::civil_time + absl::core_headers + absl::int128 + absl::raw_logging_internal + absl::strings + absl::time_zone + PUBLIC +) + +absl_cc_library( + NAME + civil_time + HDRS + "${DIR}/internal/cctz/include/cctz/civil_time.h" + "${DIR}/internal/cctz/include/cctz/civil_time_detail.h" + SRCS + "${DIR}/internal/cctz/src/civil_time_detail.cc" + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + time_zone + HDRS + "${DIR}/internal/cctz/include/cctz/time_zone.h" + "${DIR}/internal/cctz/include/cctz/zone_info_source.h" + SRCS + "${DIR}/internal/cctz/src/time_zone_fixed.cc" + "${DIR}/internal/cctz/src/time_zone_fixed.h" + "${DIR}/internal/cctz/src/time_zone_format.cc" + "${DIR}/internal/cctz/src/time_zone_if.cc" + "${DIR}/internal/cctz/src/time_zone_if.h" + "${DIR}/internal/cctz/src/time_zone_impl.cc" + "${DIR}/internal/cctz/src/time_zone_impl.h" + "${DIR}/internal/cctz/src/time_zone_info.cc" + "${DIR}/internal/cctz/src/time_zone_info.h" + "${DIR}/internal/cctz/src/time_zone_libc.cc" + "${DIR}/internal/cctz/src/time_zone_libc.h" + "${DIR}/internal/cctz/src/time_zone_lookup.cc" + "${DIR}/internal/cctz/src/time_zone_posix.cc" + "${DIR}/internal/cctz/src/time_zone_posix.h" + "${DIR}/internal/cctz/src/tzfile.h" + "${DIR}/internal/cctz/src/zone_info_source.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + Threads::Threads + $<$:-Wl,-framework,CoreFoundation> +) + +set(DIR ${ABSL_ROOT_DIR}/absl/types) + +absl_cc_library( + NAME + any + HDRS + "${DIR}/any.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bad_any_cast + absl::config + absl::core_headers + absl::fast_type_id + absl::type_traits + 
absl::utility + PUBLIC +) + +absl_cc_library( + NAME + bad_any_cast + HDRS + "${DIR}/bad_any_cast.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bad_any_cast_impl + absl::config + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + bad_any_cast_impl + SRCS + "${DIR}/bad_any_cast.h" + "${DIR}/bad_any_cast.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +absl_cc_library( + NAME + span + HDRS + "${DIR}/span.h" + SRCS + "${DIR}/internal/span.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::throw_delegate + absl::type_traits + PUBLIC +) + +absl_cc_library( + NAME + optional + HDRS + "${DIR}/optional.h" + SRCS + "${DIR}/internal/optional.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bad_optional_access + absl::base_internal + absl::config + absl::core_headers + absl::memory + absl::type_traits + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + bad_optional_access + HDRS + "${DIR}/bad_optional_access.h" + SRCS + "${DIR}/bad_optional_access.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal + PUBLIC +) + +absl_cc_library( + NAME + bad_variant_access + HDRS + "${DIR}/bad_variant_access.h" + SRCS + "${DIR}/bad_variant_access.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal + PUBLIC +) + +absl_cc_library( + NAME + variant + HDRS + "${DIR}/variant.h" + SRCS + "${DIR}/internal/variant.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bad_variant_access + absl::base_internal + absl::config + absl::core_headers + absl::type_traits + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + compare + HDRS + "${DIR}/compare.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::type_traits + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/utility) + +absl_cc_library( + NAME + utility + HDRS + "${DIR}/utility.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::config + absl::type_traits + PUBLIC +) + +absl_cc_library( + NAME + if_constexpr + HDRS + "${DIR}/internal/if_constexpr.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) -set(ABSL_PROPAGATE_CXX_STD ON) -add_subdirectory("${ABSL_ROOT_DIR}" "${ClickHouse_BINARY_DIR}/contrib/abseil-cpp") add_library(_abseil_swiss_tables INTERFACE) - -target_link_libraries(_abseil_swiss_tables INTERFACE - absl::flat_hash_map - absl::flat_hash_set -) - -get_target_property(FLAT_HASH_MAP_INCLUDE_DIR absl::flat_hash_map INTERFACE_INCLUDE_DIRECTORIES) -target_include_directories (_abseil_swiss_tables SYSTEM BEFORE INTERFACE ${FLAT_HASH_MAP_INCLUDE_DIR}) - -get_target_property(FLAT_HASH_SET_INCLUDE_DIR absl::flat_hash_set INTERFACE_INCLUDE_DIRECTORIES) -target_include_directories (_abseil_swiss_tables SYSTEM BEFORE INTERFACE ${FLAT_HASH_SET_INCLUDE_DIR}) - +target_include_directories (_abseil_swiss_tables SYSTEM BEFORE INTERFACE ${ABSL_ROOT_DIR}) add_library(ch_contrib::abseil_swiss_tables ALIAS _abseil_swiss_tables) - -set(ABSL_FORMAT_SRC - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/arg.cc - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/bind.cc - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/extension.cc - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/float_conversion.cc - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/output.cc - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/parser.cc -) - -add_library(_abseil_str_format ${ABSL_FORMAT_SRC}) -target_include_directories(_abseil_str_format PUBLIC 
${ABSL_ROOT_DIR}) - -add_library(ch_contrib::abseil_str_format ALIAS _abseil_str_format) diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt index 71133451889..96d1f4adda7 100644 --- a/contrib/arrow-cmake/CMakeLists.txt +++ b/contrib/arrow-cmake/CMakeLists.txt @@ -77,16 +77,16 @@ set(FLATBUFFERS_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/flatbuffers") set(FLATBUFFERS_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/flatbuffers") set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_SRC_DIR}/include") -# set flatbuffers CMake options -set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library") -set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library") -set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests") +set(FLATBUFFERS_SRCS + ${FLATBUFFERS_SRC_DIR}/src/idl_parser.cpp + ${FLATBUFFERS_SRC_DIR}/src/idl_gen_text.cpp + ${FLATBUFFERS_SRC_DIR}/src/reflection.cpp + ${FLATBUFFERS_SRC_DIR}/src/util.cpp) -add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}") +add_library(_flatbuffers STATIC ${FLATBUFFERS_SRCS}) +target_include_directories(_flatbuffers PUBLIC ${FLATBUFFERS_INCLUDE_DIR}) +target_compile_definitions(_flatbuffers PRIVATE -DFLATBUFFERS_LOCALE_INDEPENDENT=0) -add_library(_flatbuffers INTERFACE) -target_link_libraries(_flatbuffers INTERFACE flatbuffers) -target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR}) # === hdfs # NOTE: cannot use ch_contrib::hdfs since it's INCLUDE_DIRECTORIES does not includes trailing "hdfs/" @@ -127,7 +127,6 @@ set(ORC_SRCS "${ORC_SOURCE_SRC_DIR}/BpackingDefault.hh" "${ORC_SOURCE_SRC_DIR}/ByteRLE.cc" "${ORC_SOURCE_SRC_DIR}/ByteRLE.hh" - "${ORC_SOURCE_SRC_DIR}/CMakeLists.txt" "${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc" "${ORC_SOURCE_SRC_DIR}/ColumnReader.cc" "${ORC_SOURCE_SRC_DIR}/ColumnReader.hh" diff --git a/contrib/aws-cmake/AwsSIMD.cmake b/contrib/aws-cmake/AwsSIMD.cmake index a2f50f27d4e..24f7628e86f 100644 --- a/contrib/aws-cmake/AwsSIMD.cmake +++ b/contrib/aws-cmake/AwsSIMD.cmake @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0. 
if (USE_CPU_EXTENSIONS) - if (HAVE_AVX2) + if (ENABLE_AVX2) set (AVX2_CFLAGS "-mavx -mavx2") set (HAVE_AVX2_INTRINSICS 1) set (HAVE_MM256_EXTRACT_EPI64 1) diff --git a/contrib/azure-cmake/CMakeLists.txt b/contrib/azure-cmake/CMakeLists.txt index 7aba81259d3..bb44c993e79 100644 --- a/contrib/azure-cmake/CMakeLists.txt +++ b/contrib/azure-cmake/CMakeLists.txt @@ -48,9 +48,8 @@ set(AZURE_SDK_INCLUDES "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/inc/" ) -include("${AZURE_DIR}/cmake-modules/AzureTransportAdapters.cmake") - add_library(_azure_sdk ${AZURE_SDK_UNIFIED_SRC}) +target_compile_definitions(_azure_sdk PRIVATE BUILD_CURL_HTTP_TRANSPORT_ADAPTER) # Originally, on Windows azure-core is built with bcrypt and crypt32 by default if (TARGET OpenSSL::SSL) diff --git a/contrib/cassandra-cmake/CMakeLists.txt b/contrib/cassandra-cmake/CMakeLists.txt index 32611e0e151..0082364c130 100644 --- a/contrib/cassandra-cmake/CMakeLists.txt +++ b/contrib/cassandra-cmake/CMakeLists.txt @@ -68,8 +68,7 @@ list(APPEND INCLUDE_DIRS ${CASS_SRC_DIR}/third_party/hdr_histogram ${CASS_SRC_DIR}/third_party/http-parser ${CASS_SRC_DIR}/third_party/mt19937_64 - ${CASS_SRC_DIR}/third_party/rapidjson/rapidjson - ${CASS_SRC_DIR}/third_party/sparsehash/src) + ${CASS_SRC_DIR}/third_party/rapidjson/rapidjson) list(APPEND INCLUDE_DIRS ${CASS_INCLUDE_DIR} ${CASS_SRC_DIR}) @@ -83,10 +82,6 @@ set(HAVE_MEMCPY 1) set(HAVE_LONG_LONG 1) set(HAVE_UINT16_T 1) -configure_file("${CASS_SRC_DIR}/third_party/sparsehash/config.h.cmake" "${CMAKE_CURRENT_BINARY_DIR}/sparsehash/internal/sparseconfig.h") - - - # Determine random availability if (OS_LINUX) #set (HAVE_GETRANDOM 1) - not on every Linux kernel @@ -116,17 +111,17 @@ configure_file( ${CASS_ROOT_DIR}/driver_config.hpp.in ${CMAKE_CURRENT_BINARY_DIR}/driver_config.hpp) - add_library(_cassandra ${SOURCES} $ $ $) -target_link_libraries(_cassandra ch_contrib::zlib ch_contrib::minizip) +target_link_libraries(_cassandra ch_contrib::zlib ch_contrib::minizip ch_contrib::sparsehash) target_include_directories(_cassandra PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ${INCLUDE_DIRS}) target_include_directories(_cassandra SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR}) target_compile_definitions(_cassandra PRIVATE CASS_BUILDING) +target_compile_definitions(_cassandra PRIVATE -DSPARSEHASH_HASH=std::hash -Dsparsehash=google) target_link_libraries(_cassandra ch_contrib::uv) diff --git a/contrib/fastops-cmake/CMakeLists.txt b/contrib/fastops-cmake/CMakeLists.txt index e9aa4803583..1b09b736b2a 100644 --- a/contrib/fastops-cmake/CMakeLists.txt +++ b/contrib/fastops-cmake/CMakeLists.txt @@ -13,12 +13,10 @@ set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/fastops") set(SRCS "") -if(HAVE_AVX) +if(ARCH_AMD64) set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx/ops_avx.cpp") set_source_files_properties("${LIBRARY_DIR}/fastops/avx/ops_avx.cpp" PROPERTIES COMPILE_FLAGS "-mavx -DNO_AVX2") -endif() -if(HAVE_AVX2) set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp") set_source_files_properties("${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp" PROPERTIES COMPILE_FLAGS "-mavx2 -mfma") endif() diff --git a/contrib/google-protobuf-cmake/CMakeLists.txt b/contrib/google-protobuf-cmake/CMakeLists.txt index 727121e60b5..dda6dfe85e4 100644 --- a/contrib/google-protobuf-cmake/CMakeLists.txt +++ b/contrib/google-protobuf-cmake/CMakeLists.txt @@ -385,9 +385,25 @@ endif () include("${ClickHouse_SOURCE_DIR}/contrib/google-protobuf-cmake/protobuf_generate.cmake") +# These files needs to be installed to make it possible that users can use 
well-known protobuf types +set(google_proto_files + ${protobuf_source_dir}/src/google/protobuf/any.proto + ${protobuf_source_dir}/src/google/protobuf/api.proto + ${protobuf_source_dir}/src/google/protobuf/descriptor.proto + ${protobuf_source_dir}/src/google/protobuf/duration.proto + ${protobuf_source_dir}/src/google/protobuf/empty.proto + ${protobuf_source_dir}/src/google/protobuf/field_mask.proto + ${protobuf_source_dir}/src/google/protobuf/source_context.proto + ${protobuf_source_dir}/src/google/protobuf/struct.proto + ${protobuf_source_dir}/src/google/protobuf/timestamp.proto + ${protobuf_source_dir}/src/google/protobuf/type.proto + ${protobuf_source_dir}/src/google/protobuf/wrappers.proto +) + add_library(_protobuf INTERFACE) target_link_libraries(_protobuf INTERFACE _libprotobuf) target_include_directories(_protobuf INTERFACE "${Protobuf_INCLUDE_DIR}") +set_target_properties(_protobuf PROPERTIES google_proto_files "${google_proto_files}") add_library(ch_contrib::protobuf ALIAS _protobuf) add_library(_protoc INTERFACE) diff --git a/contrib/grpc b/contrib/grpc index 740e3dfd973..77b2737a709 160000 --- a/contrib/grpc +++ b/contrib/grpc @@ -1 +1 @@ -Subproject commit 740e3dfd97301a52ad8165b65285bcc149d9e817 +Subproject commit 77b2737a709d43d8c6895e3f03ca62b00bd9201c diff --git a/contrib/grpc-cmake/CMakeLists.txt b/contrib/grpc-cmake/CMakeLists.txt index 09ed2fe3f80..b8b5f5580c4 100644 --- a/contrib/grpc-cmake/CMakeLists.txt +++ b/contrib/grpc-cmake/CMakeLists.txt @@ -9,50 +9,14 @@ endif() set(_gRPC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc") set(_gRPC_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/grpc") -# Use re2 from ClickHouse contrib, not from gRPC third_party. -set(gRPC_RE2_PROVIDER "clickhouse" CACHE STRING "" FORCE) -set(_gRPC_RE2_INCLUDE_DIR "") -set(_gRPC_RE2_LIBRARIES ch_contrib::re2) - -# Use zlib from ClickHouse contrib, not from gRPC third_party. -set(gRPC_ZLIB_PROVIDER "clickhouse" CACHE STRING "" FORCE) -set(_gRPC_ZLIB_INCLUDE_DIR "") -set(_gRPC_ZLIB_LIBRARIES ch_contrib::zlib) - -# Use protobuf from ClickHouse contrib, not from gRPC third_party. -set(gRPC_PROTOBUF_PROVIDER "clickhouse" CACHE STRING "" FORCE) -set(_gRPC_PROTOBUF_LIBRARIES ch_contrib::protobuf) -set(_gRPC_PROTOBUF_PROTOC "protoc") -set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $) -set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ch_contrib::protoc) - if(TARGET OpenSSL::SSL) set(gRPC_USE_UNSECURE_LIBRARIES FALSE) else() set(gRPC_USE_UNSECURE_LIBRARIES TRUE) endif() -# Use OpenSSL from ClickHouse contrib, not from gRPC third_party. -set(gRPC_SSL_PROVIDER "clickhouse" CACHE STRING "" FORCE) -set(_gRPC_SSL_INCLUDE_DIR "") -set(_gRPC_SSL_LIBRARIES OpenSSL::Crypto OpenSSL::SSL) - -# Use abseil-cpp from ClickHouse contrib, not from gRPC third_party. -set(gRPC_ABSL_PROVIDER "clickhouse" CACHE STRING "" FORCE) - -# We don't want to build C# extensions. -set(gRPC_BUILD_CSHARP_EXT OFF) - -# TODO: Remove this. We generally like to compile with C++23 but grpc isn't ready yet. -set (CMAKE_CXX_STANDARD 20) - -set(_gRPC_CARES_LIBRARIES ch_contrib::c-ares) -set(gRPC_CARES_PROVIDER "clickhouse" CACHE STRING "" FORCE) -add_subdirectory("${_gRPC_SOURCE_DIR}" "${_gRPC_BINARY_DIR}") - -# The contrib/grpc/CMakeLists.txt redefined the PROTOBUF_GENERATE_GRPC_CPP() function for its own purposes, -# so we need to redefine it back. 
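A note on the _protobuf change above: the list of well-known .proto files is attached to the interface target as a custom property (google_proto_files), so whatever later needs to install or copy those files can query the target instead of hard-coding the paths. A hypothetical consumer (the variable name and install destination below are illustrative, not part of this patch) could read the property back like this:

    # Illustrative only: fetch the custom property stored on _protobuf above
    # and install the well-known .proto files under a data directory.
    get_target_property(PROTO_WELL_KNOWN_FILES _protobuf google_proto_files)
    install(FILES ${PROTO_WELL_KNOWN_FILES}
            DESTINATION "${CMAKE_INSTALL_DATADIR}/protobuf/google/protobuf")
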
-include("${ClickHouse_SOURCE_DIR}/contrib/grpc-cmake/protobuf_generate_grpc.cmake") +include(grpc.cmake) +include(protobuf_generate_grpc.cmake) set(gRPC_CPP_PLUGIN $) set(gRPC_PYTHON_PLUGIN $) diff --git a/contrib/grpc-cmake/grpc.cmake b/contrib/grpc-cmake/grpc.cmake new file mode 100644 index 00000000000..c2488539211 --- /dev/null +++ b/contrib/grpc-cmake/grpc.cmake @@ -0,0 +1,1854 @@ +# This file was edited for ClickHouse. + +# GRPC global cmake file +# This currently builds C and C++ code. +# This file has been automatically generated from a template file. +# Please look at the templates directory instead. +# This file can be regenerated from the template by running +# tools/buildgen/generate_projects.sh +# +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# We want to use C++23, but GRPC is not ready +set (CMAKE_CXX_STANDARD 20) + +set(_gRPC_ZLIB_INCLUDE_DIR "") +set(_gRPC_ZLIB_LIBRARIES ch_contrib::zlib) + +set(_gRPC_CARES_LIBRARIES ch_contrib::c-ares) + +set(_gRPC_RE2_INCLUDE_DIR "") +set(_gRPC_RE2_LIBRARIES ch_contrib::re2) + +set(_gRPC_SSL_INCLUDE_DIR "") +set(_gRPC_SSL_LIBRARIES OpenSSL::Crypto OpenSSL::SSL) + +set(_gRPC_PROTOBUF_LIBRARIES ch_contrib::protobuf) +set(_gRPC_PROTOBUF_PROTOC "protoc") +set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $) +set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ch_contrib::protoc) + + +if(UNIX) + if(${CMAKE_SYSTEM_NAME} MATCHES "Linux") + set(_gRPC_PLATFORM_LINUX ON) + elseif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(_gRPC_PLATFORM_MAC ON) + elseif(${CMAKE_SYSTEM_NAME} MATCHES "iOS") + set(_gRPC_PLATFORM_IOS ON) + elseif(${CMAKE_SYSTEM_NAME} MATCHES "Android") + set(_gRPC_PLATFORM_ANDROID ON) + else() + set(_gRPC_PLATFORM_POSIX ON) + endif() +endif() + +set(_gRPC_ADDRESS_SORTING_INCLUDE_DIR "${_gRPC_SOURCE_DIR}/third_party/address_sorting/include") +set(_gRPC_ADDRESS_SORTING_LIBRARIES address_sorting) + +set(UPB_ROOT_DIR ${_gRPC_SOURCE_DIR}/third_party/upb) + +set(_gRPC_UPB_INCLUDE_DIR "${UPB_ROOT_DIR}" "${_gRPC_SOURCE_DIR}/third_party/utf8_range") +set(_gRPC_UPB_GRPC_GENERATED_DIR "${_gRPC_SOURCE_DIR}/src//core/ext/upb-generated" "${_gRPC_SOURCE_DIR}/src//core/ext/upbdefs-generated") + +set(_gRPC_UPB_LIBRARIES upb) + +set(_gRPC_XXHASH_INCLUDE_DIR "${_gRPC_SOURCE_DIR}/third_party/xxhash") + +add_library(address_sorting + ${_gRPC_SOURCE_DIR}/third_party/address_sorting/address_sorting.c + ${_gRPC_SOURCE_DIR}/third_party/address_sorting/address_sorting_posix.c + ${_gRPC_SOURCE_DIR}/third_party/address_sorting/address_sorting_windows.c +) + +target_compile_features(address_sorting PUBLIC cxx_std_14) + +target_include_directories(address_sorting + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(address_sorting + ${_gRPC_ALLTARGETS_LIBRARIES} +) + + 
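The grpc.cmake content above follows the same convention as the rest of contrib: the _gRPC_*_LIBRARIES / _gRPC_*_INCLUDE_DIR variables are pointed at ClickHouse's own contrib targets (ch_contrib::zlib, ch_contrib::c-ares, ch_contrib::re2, ch_contrib::protobuf, OpenSSL::SSL) instead of gRPC's bundled third_party copies, and each gRPC component (address_sorting above, gpr and grpc below) is declared as an ordinary target over the vendored sources. A minimal sketch of that substitution pattern, with a made-up dependency name purely for illustration:

    # Illustrative sketch of the contrib convention (names are not from this patch):
    # wrap the vendored sources in an internal target and expose a stable ch_contrib:: alias...
    add_library(_somedep STATIC "${ClickHouse_SOURCE_DIR}/contrib/somedep/src/somedep.c")
    target_include_directories(_somedep SYSTEM BEFORE PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/somedep/include")
    add_library(ch_contrib::somedep ALIAS _somedep)

    # ...which grpc.cmake can then consume instead of the copy under third_party/:
    set(_gRPC_SOMEDEP_INCLUDE_DIR "")
    set(_gRPC_SOMEDEP_LIBRARIES ch_contrib::somedep)
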
+add_library(gpr + ${_gRPC_SOURCE_DIR}/src/core/lib/config/config_vars.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/config/config_vars_non_generated.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/config/load_config.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_local.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/alloc.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/android/log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/atm.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/iphone/cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/linux/cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/linux/log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/msys/tmpfile.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/sync.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/time.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/tmpfile.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/sync.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/sync_abseil.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/time.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/time_precise.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/string_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/sync.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/time.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/tmpfile.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/wrap_memcpy.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/crash.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/examine_stack.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/fork.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/host_port.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/linux/env.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/mpscq.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/posix/env.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/posix/stat.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/posix/thd.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/strerror.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/tchar.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/time_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/windows/env.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/windows/stat.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/windows/thd.cc +) + +target_compile_features(gpr PUBLIC cxx_std_14) + +target_include_directories(gpr + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(gpr + ${_gRPC_ALLTARGETS_LIBRARIES} + absl::base + absl::core_headers + absl::flags + absl::flags_marshalling + absl::any_invocable + absl::memory + absl::random_random + absl::status + absl::cord + absl::str_format + absl::strings + absl::synchronization + absl::time + absl::optional + absl::variant +) +if(_gRPC_PLATFORM_ANDROID) + target_link_libraries(gpr + android + log + ) +endif() + + +add_library(grpc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/backend_metrics/backend_metric_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/census/grpc_context.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/filters/channel_idle/channel_idle_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/channel_idle/idle_filter_state.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backend_metric.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backup_poller.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/channel_connectivity.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_channelz.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_service_config.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/config_selector.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/dynamic_filters.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/global_subchannel_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/http_proxy_mapper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/endpoint_list.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/health_check_client.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/weighted_round_robin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/xds_override_host.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/xds_wrr_locality.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/local_subchannel_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/dns_resolver_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/event_engine/event_engine_client_channel_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/event_engine/service_config_helper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/polling_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_filter_legacy_call_data.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_service_config.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_throttle.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel_pool_interface.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel_stream_client.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/deadline/deadline_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/fault_injection/fault_injection_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/fault_injection/fault_injection_service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/client/http_client_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/client_authority_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/http_filters_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/message_compress/compression_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/server/http_server_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/message_size/message_size_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/rbac/rbac_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/rbac/rbac_service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/server_config_selector/server_config_selector_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/stateful_session/stateful_session_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/stateful_session/stateful_session_service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/gcp/metadata_query.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/alpn/alpn.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/chttp2_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/server/chttp2_server.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_decoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_encoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/chttp2_transport.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/decode_huff.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/flow_control.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_data.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_goaway.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_ping.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_settings.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_window_update.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_encoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parse_result.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/http2_settings.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/http_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/huffsyms.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/parsing.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_abuse_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_callbacks.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_rate_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/stream_lists.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/varint.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/write_size_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/writing.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/inproc/inproc_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/inproc/inproc_transport.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/certs.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/clusters.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/config_dump_shared.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/init_dump.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/listeners.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/memory.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/metrics.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/mutex_stats.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/server_info.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/tap.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/annotations/resource.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/common/matcher/v3/matcher.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/grpc_method_list.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/metrics/v3/metrics_service.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/tap/v3/common.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/datadog.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/dynamic_ot.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/lightstep.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/opencensus.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/opentelemetry.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/service.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/skywalking.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/trace.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/xray.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/zipkin.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/data/accesslog/v3/accesslog.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/http/rbac/v3/rbac.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/http/stateful_session/v3/stateful_session.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/http/stateful_session/cookie/v3/cookie.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/common/v3/common.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/http/v3/cookie.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/filter_state.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/http_inputs.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/status_code_input.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/hash_policy.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/http.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/http_status.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/percent.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/range.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/ratelimit_strategy.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/ratelimit_unit.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/token_bucket.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/annotations.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/http.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/httpbody.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/any.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/duration.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/empty.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/struct.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/timestamp.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/wrappers.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/rpc/status.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/opencensus/proto/trace/v1/trace_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/annotations/migrate.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/annotations/security.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/annotations/status.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/annotations/versioning.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/validate/validate.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/annotations/v3/migrate.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/annotations/v3/security.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/annotations/v3/sensitive.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/annotations/v3/status.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/annotations/v3/versioning.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/authority.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/cidr.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/context_params.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/extension.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/resource.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/cel.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/domain.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/http_inputs.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/ip.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/matcher.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/range.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/regex.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/string.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/v3/cel.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/v3/range.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/certs.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/clusters.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump_shared.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/init_dump.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/listeners.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/memory.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/metrics.upbdefs.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/mutex_stats.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/server_info.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/tap.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/common/matcher/v3/matcher.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_method_list.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/metrics/v3/metrics_service.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/rbac/v3/rbac.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/tap/v3/common.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/datadog.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/dynamic_ot.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/lightstep.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/opencensus.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/opentelemetry.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/service.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/skywalking.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/trace.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/xray.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/zipkin.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/data/accesslog/v3/accesslog.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/rbac/v3/rbac.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/stateful_session/v3/stateful_session.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/http/stateful_session/cookie/v3/cookie.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/http/v3/cookie.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/filter_state.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/http_inputs.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/status_code_input.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/hash_policy.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/http_status.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_strategy.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_unit.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/token_bucket.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/checked.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/syntax.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/api/http.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/api/httpbody.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/opencensus/proto/trace/v1/trace_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/src/proto/grpc/lookup/v1/rls_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/validate/validate.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/annotations/v3/migrate.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/annotations/v3/security.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/annotations/v3/sensitive.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/annotations/v3/status.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/annotations/v3/versioning.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/cidr.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/extension.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/cel.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/domain.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/http_inputs.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/ip.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/matcher.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/range.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/regex.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/string.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/v3/cel.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/v3/range.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/v3/typed_struct.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/certificate_provider_store.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/file_watcher_certificate_provider_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_api.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_audit_logger_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_bootstrap.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_bootstrap_grpc.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_certificate_provider.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_channel_stack_modifier.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_client.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_client_grpc.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_client_stats.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_cluster.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_cluster_specifier_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_common_types.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_health_status.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_http_fault_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_http_filters.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_http_rbac_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_http_stateful_session_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_lb_policy_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_listener.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_route_config.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_routing.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_server_config_fetcher.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_transport_grpc.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/address_utils/parse_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/address_utils/sockaddr_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/backoff/backoff.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/backoff/random_early_detection.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/call_tracer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_args.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_args_preconditioning.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack_builder.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack_builder_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channelz.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channelz_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/connected_channel.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/promise_based_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/server_call_tracer_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/status_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/compression.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/compression_internal.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/message_compress.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/config/core_configuration.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/event_log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/histogram_view.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/stats.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/stats_data.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/ares_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/cf_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/dns_service_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/channel_args_endpoint_config.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/default_event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/default_event_engine_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/forkable.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/memory_allocator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/event_poller_posix_default.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/internal_errqueue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/lockfree_event.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine_listener.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer_heap.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer_manager.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/traced_buffer_list.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/resolved_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/shim.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/slice.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/slice_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/tcp_socket_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/thread_count.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/thread_pool_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thready_event_engine/thready_event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/time_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/iocp.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/win_socket.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_listener.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/work_queue/basic_work_queue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/experiments/config.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/experiments/experiments.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/load_file.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/per_cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/ref_counted_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/status_helper.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/time.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/time_averaged_stats.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/validation_errors.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/work_serializer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/handshaker/proxy_mapper_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/format_request.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/httpcli.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/httpcli_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/parser.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/buffer_list.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/call_combiner.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/cfstream_handle.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/closure.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/combiner.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/dualstack_socket_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/error.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/error_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_apple.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_epoll1_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_poll_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/closure.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/tcp_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/exec_ctx.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/executor.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_fallback.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_host_name_max.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_sysconf.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/internal_errqueue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iocp_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_internal.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/load_file.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/lockfree_event.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/polling_entity.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/sockaddr_utils_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_factory_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_mutator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_common_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/systemd_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_common.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_generic.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_heap.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_manager.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix_noop.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/vsock.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_eventfd.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_nospecial.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_pipe.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_object_loader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_reader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_writer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/load_balancing/lb_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/load_balancing/lb_policy_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/matchers/matchers.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/activity.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/party.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/sleep.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/resolver_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/server_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/api.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/arena.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/memory_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/periodic_update.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/resource_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/thread_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/audit_logging.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/authorization_policy_provider_vtable.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/evaluate_args.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/grpc_authorization_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/grpc_server_authz_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/matchers.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/rbac_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/stdout_logger.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/certificate_provider/certificate_provider_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/context/security_context.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/alts_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/call_creds_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/channel_creds_registry_init.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/composite/composite_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/external/aws_external_account_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/external/aws_request_signer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/external/external_account_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/external/file_external_account_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/external/url_external_account_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/fake/fake_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/google_default/credentials_generic.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/google_default/google_default_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/iam/iam_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/insecure/insecure_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/json_token.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/jwt_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/jwt_verifier.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/local/local_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/plugin/plugin_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/ssl/ssl_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_certificate_match.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/tls_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/tls_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/xds/xds_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/alts/alts_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/fake/fake_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/insecure/insecure_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_fallback.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_supported.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/local/local_security_connector.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/ssl_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/tls/tls_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/client_auth_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/secure_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/security_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/server_auth_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/tsi_error.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/util/json_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/service_config/service_config_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/service_config/service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/b64.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/percent_encoding.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_refcount.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_string_helpers.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/api_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/builtins.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer_reader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_details.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_log_batch.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_init.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_ping.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_stack_type.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/event_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/init.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/init_internally.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/lame_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/metadata_array.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/server.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/validate_metadata.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/version.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/batch_builder.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/bdp_estimator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/connectivity_state.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/error_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/handshaker_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/http_connect_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/metadata_batch.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/parsed_metadata.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/pid_controller.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/status_conversion.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/tcp_connect_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/timeout_encoding.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/transport.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/transport_op_string.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/uri/uri_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_plugin_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_plugin_registry_extra.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/crypt/aes_gcm.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/crypt/gsec.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_counter.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_crypter.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_frame_protector.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/frame_handler.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_handshaker_client.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_shared_resource.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_tsi_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/transport_security_common_api.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/fake_transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/local_transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl/key_logging/ssl_key_logging.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_cache.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl_transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl_transport_security_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/transport_security_grpc.cc +) + +target_compile_features(grpc PUBLIC cxx_std_14) + +target_include_directories(grpc + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(grpc + ${_gRPC_ALLTARGETS_LIBRARIES} + ${_gRPC_RE2_LIBRARIES} + upb_json_lib + upb_textformat_lib + ${_gRPC_ZLIB_LIBRARIES} + absl::algorithm_container + absl::cleanup + absl::flat_hash_map + absl::flat_hash_set + absl::inlined_vector + absl::bind_front + absl::function_ref + absl::hash + absl::type_traits + absl::random_bit_gen_ref + absl::random_distributions + absl::statusor + absl::span + absl::utility + ${_gRPC_CARES_LIBRARIES} + gpr + ${_gRPC_SSL_LIBRARIES} + ${_gRPC_ADDRESS_SORTING_LIBRARIES} +) +if(_gRPC_PLATFORM_IOS OR _gRPC_PLATFORM_MAC) + target_link_libraries(grpc "-framework CoreFoundation") +endif() + +add_library(grpc_unsecure + 
${_gRPC_SOURCE_DIR}/src/core/ext/filters/backend_metrics/backend_metric_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/census/grpc_context.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/channel_idle/channel_idle_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/channel_idle/idle_filter_state.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backend_metric.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backup_poller.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/channel_connectivity.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_channelz.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_service_config.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/config_selector.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/dynamic_filters.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/global_subchannel_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/http_proxy_mapper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/endpoint_list.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/health_check_client.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/weighted_round_robin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/local_subchannel_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/dns_resolver_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/event_engine/event_engine_client_channel_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/event_engine/service_config_helper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/polling_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_filter_legacy_call_data.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_service_config.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_throttle.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel_pool_interface.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel_stream_client.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/deadline/deadline_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/fault_injection/fault_injection_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/fault_injection/fault_injection_service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/client/http_client_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/client_authority_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/http_filters_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/message_compress/compression_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/server/http_server_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/message_size/message_size_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/chttp2_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/server/chttp2_server.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_decoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_encoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/chttp2_transport.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/decode_huff.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/flow_control.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_data.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_goaway.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_ping.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_settings.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_window_update.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_encoder.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parse_result.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/http2_settings.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/http_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/huffsyms.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/parsing.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_abuse_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_callbacks.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_rate_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/stream_lists.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/varint.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/write_size_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/writing.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/inproc/inproc_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/inproc/inproc_transport.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/annotations.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/http.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/any.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/duration.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/empty.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/struct.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/timestamp.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/wrappers.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/rpc/status.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/validate/validate.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c + ${_gRPC_SOURCE_DIR}/src/core/lib/address_utils/parse_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/address_utils/sockaddr_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/backoff/backoff.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/backoff/random_early_detection.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/call_tracer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_args.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_args_preconditioning.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack_builder.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack_builder_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channelz.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channelz_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/connected_channel.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/promise_based_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/server_call_tracer_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/status_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/compression.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/compression_internal.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/message_compress.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/config/core_configuration.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/event_log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/histogram_view.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/stats.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/stats_data.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/ares_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/cf_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/dns_service_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/channel_args_endpoint_config.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/default_event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/default_event_engine_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/forkable.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/memory_allocator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/event_poller_posix_default.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/internal_errqueue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/lockfree_event.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine_listener.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer_heap.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer_manager.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/traced_buffer_list.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/resolved_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/shim.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/slice.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/slice_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/tcp_socket_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/thread_count.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/thread_pool_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thready_event_engine/thready_event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/time_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/iocp.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/win_socket.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_listener.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/work_queue/basic_work_queue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/experiments/config.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/experiments/experiments.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/load_file.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/per_cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/ref_counted_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/status_helper.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/time.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/time_averaged_stats.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/validation_errors.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/work_serializer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/handshaker/proxy_mapper_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/format_request.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/httpcli.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/parser.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/buffer_list.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/call_combiner.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/cfstream_handle.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/closure.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/combiner.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/dualstack_socket_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/error.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/error_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_apple.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_epoll1_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_poll_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/closure.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/tcp_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/exec_ctx.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/executor.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_fallback.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_host_name_max.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_sysconf.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/internal_errqueue.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iocp_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_internal.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/load_file.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/lockfree_event.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/polling_entity.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/sockaddr_utils_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_factory_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_mutator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_common_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/systemd_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_common.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_generic.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_heap.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_manager.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix_noop.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/vsock.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_eventfd.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_nospecial.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_pipe.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_object_loader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_reader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_writer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/load_balancing/lb_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/load_balancing/lb_policy_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/activity.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/party.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/sleep.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/resolver_registry.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/resolver/server_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/api.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/arena.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/memory_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/periodic_update.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/resource_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/thread_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/authorization_policy_provider_vtable.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/evaluate_args.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/grpc_server_authz_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/certificate_provider/certificate_provider_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/context/security_context.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/call_creds_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/composite/composite_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/fake/fake_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/insecure/insecure_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/plugin/plugin_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/tls_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/fake/fake_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/insecure/insecure_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_fallback.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_supported.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/client_auth_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/secure_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/security_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/server_auth_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/tsi_error.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/util/json_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/service_config/service_config_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/service_config/service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/b64.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/percent_encoding.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_refcount.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_string_helpers.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/surface/api_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/builtins.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer_reader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_details.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_log_batch.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_init.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_ping.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_stack_type.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/event_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/init.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/init_internally.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/lame_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/metadata_array.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/server.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/validate_metadata.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/version.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/batch_builder.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/bdp_estimator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/connectivity_state.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/error_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/handshaker_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/http_connect_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/metadata_batch.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/parsed_metadata.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/pid_controller.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/status_conversion.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/tcp_connect_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/timeout_encoding.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/transport.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/transport_op_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/uri/uri_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_plugin_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_plugin_registry_noextra.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/transport_security_common_api.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/fake_transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/local_transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/transport_security_grpc.cc + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/message/accessors.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/build_enum.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/decode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/internal/base92.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/link.c + ${gRPC_ADDITIONAL_DLL_SRC} +) + +target_compile_features(grpc_unsecure PUBLIC cxx_std_14) + +target_include_directories(grpc_unsecure + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + 
${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(grpc_unsecure + ${_gRPC_ALLTARGETS_LIBRARIES} + upb_collections_lib + upb + ${_gRPC_ZLIB_LIBRARIES} + absl::algorithm_container + absl::cleanup + absl::flat_hash_map + absl::flat_hash_set + absl::inlined_vector + absl::bind_front + absl::function_ref + absl::hash + absl::type_traits + absl::random_bit_gen_ref + absl::random_distributions + absl::statusor + absl::span + absl::utility + ${_gRPC_CARES_LIBRARIES} + gpr + ${_gRPC_ADDRESS_SORTING_LIBRARIES} +) +if(_gRPC_PLATFORM_IOS OR _gRPC_PLATFORM_MAC) + target_link_libraries(grpc_unsecure "-framework CoreFoundation") +endif() + +add_library(upb + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/base/status.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/array.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/map.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/map_sorter.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/hash/common.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/lex/atoi.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/lex/round_trip.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/lex/strtod.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/lex/unicode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mem/alloc.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mem/arena.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/message/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/extension_registry.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/internal/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/wire/decode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/wire/decode_fast.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/wire/encode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/wire/eps_copy_input_stream.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/wire/reader.c +) + +target_compile_features(upb PUBLIC cxx_std_14) + +target_include_directories(upb + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(upb + ${_gRPC_ALLTARGETS_LIBRARIES} + utf8_range_lib +) + + +add_library(upb_collections_lib + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/base/status.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/array.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/map.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/map_sorter.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/hash/common.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mem/alloc.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mem/arena.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/message/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/extension_registry.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/internal/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/message.c +) + +target_compile_features(upb_collections_lib PUBLIC cxx_std_14) + +target_include_directories(upb_collections_lib + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(upb_collections_lib + ${_gRPC_ALLTARGETS_LIBRARIES} 
+) + + + +add_library(upb_json_lib + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/json/decode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/json/encode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/message/accessors.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/build_enum.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/decode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/internal/base92.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/internal/encode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/link.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_builder.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_pool.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_type.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/desc_state.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_reserved_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_value_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/extension_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/field_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/file_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message_reserved_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/method_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/oneof_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/service_def.c +) + +target_compile_features(upb_json_lib PUBLIC cxx_std_14) + +target_include_directories(upb_json_lib + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(upb_json_lib + ${_gRPC_ALLTARGETS_LIBRARIES} + upb_collections_lib + upb +) + + +add_library(upb_textformat_lib + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/message/accessors.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/build_enum.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/decode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/internal/base92.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/internal/encode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/link.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_builder.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_pool.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_type.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/desc_state.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_reserved_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_value_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/extension_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/field_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/file_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message.c + 
${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message_reserved_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/method_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/oneof_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/service_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/text/encode.c +) + +target_compile_features(upb_textformat_lib PUBLIC cxx_std_14) + +target_include_directories(upb_textformat_lib + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(upb_textformat_lib + ${_gRPC_ALLTARGETS_LIBRARIES} + upb_collections_lib + upb +) + + +add_library(utf8_range_lib + ${_gRPC_SOURCE_DIR}/third_party/utf8_range/naive.c + ${_gRPC_SOURCE_DIR}/third_party/utf8_range/range2-neon.c + ${_gRPC_SOURCE_DIR}/third_party/utf8_range/range2-sse.c +) + +target_compile_features(utf8_range_lib PUBLIC cxx_std_14) + +target_include_directories(utf8_range_lib + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(utf8_range_lib + ${_gRPC_ALLTARGETS_LIBRARIES} +) + + +add_library(grpc++ + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/binder_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/channel_create.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/channel_create_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/connection_id_generator.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/endpoint_binder_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/jni_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/security_policy_setting.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/security_policy/binder_security_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/server/binder_server.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/server/binder_server_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/transport/binder_transport.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/utils/ndk_binder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/utils/transport_stream_receiver_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/wire_format/binder_android.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/wire_format/binder_constants.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/wire_format/transaction.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/wire_format/wire_reader_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/wire_format/wire_writer.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/channel_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_callback.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_interceptor.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_stats_interceptor.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel_internal.cc + 
${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel_posix.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/insecure_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/secure_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/xds_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/alarm.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/auth_property_iterator.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/channel_arguments.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/channel_filter.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/completion_queue_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/resource_quota_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/rpc_method.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/secure_auth_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/secure_channel_arguments.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/secure_create_auth_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/tls_certificate_provider.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/tls_certificate_verifier.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/tls_credentials_options.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/validate_service_config.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/version_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/async_generic_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/backend_metric_recorder.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/channel_argument_option.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/create_default_thread_pool.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/external_connection_acceptor_impl.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/default_health_check_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/health_check_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/health_check_service_server_builder_option.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/insecure_server_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/secure_server_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_builder.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_callback.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_posix.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/xds_server_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/thread_manager/thread_manager.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/byte_buffer_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/status.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/string_ref.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/time_cc.cc + ${gRPC_UPB_GEN_DUPL_SRC} +) + +target_compile_features(grpc++ PUBLIC cxx_std_14) + +target_include_directories(grpc++ + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(grpc++ + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc + ${_gRPC_PROTOBUF_LIBRARIES} +) + +add_library(grpc++_unsecure + ${_gRPC_SOURCE_DIR}/src/cpp/client/channel_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_callback.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_interceptor.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_stats_interceptor.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel_internal.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel_posix.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/insecure_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/alarm.cc + 
${_gRPC_SOURCE_DIR}/src/cpp/common/channel_arguments.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/channel_filter.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/completion_queue_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/insecure_create_auth_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/resource_quota_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/rpc_method.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/validate_service_config.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/version_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/async_generic_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/backend_metric_recorder.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/channel_argument_option.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/create_default_thread_pool.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/external_connection_acceptor_impl.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/default_health_check_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/health_check_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/health_check_service_server_builder_option.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/insecure_server_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_builder.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_callback.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_posix.cc + ${_gRPC_SOURCE_DIR}/src/cpp/thread_manager/thread_manager.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/byte_buffer_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/status.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/string_ref.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/time_cc.cc + ${gRPC_UPB_GEN_DUPL_SRC} +) + +target_compile_features(grpc++_unsecure PUBLIC cxx_std_14) + +target_include_directories(grpc++_unsecure + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(grpc++_unsecure + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc_unsecure + ${_gRPC_PROTOBUF_LIBRARIES} +) + +add_library(grpc_plugin_support + ${_gRPC_SOURCE_DIR}/src/compiler/cpp_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/csharp_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/node_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/objective_c_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/php_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/proto_parser_helper.cc + ${_gRPC_SOURCE_DIR}/src/compiler/python_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/ruby_generator.cc +) + +target_compile_features(grpc_plugin_support PUBLIC cxx_std_14) + +target_include_directories(grpc_plugin_support + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(grpc_plugin_support + ${_gRPC_ALLTARGETS_LIBRARIES} + ${_gRPC_PROTOBUF_LIBRARIES} + ${_gRPC_PROTOBUF_PROTOC_LIBRARIES} +) + + +add_executable(grpc_cpp_plugin + ${_gRPC_SOURCE_DIR}/src/compiler/cpp_plugin.cc +) +target_compile_features(grpc_cpp_plugin PUBLIC cxx_std_14) +target_include_directories(grpc_cpp_plugin + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_SOURCE_DIR}/include + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + 
${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) + +target_link_libraries(grpc_cpp_plugin + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc_plugin_support +) diff --git a/contrib/libpqxx b/contrib/libpqxx index 791d68fd899..c995193a3a1 160000 --- a/contrib/libpqxx +++ b/contrib/libpqxx @@ -1 +1 @@ -Subproject commit 791d68fd89902835133c50435e380ec7a73271b7 +Subproject commit c995193a3a14d71f4711f1f421f65a1a1db64640 diff --git a/contrib/libunwind b/contrib/libunwind index 30cc1d3fd36..40d8eadf96b 160000 --- a/contrib/libunwind +++ b/contrib/libunwind @@ -1 +1 @@ -Subproject commit 30cc1d3fd3655a5cfa0ab112fe320fb9fc0a8344 +Subproject commit 40d8eadf96b127d9b22d53ce7a4fc52aaedea965 diff --git a/contrib/libunwind-cmake/CMakeLists.txt b/contrib/libunwind-cmake/CMakeLists.txt index 0d872bae5d1..8f3cd8bd07b 100644 --- a/contrib/libunwind-cmake/CMakeLists.txt +++ b/contrib/libunwind-cmake/CMakeLists.txt @@ -20,15 +20,7 @@ set(LIBUNWIND_ASM_SOURCES "${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersRestore.S" "${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersSave.S") -# CMake doesn't pass the correct architecture for Apple prior to CMake 3.19 [1] -# Workaround these two issues by compiling as C. -# -# [1]: https://gitlab.kitware.com/cmake/cmake/-/issues/20771 -if (APPLE AND CMAKE_VERSION VERSION_LESS 3.19) - set_source_files_properties(${LIBUNWIND_ASM_SOURCES} PROPERTIES LANGUAGE C) -else() - enable_language(ASM) -endif() +enable_language(ASM) set(LIBUNWIND_SOURCES ${LIBUNWIND_CXX_SOURCES} diff --git a/contrib/llvm-project-cmake/CMakeLists.txt b/contrib/llvm-project-cmake/CMakeLists.txt index d6133f145bc..406bac73e90 100644 --- a/contrib/llvm-project-cmake/CMakeLists.txt +++ b/contrib/llvm-project-cmake/CMakeLists.txt @@ -61,6 +61,9 @@ set (REQUIRED_LLVM_LIBRARIES LLVMDemangle ) +# Skip useless "install" instructions from CMake: +set (LLVM_INSTALL_TOOLCHAIN_ONLY 1 CACHE INTERNAL "") + if (ARCH_AMD64) set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "") list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen) diff --git a/contrib/pocketfft b/contrib/pocketfft new file mode 160000 index 00000000000..9efd4da52cf --- /dev/null +++ b/contrib/pocketfft @@ -0,0 +1 @@ +Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546 diff --git a/contrib/pocketfft-cmake/CMakeLists.txt b/contrib/pocketfft-cmake/CMakeLists.txt new file mode 100644 index 00000000000..01911ee4496 --- /dev/null +++ b/contrib/pocketfft-cmake/CMakeLists.txt @@ -0,0 +1,10 @@ +option (ENABLE_POCKETFFT "Enable pocketfft" ${ENABLE_LIBRARIES}) + +if (NOT ENABLE_POCKETFFT) + message(STATUS "Not using pocketfft") + return() +endif() + +add_library(_pocketfft INTERFACE) +target_include_directories(_pocketfft INTERFACE ${ClickHouse_SOURCE_DIR}/contrib/pocketfft) +add_library(ch_contrib::pocketfft ALIAS _pocketfft) diff --git a/contrib/qpl-cmake/CMakeLists.txt b/contrib/qpl-cmake/CMakeLists.txt index 4e6c66fe731..7a84048e16b 100644 --- a/contrib/qpl-cmake/CMakeLists.txt +++ b/contrib/qpl-cmake/CMakeLists.txt @@ -16,8 +16,7 @@ function(GetLibraryVersion _content _outputVar) SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE) endfunction() -FILE(READ "${QPL_PROJECT_DIR}/CMakeLists.txt" HEADER_CONTENT) -GetLibraryVersion("${HEADER_CONTENT}" QPL_VERSION) +set (QPL_VERSION 1.2.0) message(STATUS "Intel QPL version: ${QPL_VERSION}") @@ -28,16 +27,422 @@ message(STATUS "Intel QPL version: ${QPL_VERSION}") # The 
qpl submodule comes with its own version of isal. It contains code which does not exist in upstream isal. It would be nice to link # only upstream isal (ch_contrib::isal) but at this point we can't. -include("${QPL_PROJECT_DIR}/cmake/CompileOptions.cmake") +# ========================================================================== +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: MIT +# ========================================================================== + +set(QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS "-fno-exceptions;-fno-rtti") + +function(modify_standard_language_flag) + # Declaring function parameters + set(OPTIONS "") + set(ONE_VALUE_ARGS + LANGUAGE_NAME + FLAG_NAME + NEW_FLAG_VALUE) + set(MULTI_VALUE_ARGS "") + + # Parsing function parameters + cmake_parse_arguments(MODIFY + "${OPTIONS}" + "${ONE_VALUE_ARGS}" + "${MULTI_VALUE_ARGS}" + ${ARGN}) + + # Variables + set(FLAG_REGULAR_EXPRESSION "${MODIFY_FLAG_NAME}.*[ ]*") + set(NEW_VALUE "${MODIFY_FLAG_NAME}${MODIFY_NEW_FLAG_VALUE}") + + # Replacing specified flag with new value + string(REGEX REPLACE + ${FLAG_REGULAR_EXPRESSION} ${NEW_VALUE} + NEW_COMPILE_FLAGS + "${CMAKE_${MODIFY_LANGUAGE_NAME}_FLAGS}") + + # Returning the value + set(CMAKE_${MODIFY_LANGUAGE_NAME}_FLAGS ${NEW_COMPILE_FLAGS} PARENT_SCOPE) +endfunction() + +function(get_function_name_with_default_bit_width in_function_name bit_width out_function_name) + + if(in_function_name MATCHES ".*_i") + + string(REPLACE "_i" "" in_function_name ${in_function_name}) + + set(${out_function_name} "${in_function_name}_${bit_width}_i" PARENT_SCOPE) + + else() + + set(${out_function_name} "${in_function_name}_${bit_width}" PARENT_SCOPE) + + endif() + +endfunction() + +macro(get_list_of_supported_optimizations PLATFORMS_LIST) + list(APPEND PLATFORMS_LIST "") + list(APPEND PLATFORMS_LIST "px") + list(APPEND PLATFORMS_LIST "avx512") +endmacro(get_list_of_supported_optimizations) + +function(generate_unpack_kernel_arrays current_directory PLATFORMS_LIST) + list(APPEND UNPACK_POSTFIX_LIST "") + list(APPEND UNPACK_PRLE_POSTFIX_LIST "") + list(APPEND PACK_POSTFIX_LIST "") + list(APPEND PACK_INDEX_POSTFIX_LIST "") + list(APPEND SCAN_POSTFIX_LIST "") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "") + list(APPEND DEFAULT_BIT_WIDTH_LIST "") + + #create list of functions that use only 8u 16u 32u postfixes + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "unpack_prle") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "extract") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "extract_i") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "select") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "select_i") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "expand") + + #create default bit width list + list(APPEND DEFAULT_BIT_WIDTH_LIST "8u") + list(APPEND DEFAULT_BIT_WIDTH_LIST "16u") + list(APPEND DEFAULT_BIT_WIDTH_LIST "32u") + + #create scan kernel postfixes + list(APPEND SCAN_COMPARATOR_LIST "") + + list(APPEND SCAN_COMPARATOR_LIST "eq") + list(APPEND SCAN_COMPARATOR_LIST "ne") + list(APPEND SCAN_COMPARATOR_LIST "lt") + list(APPEND SCAN_COMPARATOR_LIST "le") + list(APPEND SCAN_COMPARATOR_LIST "gt") + list(APPEND SCAN_COMPARATOR_LIST "ge") + list(APPEND SCAN_COMPARATOR_LIST "range") + list(APPEND SCAN_COMPARATOR_LIST "not_range") + + foreach(SCAN_COMPARATOR IN LISTS SCAN_COMPARATOR_LIST) + list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_8u") + list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_16u8u") + list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_32u8u") + endforeach() + + 
# create unpack kernel postfixes + foreach(input_width RANGE 1 32 1) + if(input_width LESS 8 OR input_width EQUAL 8) + list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u8u") + + elseif(input_width LESS 16 OR input_width EQUAL 16) + list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u16u") + + else() + list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u32u") + endif() + endforeach() + + # create pack kernel postfixes + foreach(output_width RANGE 1 8 1) + list(APPEND PACK_POSTFIX_LIST "_8u${output_width}u") + endforeach() + + foreach(output_width RANGE 9 16 1) + list(APPEND PACK_POSTFIX_LIST "_16u${output_width}u") + endforeach() + + foreach(output_width RANGE 17 32 1) + list(APPEND PACK_POSTFIX_LIST "_32u${output_width}u") + endforeach() + + list(APPEND PACK_POSTFIX_LIST "_8u16u") + list(APPEND PACK_POSTFIX_LIST "_8u32u") + list(APPEND PACK_POSTFIX_LIST "_16u32u") + + # create pack index kernel postfixes + list(APPEND PACK_INDEX_POSTFIX_LIST "_nu") + list(APPEND PACK_INDEX_POSTFIX_LIST "_8u") + list(APPEND PACK_INDEX_POSTFIX_LIST "_8u16u") + list(APPEND PACK_INDEX_POSTFIX_LIST "_8u32u") + + # write to file + file(MAKE_DIRECTORY ${current_directory}/generated) + + foreach(PLATFORM_VALUE IN LISTS PLATFORMS_LIST) + set(directory "${current_directory}/generated") + set(PLATFORM_PREFIX "${PLATFORM_VALUE}_") + + # + # Write unpack table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}unpack.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "unpack_table_t ${PLATFORM_PREFIX}unpack_table = {\n") + + #write LE kernels + foreach(UNPACK_POSTFIX IN LISTS UNPACK_POSTFIX_LIST) + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack${UNPACK_POSTFIX},\n") + endforeach() + + #write BE kernels + + #get last element of the list + set(LAST_ELEMENT "") + list(GET UNPACK_POSTFIX_LIST -1 LAST_ELEMENT) + + foreach(UNPACK_POSTFIX IN LISTS UNPACK_POSTFIX_LIST) + + if(UNPACK_POSTFIX STREQUAL LAST_ELEMENT) + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack_be${UNPACK_POSTFIX}};\n") + else() + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack_be${UNPACK_POSTFIX},\n") + endif() + endforeach() + + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "}\n") + + # + # Write pack table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}pack.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "pack_table_t ${PLATFORM_PREFIX}pack_table = {\n") + + #write LE kernels + foreach(PACK_POSTFIX IN LISTS PACK_POSTFIX_LIST) + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack${PACK_POSTFIX},\n") + endforeach() + + #write BE kernels + + #get last element of the list + set(LAST_ELEMENT "") + list(GET PACK_POSTFIX_LIST -1 LAST_ELEMENT) + + foreach(PACK_POSTFIX IN LISTS PACK_POSTFIX_LIST) + + if(PACK_POSTFIX STREQUAL LAST_ELEMENT) + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack_be${PACK_POSTFIX}};\n") + else() + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp 
"\t${PLATFORM_PREFIX}qplc_pack_be${PACK_POSTFIX},\n") + endif() + endforeach() + + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "}\n") + + # + # Write scan table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}scan.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "scan_table_t ${PLATFORM_PREFIX}scan_table = {\n") + + #get last element of the list + set(LAST_ELEMENT "") + list(GET SCAN_POSTFIX_LIST -1 LAST_ELEMENT) + + foreach(SCAN_POSTFIX IN LISTS SCAN_POSTFIX_LIST) + + if(SCAN_POSTFIX STREQUAL LAST_ELEMENT) + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}};\n") + else() + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX},\n") + endif() + endforeach() + + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "}\n") + + # + # Write scan_i table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}scan_i.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "scan_i_table_t ${PLATFORM_PREFIX}scan_i_table = {\n") + + #get last element of the list + set(LAST_ELEMENT "") + list(GET SCAN_POSTFIX_LIST -1 LAST_ELEMENT) + + foreach(SCAN_POSTFIX IN LISTS SCAN_POSTFIX_LIST) + + if(SCAN_POSTFIX STREQUAL LAST_ELEMENT) + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}_i};\n") + else() + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}_i,\n") + endif() + endforeach() + + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "}\n") + + # + # Write pack_index table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}pack_index.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "pack_index_table_t ${PLATFORM_PREFIX}pack_index_table = {\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_bits_nu,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u16u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u32u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_bits_be_nu,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_be_8u16u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_be_8u32u};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "}\n") + + # + # Write default bit width functions + # + foreach(DEAULT_BIT_WIDTH_FUNCTION IN LISTS DEFAULT_BIT_WIDTH_FUNCTIONS_LIST) + file(WRITE 
${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "${DEAULT_BIT_WIDTH_FUNCTION}_table_t ${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}_table = {\n") + + #get last element of the list + set(LAST_ELEMENT "") + list(GET DEFAULT_BIT_WIDTH_LIST -1 LAST_ELEMENT) + + foreach(BIT_WIDTH IN LISTS DEFAULT_BIT_WIDTH_LIST) + + set(FUNCTION_NAME "") + get_function_name_with_default_bit_width(${DEAULT_BIT_WIDTH_FUNCTION} ${BIT_WIDTH} FUNCTION_NAME) + + if(BIT_WIDTH STREQUAL LAST_ELEMENT) + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "\t${PLATFORM_PREFIX}qplc_${FUNCTION_NAME}};\n") + else() + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "\t${PLATFORM_PREFIX}qplc_${FUNCTION_NAME},\n") + endif() + endforeach() + + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "}\n") + endforeach() + + # + # Write aggregates table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}aggregates.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "aggregates_table_t ${PLATFORM_PREFIX}aggregates_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_bit_aggregates_8u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_8u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_16u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_32u};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "}\n") + + # + # Write mem_copy functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "memory_copy_table_t ${PLATFORM_PREFIX}memory_copy_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_8u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_16u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_32u};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "}\n") + + # + # Write zero functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}zero.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "zero_table_t ${PLATFORM_PREFIX}zero_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "\t${PLATFORM_PREFIX}qplc_zero_8u};\n") + + 
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "}\n") + + # + # Write move functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}move.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "move_table_t ${PLATFORM_PREFIX}move_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "\t${PLATFORM_PREFIX}qplc_move_8u};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "}\n") + + # + # Write crc64 function table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}crc64.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "crc64_table_t ${PLATFORM_PREFIX}crc64_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "\t${PLATFORM_PREFIX}qplc_crc64};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "}\n") + + # + # Write xor_checksum function table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "xor_checksum_table_t ${PLATFORM_PREFIX}xor_checksum_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "\t${PLATFORM_PREFIX}qplc_xor_checksum_8u};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "}\n") + + # + # Write deflate functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_slow_icf.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_hash_table.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_histogram.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "deflate_table_t ${PLATFORM_PREFIX}deflate_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast(&${PLATFORM_PREFIX}slow_deflate_icf_body),\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast(&${PLATFORM_PREFIX}deflate_histogram_reset),\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast(&${PLATFORM_PREFIX}deflate_hash_table_reset)};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "}\n") + + # + # Write deflate fix functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "#include \"deflate_slow.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "deflate_fix_table_t ${PLATFORM_PREFIX}deflate_fix_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "\t 
reinterpret_cast(&${PLATFORM_PREFIX}slow_deflate_body)};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "}\n") + + # + # Write setup_dictionary functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "#include \"deflate_slow_utils.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "setup_dictionary_table_t ${PLATFORM_PREFIX}setup_dictionary_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "\t reinterpret_cast(&${PLATFORM_PREFIX}setup_dictionary)};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "}\n") + + endforeach() +endfunction() -# check nasm compiler -include(CheckLanguage) -check_language(ASM_NASM) -if(NOT CMAKE_ASM_NASM_COMPILER) - message(FATAL_ERROR "Please install NASM from 'https://www.nasm.us/' because NASM compiler can not be found!") -endif() -# [SUBDIR]isal enable_language(ASM_NASM) set(ISAL_C_SRC ${QPL_SRC_DIR}/isal/igzip/adler32_base.c @@ -107,11 +512,6 @@ set_target_properties(isal PROPERTIES CXX_STANDARD 11 C_STANDARD 99) -target_compile_options(isal PRIVATE - "$<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}>" - "$<$:>" - "$<$:>") - # AS_FEATURE_LEVEL=10 means "Check SIMD capabilities of the target system at runtime and use up to AVX512 if available". # HAVE_KNOWS_AVX512 means rely on AVX512 being available on the target system. target_compile_options(isal_asm PRIVATE "-I${QPL_SRC_DIR}/isal/include/" @@ -164,15 +564,7 @@ foreach(PLATFORM_ID IN LISTS PLATFORMS_LIST) PUBLIC $ PRIVATE $) - set_target_properties(qplcore_${PLATFORM_ID} PROPERTIES - $<$:C_STANDARD 17>) - - target_compile_options(qplcore_${PLATFORM_ID} - PRIVATE ${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS} - PRIVATE "$<$:>" - PRIVATE "$<$:-O3;-D_FORTIFY_SOURCE=2>") - - # Set specific compiler options and/or definitions based on a platform + # Set specific compiler options and/or definitions based on a platform if (${PLATFORM_ID} MATCHES "avx512") target_compile_definitions(qplcore_${PLATFORM_ID} PRIVATE PLATFORM=2) target_compile_options(qplcore_${PLATFORM_ID} PRIVATE -march=skylake-avx512) @@ -221,10 +613,7 @@ set_target_properties(qplcore_sw_dispatcher PROPERTIES CXX_STANDARD 17) target_compile_definitions(qplcore_sw_dispatcher PUBLIC -DQPL_LIB) target_compile_options(qplcore_sw_dispatcher - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; - ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS}; - $<$:-O3;-D_FORTIFY_SOURCE=2>> - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>) + PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}) # [SUBDIR]core-iaa file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c @@ -249,14 +638,6 @@ target_include_directories(core_iaa PRIVATE $ # own_checkers.h PRIVATE $) -set_target_properties(core_iaa PROPERTIES - $<$:C_STANDARD 17> - CXX_STANDARD 17) - -target_compile_options(core_iaa - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; - $<$:-O3;-D_FORTIFY_SOURCE=2>>) - target_compile_features(core_iaa PRIVATE c_std_11) target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK @@ -286,10 +667,7 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS $) target_compile_options(middle_layer_lib - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; - ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS}; - $<$:-O3;-D_FORTIFY_SOURCE=2>> - PRIVATE 
$<$:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>) + PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}) target_compile_definitions(middle_layer_lib PUBLIC QPL_VERSION="${QPL_VERSION}" @@ -324,15 +702,8 @@ target_include_directories(_qpl PRIVATE $ PRIVATE $) -set_target_properties(_qpl PROPERTIES - $<$:C_STANDARD 17> - CXX_STANDARD 17) - target_compile_options(_qpl - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; - ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS}; - $<$:-O3;-D_FORTIFY_SOURCE=2>> - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>) + PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}) target_compile_definitions(_qpl PRIVATE -DQPL_LIB diff --git a/contrib/qpl-cmake/benchmark_sample/client_scripts/allin1_ssb.sh b/contrib/qpl-cmake/benchmark_sample/client_scripts/allin1_ssb.sh deleted file mode 100644 index 31017b565b6..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/client_scripts/allin1_ssb.sh +++ /dev/null @@ -1,530 +0,0 @@ -#!/bin/bash -ckhost="localhost" -ckport=("9000" "9001" "9002" "9003") -WORKING_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.." -OUTPUT_DIR="${WORKING_DIR}/output" -LOG_DIR="${OUTPUT_DIR}/log" -RAWDATA_DIR="${WORKING_DIR}/rawdata_dir" -database_dir="${WORKING_DIR}/database_dir" -CLIENT_SCRIPTS_DIR="${WORKING_DIR}/client_scripts" -LOG_PACK_FILE="$(date +%Y-%m-%d-%H-%M-%S)" -QUERY_FILE="queries_ssb.sql" -SERVER_BIND_CMD[0]="numactl -m 0 -N 0" -SERVER_BIND_CMD[1]="numactl -m 0 -N 0" -SERVER_BIND_CMD[2]="numactl -m 1 -N 1" -SERVER_BIND_CMD[3]="numactl -m 1 -N 1" -CLIENT_BIND_CMD="" -SSB_GEN_FACTOR=20 -TABLE_NAME="lineorder_flat" -TALBE_ROWS="119994608" -CODEC_CONFIG="lz4 deflate zstd" - -# define instance number -inst_num=$1 -if [ ! -n "$1" ]; then - echo "Please clarify instance number from 1,2,3 or 4" - exit 1 -else - echo "Benchmarking with instance number:$1" -fi - -if [ ! -d "$OUTPUT_DIR" ]; then -mkdir $OUTPUT_DIR -fi -if [ ! -d "$LOG_DIR" ]; then -mkdir $LOG_DIR -fi -if [ ! 
-d "$RAWDATA_DIR" ]; then -mkdir $RAWDATA_DIR -fi - -# define different directories -dir_server=("" "_s2" "_s3" "_s4") -ckreadSql=" - CREATE TABLE customer - ( - C_CUSTKEY UInt32, - C_NAME String, - C_ADDRESS String, - C_CITY LowCardinality(String), - C_NATION LowCardinality(String), - C_REGION LowCardinality(String), - C_PHONE String, - C_MKTSEGMENT LowCardinality(String) - ) - ENGINE = MergeTree ORDER BY (C_CUSTKEY); - - CREATE TABLE lineorder - ( - LO_ORDERKEY UInt32, - LO_LINENUMBER UInt8, - LO_CUSTKEY UInt32, - LO_PARTKEY UInt32, - LO_SUPPKEY UInt32, - LO_ORDERDATE Date, - LO_ORDERPRIORITY LowCardinality(String), - LO_SHIPPRIORITY UInt8, - LO_QUANTITY UInt8, - LO_EXTENDEDPRICE UInt32, - LO_ORDTOTALPRICE UInt32, - LO_DISCOUNT UInt8, - LO_REVENUE UInt32, - LO_SUPPLYCOST UInt32, - LO_TAX UInt8, - LO_COMMITDATE Date, - LO_SHIPMODE LowCardinality(String) - ) - ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY); - - CREATE TABLE part - ( - P_PARTKEY UInt32, - P_NAME String, - P_MFGR LowCardinality(String), - P_CATEGORY LowCardinality(String), - P_BRAND LowCardinality(String), - P_COLOR LowCardinality(String), - P_TYPE LowCardinality(String), - P_SIZE UInt8, - P_CONTAINER LowCardinality(String) - ) - ENGINE = MergeTree ORDER BY P_PARTKEY; - - CREATE TABLE supplier - ( - S_SUPPKEY UInt32, - S_NAME String, - S_ADDRESS String, - S_CITY LowCardinality(String), - S_NATION LowCardinality(String), - S_REGION LowCardinality(String), - S_PHONE String - ) - ENGINE = MergeTree ORDER BY S_SUPPKEY; -" -supplier_table=" - CREATE TABLE supplier - ( - S_SUPPKEY UInt32, - S_NAME String, - S_ADDRESS String, - S_CITY LowCardinality(String), - S_NATION LowCardinality(String), - S_REGION LowCardinality(String), - S_PHONE String - ) - ENGINE = MergeTree ORDER BY S_SUPPKEY; -" -part_table=" - CREATE TABLE part - ( - P_PARTKEY UInt32, - P_NAME String, - P_MFGR LowCardinality(String), - P_CATEGORY LowCardinality(String), - P_BRAND LowCardinality(String), - P_COLOR LowCardinality(String), - P_TYPE LowCardinality(String), - P_SIZE UInt8, - P_CONTAINER LowCardinality(String) - ) - ENGINE = MergeTree ORDER BY P_PARTKEY; -" -lineorder_table=" - CREATE TABLE lineorder - ( - LO_ORDERKEY UInt32, - LO_LINENUMBER UInt8, - LO_CUSTKEY UInt32, - LO_PARTKEY UInt32, - LO_SUPPKEY UInt32, - LO_ORDERDATE Date, - LO_ORDERPRIORITY LowCardinality(String), - LO_SHIPPRIORITY UInt8, - LO_QUANTITY UInt8, - LO_EXTENDEDPRICE UInt32, - LO_ORDTOTALPRICE UInt32, - LO_DISCOUNT UInt8, - LO_REVENUE UInt32, - LO_SUPPLYCOST UInt32, - LO_TAX UInt8, - LO_COMMITDATE Date, - LO_SHIPMODE LowCardinality(String) - ) - ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY); -" -customer_table=" - CREATE TABLE customer - ( - C_CUSTKEY UInt32, - C_NAME String, - C_ADDRESS String, - C_CITY LowCardinality(String), - C_NATION LowCardinality(String), - C_REGION LowCardinality(String), - C_PHONE String, - C_MKTSEGMENT LowCardinality(String) - ) - ENGINE = MergeTree ORDER BY (C_CUSTKEY); -" - -lineorder_flat_table=" - SET max_memory_usage = 20000000000; - CREATE TABLE lineorder_flat - ENGINE = MergeTree - PARTITION BY toYear(LO_ORDERDATE) - ORDER BY (LO_ORDERDATE, LO_ORDERKEY) AS - SELECT - l.LO_ORDERKEY AS LO_ORDERKEY, - l.LO_LINENUMBER AS LO_LINENUMBER, - l.LO_CUSTKEY AS LO_CUSTKEY, - l.LO_PARTKEY AS LO_PARTKEY, - l.LO_SUPPKEY AS LO_SUPPKEY, - l.LO_ORDERDATE AS LO_ORDERDATE, - l.LO_ORDERPRIORITY AS LO_ORDERPRIORITY, - l.LO_SHIPPRIORITY AS LO_SHIPPRIORITY, - l.LO_QUANTITY AS LO_QUANTITY, - 
l.LO_EXTENDEDPRICE AS LO_EXTENDEDPRICE, - l.LO_ORDTOTALPRICE AS LO_ORDTOTALPRICE, - l.LO_DISCOUNT AS LO_DISCOUNT, - l.LO_REVENUE AS LO_REVENUE, - l.LO_SUPPLYCOST AS LO_SUPPLYCOST, - l.LO_TAX AS LO_TAX, - l.LO_COMMITDATE AS LO_COMMITDATE, - l.LO_SHIPMODE AS LO_SHIPMODE, - c.C_NAME AS C_NAME, - c.C_ADDRESS AS C_ADDRESS, - c.C_CITY AS C_CITY, - c.C_NATION AS C_NATION, - c.C_REGION AS C_REGION, - c.C_PHONE AS C_PHONE, - c.C_MKTSEGMENT AS C_MKTSEGMENT, - s.S_NAME AS S_NAME, - s.S_ADDRESS AS S_ADDRESS, - s.S_CITY AS S_CITY, - s.S_NATION AS S_NATION, - s.S_REGION AS S_REGION, - s.S_PHONE AS S_PHONE, - p.P_NAME AS P_NAME, - p.P_MFGR AS P_MFGR, - p.P_CATEGORY AS P_CATEGORY, - p.P_BRAND AS P_BRAND, - p.P_COLOR AS P_COLOR, - p.P_TYPE AS P_TYPE, - p.P_SIZE AS P_SIZE, - p.P_CONTAINER AS P_CONTAINER - FROM lineorder AS l - INNER JOIN customer AS c ON c.C_CUSTKEY = l.LO_CUSTKEY - INNER JOIN supplier AS s ON s.S_SUPPKEY = l.LO_SUPPKEY - INNER JOIN part AS p ON p.P_PARTKEY = l.LO_PARTKEY; - show settings ilike 'max_memory_usage'; -" - -function insert_data(){ - echo "insert_data:$1" - create_table_prefix="clickhouse client --host ${ckhost} --port $2 --multiquery -q" - insert_data_prefix="clickhouse client --query " - case $1 in - all) - clickhouse client --host ${ckhost} --port $2 --multiquery -q"$ckreadSql" && { - ${insert_data_prefix} "INSERT INTO customer FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/customer.tbl --port=$2 - ${insert_data_prefix} "INSERT INTO part FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/part.tbl --port=$2 - ${insert_data_prefix} "INSERT INTO supplier FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl --port=$2 - ${insert_data_prefix} "INSERT INTO lineorder FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl --port=$2 - } - ${create_table_prefix}"${lineorder_flat_table}" - ;; - customer) - echo ${create_table_prefix}\"${customer_table}\" - ${create_table_prefix}"${customer_table}" && { - echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2" - ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2 - } - ;; - part) - echo ${create_table_prefix}\"${part_table}\" - ${create_table_prefix}"${part_table}" && { - echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2" - ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2 - } - ;; - supplier) - echo ${create_table_prefix}"${supplier_table}" - ${create_table_prefix}"${supplier_table}" && { - echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2" - ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2 - } - ;; - lineorder) - echo ${create_table_prefix}"${lineorder_table}" - ${create_table_prefix}"${lineorder_table}" && { - echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2" - ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2 - } - ;; - lineorder_flat) - echo ${create_table_prefix}"${lineorder_flat_table}" - ${create_table_prefix}"${lineorder_flat_table}" - return 0 - ;; - *) - exit 0 - ;; - - esac -} - -function check_sql(){ - select_sql="select * from "$1" limit 1" - clickhouse client --host ${ckhost} --port $2 --multiquery -q"${select_sql}" -} - -function check_table(){ - checknum=0 - source_tables="customer part supplier lineorder lineorder_flat" - test_tables=${1:-${source_tables}} - echo 
"Checking table data required in server..." - for i in $(seq 0 $[inst_num-1]) - do - for j in `echo ${test_tables}` - do - check_sql $j ${ckport[i]} &> /dev/null || { - let checknum+=1 && insert_data "$j" ${ckport[i]} - } - done - done - - for i in $(seq 0 $[inst_num-1]) - do - echo "clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q\"select count() from ${TABLE_NAME};\"" - var=$(clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"select count() from ${TABLE_NAME};") - if [ $var -eq $TALBE_ROWS ];then - echo "Instance_${i} Table data integrity check OK -> Rows:$var" - else - echo "Instance_${i} Table data integrity check Failed -> Rows:$var" - exit 1 - fi - done - if [ $checknum -gt 0 ];then - echo "Need sleep 10s after first table data insertion...$checknum" - sleep 10 - fi -} - -function check_instance(){ -instance_alive=0 -for i in {1..10} -do - sleep 1 - netstat -nltp | grep ${1} > /dev/null - if [ $? -ne 1 ];then - instance_alive=1 - break - fi - -done - -if [ $instance_alive -eq 0 ];then - echo "check_instance -> clickhouse server instance faild to launch due to 10s timeout!" - exit 1 -else - echo "check_instance -> clickhouse server instance launch successfully!" -fi -} - -function start_clickhouse_for_insertion(){ - echo "start_clickhouse_for_insertion" - for i in $(seq 0 $[inst_num-1]) - do - echo "cd ${database_dir}/$1${dir_server[i]}" - echo "${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&${LOG_DIR}/${1}_${i}_server_log& > /dev/null" - - cd ${database_dir}/$1${dir_server[i]} - ${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&${LOG_DIR}/${1}_${i}_server_log& > /dev/null - check_instance ${ckport[i]} - done -} - -function start_clickhouse_for_stressing(){ - echo "start_clickhouse_for_stressing" - for i in $(seq 0 $[inst_num-1]) - do - echo "cd ${database_dir}/$1${dir_server[i]}" - echo "${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&/dev/null&" - - cd ${database_dir}/$1${dir_server[i]} - ${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&/dev/null& - check_instance ${ckport[i]} - done -} -yum -y install git make gcc sudo net-tools &> /dev/null -pip3 install clickhouse_driver numpy &> /dev/null -test -d ${RAWDATA_DIR}/ssb-dbgen || git clone https://github.com/vadimtk/ssb-dbgen.git ${RAWDATA_DIR}/ssb-dbgen && cd ${RAWDATA_DIR}/ssb-dbgen - -if [ ! 
-f ${RAWDATA_DIR}/ssb-dbgen/dbgen ];then - make && { - test -f ${RAWDATA_DIR}/ssb-dbgen/customer.tbl || echo y |./dbgen -s ${SSB_GEN_FACTOR} -T c - test -f ${RAWDATA_DIR}/ssb-dbgen/part.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T p - test -f ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T s - test -f ${RAWDATA_DIR}/ssb-dbgen/date.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T d - test -f ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T l - } -else - test -f ${RAWDATA_DIR}/ssb-dbgen/customer.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T c - test -f ${RAWDATA_DIR}/ssb-dbgen/part.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T p - test -f ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T s - test -f ${RAWDATA_DIR}/ssb-dbgen/date.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T d - test -f ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T l - -fi - -filenum=`find ${RAWDATA_DIR}/ssb-dbgen/ -name "*.tbl" | wc -l` - -if [ $filenum -ne 5 ];then - echo "generate ssb data file *.tbl faild" - exit 1 -fi - -function kill_instance(){ -instance_alive=1 -for i in {1..2} -do - pkill clickhouse && sleep 5 - instance_alive=0 - for i in $(seq 0 $[inst_num-1]) - do - netstat -nltp | grep ${ckport[i]} > /dev/null - if [ $? -ne 1 ];then - instance_alive=1 - break; - fi - done - if [ $instance_alive -eq 0 ];then - break; - fi -done -if [ $instance_alive -eq 0 ];then - echo "kill_instance OK!" -else - echo "kill_instance Failed -> clickhouse server instance still alive due to 10s timeout" - exit 1 -fi -} - -function run_test(){ -is_xml=0 -for i in $(seq 0 $[inst_num-1]) -do - if [ -f ${database_dir}/${1}${dir_server[i]}/config_${1}${dir_server[i]}.xml ]; then - is_xml=$[is_xml+1] - fi -done -if [ $is_xml -eq $inst_num ];then - echo "Benchmark with $inst_num instance" - start_clickhouse_for_insertion ${1} - - for i in $(seq 0 $[inst_num-1]) - do - clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"show databases;" >/dev/null - done - - if [ $? -eq 0 ];then - check_table - fi - kill_instance - - if [ $1 == "deflate" ];then - test -f ${LOG_DIR}/${1}_server_log && deflatemsg=`cat ${LOG_DIR}/${1}_server_log | grep DeflateJobHWPool` - if [ -n "$deflatemsg" ];then - echo ------------------------------------------------------ - echo $deflatemsg - echo ------------------------------------------------------ - fi - fi - echo "Check table data required in server_${1} -> Done! " - - start_clickhouse_for_stressing ${1} - for i in $(seq 0 $[inst_num-1]) - do - clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"show databases;" >/dev/null - done - if [ $? -eq 0 ];then - test -d ${CLIENT_SCRIPTS_DIR} && cd ${CLIENT_SCRIPTS_DIR} - echo "Client stressing... " - echo "${CLIENT_BIND_CMD} python3 client_stressing_test.py ${QUERY_FILE} $inst_num &> ${LOG_DIR}/${1}.log" - ${CLIENT_BIND_CMD} python3 client_stressing_test.py ${QUERY_FILE} $inst_num &> ${LOG_DIR}/${1}.log - echo "Completed client stressing, checking log... 
" - finish_log=`grep "Finished" ${LOG_DIR}/${1}.log | wc -l` - if [ $finish_log -eq 1 ] ;then - kill_instance - test -f ${LOG_DIR}/${1}.log && echo "${1}.log ===> ${LOG_DIR}/${1}.log" - else - kill_instance - echo "No find 'Finished' in client log -> Performance test may fail" - exit 1 - - fi - - else - echo "${1} clickhouse server start fail" - exit 1 - fi -else - echo "clickhouse server start fail -> Please check xml files required in ${database_dir} for each instance" - exit 1 - -fi -} -function clear_log(){ - if [ -d "$LOG_DIR" ]; then - cd ${LOG_DIR} && rm -rf * - fi -} - -function gather_log_for_codec(){ - cd ${OUTPUT_DIR} && mkdir -p ${LOG_PACK_FILE}/${1} - cp -rf ${LOG_DIR} ${OUTPUT_DIR}/${LOG_PACK_FILE}/${1} -} - -function pack_log(){ - if [ -e "${OUTPUT_DIR}/run.log" ]; then - cp ${OUTPUT_DIR}/run.log ${OUTPUT_DIR}/${LOG_PACK_FILE}/ - fi - echo "Please check all log information in ${OUTPUT_DIR}/${LOG_PACK_FILE}" -} - -function setup_check(){ - - iax_dev_num=`accel-config list | grep iax | wc -l` - if [ $iax_dev_num -eq 0 ] ;then - iax_dev_num=`accel-config list | grep iax | wc -l` - if [ $iax_dev_num -eq 0 ] ;then - echo "No IAA devices available -> Please check IAA hardware setup manually!" - exit 1 - else - echo "IAA enabled devices number:$iax_dev_num" - fi - else - echo "IAA enabled devices number:$iax_dev_num" - fi - libaccel_version=`accel-config -v` - clickhouser_version=`clickhouse server --version` - kernel_dxd_log=`dmesg | grep dxd` - echo "libaccel_version:$libaccel_version" - echo "clickhouser_version:$clickhouser_version" - echo -e "idxd section in kernel log:\n$kernel_dxd_log" -} - -setup_check -export CLICKHOUSE_WATCHDOG_ENABLE=0 -for i in ${CODEC_CONFIG[@]} -do - clear_log - codec=${i} - echo "run test------------$codec" - run_test $codec - gather_log_for_codec $codec -done - -pack_log -echo "Done." 
\ No newline at end of file diff --git a/contrib/qpl-cmake/benchmark_sample/client_scripts/client_stressing_test.py b/contrib/qpl-cmake/benchmark_sample/client_scripts/client_stressing_test.py deleted file mode 100644 index f12381a198c..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/client_scripts/client_stressing_test.py +++ /dev/null @@ -1,278 +0,0 @@ -from operator import eq -import os -import random -import time -import sys -from clickhouse_driver import Client -import numpy as np -import subprocess -import multiprocessing -from multiprocessing import Manager - -warmup_runs = 10 -calculated_runs = 10 -seconds = 30 -max_instances_number = 8 -retest_number = 3 -retest_tolerance = 10 - - -def checkInt(str): - try: - int(str) - return True - except ValueError: - return False - - -def setup_client(index): - if index < 4: - port_idx = index - else: - port_idx = index + 4 - client = Client( - host="localhost", - database="default", - user="default", - password="", - port="900%d" % port_idx, - ) - union_mode_query = "SET union_default_mode='DISTINCT'" - client.execute(union_mode_query) - return client - - -def warm_client(clientN, clientL, query, loop): - for c_idx in range(clientN): - for _ in range(loop): - clientL[c_idx].execute(query) - - -def read_queries(queries_list): - queries = list() - queries_id = list() - with open(queries_list, "r") as f: - for line in f: - line = line.rstrip() - line = line.split("$") - queries_id.append(line[0]) - queries.append(line[1]) - return queries_id, queries - - -def run_task(client, cname, query, loop, query_latency): - start_time = time.time() - for i in range(loop): - client.execute(query) - query_latency.append(client.last_query.elapsed) - - end_time = time.time() - p95 = np.percentile(query_latency, 95) - print( - "CLIENT: {0} end. 
-> P95: %f, qps: %f".format(cname) - % (p95, loop / (end_time - start_time)) - ) - - -def run_multi_clients(clientN, clientList, query, loop): - client_pids = {} - start_time = time.time() - manager = multiprocessing.Manager() - query_latency_list0 = manager.list() - query_latency_list1 = manager.list() - query_latency_list2 = manager.list() - query_latency_list3 = manager.list() - query_latency_list4 = manager.list() - query_latency_list5 = manager.list() - query_latency_list6 = manager.list() - query_latency_list7 = manager.list() - - for c_idx in range(clientN): - client_name = "Role_%d" % c_idx - if c_idx == 0: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list0), - ) - elif c_idx == 1: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list1), - ) - elif c_idx == 2: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list2), - ) - elif c_idx == 3: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list3), - ) - elif c_idx == 4: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list4), - ) - elif c_idx == 5: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list5), - ) - elif c_idx == 6: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list6), - ) - elif c_idx == 7: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list7), - ) - else: - print("ERROR: CLIENT number dismatch!!") - exit() - print("CLIENT: %s start" % client_name) - client_pids[c_idx].start() - - for c_idx in range(clientN): - client_pids[c_idx].join() - end_time = time.time() - totalT = end_time - start_time - - query_latencyTotal = list() - for item in query_latency_list0: - query_latencyTotal.append(item) - for item in query_latency_list1: - query_latencyTotal.append(item) - for item in query_latency_list2: - query_latencyTotal.append(item) - for item in query_latency_list3: - query_latencyTotal.append(item) - for item in query_latency_list4: - query_latencyTotal.append(item) - for item in query_latency_list5: - query_latencyTotal.append(item) - for item in query_latency_list6: - query_latencyTotal.append(item) - for item in query_latency_list7: - query_latencyTotal.append(item) - - totalP95 = np.percentile(query_latencyTotal, 95) * 1000 - return totalT, totalP95 - - -def run_task_caculated(client, cname, query, loop): - query_latency = list() - start_time = time.time() - for i in range(loop): - client.execute(query) - query_latency.append(client.last_query.elapsed) - end_time = time.time() - p95 = np.percentile(query_latency, 95) - - -def run_multi_clients_caculated(clientN, clientList, query, loop): - client_pids = {} - start_time = time.time() - for c_idx in range(clientN): - client_name = "Role_%d" % c_idx - client_pids[c_idx] = multiprocessing.Process( - target=run_task_caculated, - args=(clientList[c_idx], client_name, query, loop), - ) - client_pids[c_idx].start() - for c_idx in range(clientN): - client_pids[c_idx].join() - end_time = time.time() - 
totalT = end_time - start_time - return totalT - - -if __name__ == "__main__": - client_number = 1 - queries = list() - queries_id = list() - - if len(sys.argv) != 3: - print( - "usage: python3 client_stressing_test.py [queries_file_path] [client_number]" - ) - sys.exit() - else: - queries_list = sys.argv[1] - client_number = int(sys.argv[2]) - print( - "queries_file_path: %s, client_number: %d" % (queries_list, client_number) - ) - if not os.path.isfile(queries_list) or not os.access(queries_list, os.R_OK): - print("please check the right path for queries file") - sys.exit() - if ( - not checkInt(sys.argv[2]) - or int(sys.argv[2]) > max_instances_number - or int(sys.argv[2]) < 1 - ): - print("client_number should be in [1~%d]" % max_instances_number) - sys.exit() - - client_list = {} - queries_id, queries = read_queries(queries_list) - - for c_idx in range(client_number): - client_list[c_idx] = setup_client(c_idx) - # clear cache - os.system("sync; echo 3 > /proc/sys/vm/drop_caches") - - print("###Polit Run Begin") - for i in queries: - warm_client(client_number, client_list, i, 1) - print("###Polit Run End -> Start stressing....") - - query_index = 0 - for q in queries: - print( - "\n###START -> Index: %d, ID: %s, Query: %s" - % (query_index, queries_id[query_index], q) - ) - warm_client(client_number, client_list, q, warmup_runs) - print("###Warm Done!") - for j in range(0, retest_number): - totalT = run_multi_clients_caculated( - client_number, client_list, q, calculated_runs - ) - curr_loop = int(seconds * calculated_runs / totalT) + 1 - print( - "###Calculation Done! -> loopN: %d, expected seconds:%d" - % (curr_loop, seconds) - ) - - print("###Stress Running! -> %d iterations......" % curr_loop) - - totalT, totalP95 = run_multi_clients( - client_number, client_list, q, curr_loop - ) - - if totalT > (seconds - retest_tolerance) and totalT < ( - seconds + retest_tolerance - ): - break - else: - print( - "###totalT:%d is far way from expected seconds:%d. Run again ->j:%d!" - % (totalT, seconds, j) - ) - - print( - "###Completed! 
-> ID: %s, clientN: %d, totalT: %.2f s, latencyAVG: %.2f ms, P95: %.2f ms, QPS_Final: %.2f" - % ( - queries_id[query_index], - client_number, - totalT, - totalT * 1000 / (curr_loop * client_number), - totalP95, - ((curr_loop * client_number) / totalT), - ) - ) - query_index += 1 - print("###Finished!") diff --git a/contrib/qpl-cmake/benchmark_sample/client_scripts/queries_ssb.sql b/contrib/qpl-cmake/benchmark_sample/client_scripts/queries_ssb.sql deleted file mode 100644 index abf2df6503a..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/client_scripts/queries_ssb.sql +++ /dev/null @@ -1,10 +0,0 @@ -Q1.1$SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue FROM lineorder_flat WHERE toYear(LO_ORDERDATE) = 1993 AND LO_DISCOUNT BETWEEN 1 AND 3 AND LO_QUANTITY < 25; -Q2.1$SELECT sum(LO_REVENUE),toYear(LO_ORDERDATE) AS year,P_BRAND FROM lineorder_flat WHERE P_CATEGORY = 'MFGR#12' AND S_REGION = 'AMERICA' GROUP BY year,P_BRAND ORDER BY year,P_BRAND; -Q2.2$SELECT sum(LO_REVENUE),toYear(LO_ORDERDATE) AS year,P_BRAND FROM lineorder_flat WHERE P_BRAND >= 'MFGR#2221' AND P_BRAND <= 'MFGR#2228' AND S_REGION = 'ASIA' GROUP BY year,P_BRAND ORDER BY year,P_BRAND; -Q2.3$SELECT sum(LO_REVENUE),toYear(LO_ORDERDATE) AS year,P_BRAND FROM lineorder_flat WHERE P_BRAND = 'MFGR#2239' AND S_REGION = 'EUROPE' GROUP BY year,P_BRAND ORDER BY year,P_BRAND; -Q3.1$SELECT C_NATION,S_NATION,toYear(LO_ORDERDATE) AS year,sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE C_REGION = 'ASIA' AND S_REGION = 'ASIA' AND year >= 1992 AND year <= 1997 GROUP BY C_NATION,S_NATION,year ORDER BY year ASC,revenue DESC; -Q3.2$SELECT C_CITY,S_CITY,toYear(LO_ORDERDATE) AS year,sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE C_NATION = 'UNITED STATES' AND S_NATION = 'UNITED STATES' AND year >= 1992 AND year <= 1997 GROUP BY C_CITY,S_CITY,year ORDER BY year ASC,revenue DESC; -Q3.3$SELECT C_CITY,S_CITY,toYear(LO_ORDERDATE) AS year,sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND year >= 1992 AND year <= 1997 GROUP BY C_CITY,S_CITY,year ORDER BY year ASC,revenue DESC; -Q4.1$SELECT toYear(LO_ORDERDATE) AS year,C_NATION,sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') GROUP BY year,C_NATION ORDER BY year ASC,C_NATION ASC; -Q4.2$SELECT toYear(LO_ORDERDATE) AS year,S_NATION,P_CATEGORY,sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (year = 1997 OR year = 1998) AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') GROUP BY year,S_NATION,P_CATEGORY ORDER BY year ASC,S_NATION ASC,P_CATEGORY ASC; -Q4.3$SELECT toYear(LO_ORDERDATE) AS year,S_CITY,P_BRAND,sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE S_NATION = 'UNITED STATES' AND (year = 1997 OR year = 1998) AND P_CATEGORY = 'MFGR#14' GROUP BY year,S_CITY,P_BRAND ORDER BY year ASC,S_CITY ASC,P_BRAND ASC; diff --git a/contrib/qpl-cmake/benchmark_sample/client_scripts/run_ssb.sh b/contrib/qpl-cmake/benchmark_sample/client_scripts/run_ssb.sh deleted file mode 100644 index 6067b1058f2..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/client_scripts/run_ssb.sh +++ /dev/null @@ -1,6 +0,0 @@ -WORKING_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.." -if [ ! 
-d "${WORKING_DIR}/output" ]; then -mkdir ${WORKING_DIR}/output -fi -bash allin1_ssb.sh 2 > ${WORKING_DIR}/output/run.log -echo "Please check log in: ${WORKING_DIR}/output/run.log" \ No newline at end of file diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/deflate/config_deflate.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/deflate/config_deflate.xml deleted file mode 100644 index ab77a9cdcbe..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/deflate/config_deflate.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8123 - 9000 - 9004 - - ./ - - 8589934592 - 5368709120 - true - - - - deflate_qpl - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/deflate_s2/config_deflate_s2.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/deflate_s2/config_deflate_s2.xml deleted file mode 100644 index b71456486f5..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/deflate_s2/config_deflate_s2.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8124 - 9001 - 9005 - - ./ - - 8589934592 - 5368709120 - true - - - - deflate_qpl - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/lz4/config_lz4.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/lz4/config_lz4.xml deleted file mode 100644 index f4dc59b60aa..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/lz4/config_lz4.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8123 - 9000 - 9004 - - ./ - - 8589934592 - 5368709120 - true - - - - lz4 - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/lz4_s2/config_lz4_s2.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/lz4_s2/config_lz4_s2.xml deleted file mode 100644 index 357db8942d7..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/lz4_s2/config_lz4_s2.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8124 - 9001 - 9005 - - ./ - - 8589934592 - 5368709120 - true - - - - lz4 - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/zstd/config_zstd.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/zstd/config_zstd.xml deleted file mode 100644 index 1c4c738edaf..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/zstd/config_zstd.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8123 - 9000 - 9004 - - ./ - - 8589934592 - 5368709120 - true - - - - zstd - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/zstd_s2/config_zstd_s2.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/zstd_s2/config_zstd_s2.xml deleted file mode 100644 index f3db01b7739..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/zstd_s2/config_zstd_s2.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8124 - 9001 - 9005 - - ./ - - 8589934592 - 5368709120 - true - - - - zstd - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/re2-cmake/CMakeLists.txt b/contrib/re2-cmake/CMakeLists.txt index e72b5e1fca8..f773bc65a69 100644 --- a/contrib/re2-cmake/CMakeLists.txt +++ b/contrib/re2-cmake/CMakeLists.txt @@ -27,6 +27,17 @@ set(RE2_SOURCES add_library(_re2 ${RE2_SOURCES}) target_include_directories(_re2 PUBLIC 
"${SRC_DIR}") -target_link_libraries(_re2 ch_contrib::abseil_str_format) +target_link_libraries(_re2 PRIVATE + absl::base + absl::core_headers + absl::fixed_array + absl::flat_hash_map + absl::flat_hash_set + absl::inlined_vector + absl::strings + absl::str_format + absl::synchronization + absl::optional + absl::span) add_library(ch_contrib::re2 ALIAS _re2) diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 2b6c48f0b38..7d7666dff87 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -93,11 +93,9 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") -if(HAVE_SSE42) +if(ENABLE_AVX2 AND ENABLE_PCLMULQDQ) add_definitions(-DHAVE_SSE42) add_definitions(-DHAVE_PCLMUL) -elseif(FORCE_SSE42) - message(FATAL_ERROR "FORCE_SSE42=ON but unable to compile with SSE4.2 enabled") endif() set (HAVE_THREAD_LOCAL 1) @@ -429,7 +427,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc rocksdb_build_version.cc) -if(HAVE_SSE42) +if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ) set_source_files_properties( "${ROCKSDB_SOURCE_DIR}/util/crc32c.cc" PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul") diff --git a/contrib/thrift-cmake/CMakeLists.txt b/contrib/thrift-cmake/CMakeLists.txt index d6aa6b9e5f2..89a444cfb83 100644 --- a/contrib/thrift-cmake/CMakeLists.txt +++ b/contrib/thrift-cmake/CMakeLists.txt @@ -47,8 +47,6 @@ set(thriftcpp_threads_SOURCES "${LIBRARY_DIR}/src/thrift/concurrency/Mutex.cpp" ) -include("${ClickHouse_SOURCE_DIR}/contrib/thrift/build/cmake/ConfigureChecks.cmake") # makes config.h - set (HAVE_ARPA_INET_H 1) set (HAVE_FCNTL_H 1) set (HAVE_GETOPT_H 1) @@ -81,10 +79,6 @@ if (OS_LINUX AND NOT USE_MUSL) set (STRERROR_R_CHAR_P 1) endif () -#set(PACKAGE ${PACKAGE_NAME}) -#set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}") -#set(VERSION ${thrift_VERSION}) - # generate a config.h file configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build/cmake/config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/thrift/config.h") diff --git a/contrib/update-submodules.sh b/contrib/update-submodules.sh index b612d25352b..b12f3f924dc 100755 --- a/contrib/update-submodules.sh +++ b/contrib/update-submodules.sh @@ -9,4 +9,16 @@ cd $GIT_DIR contrib/sparse-checkout/setup-sparse-checkout.sh git submodule init git submodule sync -git config --file .gitmodules --get-regexp .*path | sed 's/[^ ]* //' | xargs -I _ --max-procs 64 git submodule update --depth=1 --single-branch _ +# NOTE: do not use --remote for `git submodule update`[1] command, since the submodule references to the specific commit SHA1 in the subproject. +# It may cause unexpected behavior. Instead you need to commit a new SHA1 for a submodule. +# +# [1] - https://git-scm.com/book/en/v2/Git-Tools-Submodules +git config --file .gitmodules --get-regexp '.*path' | sed 's/[^ ]* //' | xargs -I _ --max-procs 64 git submodule update --depth=1 --single-branch _ + +# We don't want to depend on any third-party CMake files. +# To check it, find and delete them. 
+grep -o -P '"contrib/[^"]+"' .gitmodules | + grep -v -P 'contrib/(llvm-project|google-protobuf|grpc|abseil-cpp|corrosion)' | + xargs -I@ find @ \ + -'(' -name 'CMakeLists.txt' -or -name '*.cmake' -')' -and -not -name '*.h.cmake' \ + -delete diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 63de9f6c462..b174dfde675 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.10.4.25" +ARG VERSION="23.10.5.20" ARG PACKAGES="clickhouse-keeper" # user/group precreated explicitly with fixed uid/gid on purpose. diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index fb033e28959..20fb97c80bb 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -6,29 +6,27 @@ FROM clickhouse/test-util:latest AS cctools ENV CC=clang-${LLVM_VERSION} ENV CXX=clang++-${LLVM_VERSION} # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# DO NOT PUT ANYTHING BEFORE THREE NEXT `RUN` DIRECTIVES +# DO NOT PUT ANYTHING BEFORE THE NEXT TWO `RUN` DIRECTIVES # THE MOST HEAVY OPERATION MUST BE THE FIRST IN THE CACHE # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # libtapi is required to support .tbh format from recent MacOS SDKs -RUN git clone --depth 1 https://github.com/tpoechtrager/apple-libtapi.git \ +RUN git clone https://github.com/tpoechtrager/apple-libtapi.git \ && cd apple-libtapi \ + && git checkout 15dfc2a8c9a2a89d06ff227560a69f5265b692f9 \ && INSTALLPREFIX=/cctools ./build.sh \ && ./install.sh \ && cd .. \ && rm -rf apple-libtapi # Build and install tools for cross-linking to Darwin (x86-64) -RUN git clone --depth 1 https://github.com/tpoechtrager/cctools-port.git \ +# Build and install tools for cross-linking to Darwin (aarch64) +RUN git clone https://github.com/tpoechtrager/cctools-port.git \ && cd cctools-port/cctools \ + && git checkout 2a3e1c2a6ff54a30f898b70cfb9ba1692a55fad7 \ && ./configure --prefix=/cctools --with-libtapi=/cctools \ --target=x86_64-apple-darwin \ && make install -j$(nproc) \ - && cd ../.. \ - && rm -rf cctools-port - -# Build and install tools for cross-linking to Darwin (aarch64) -RUN git clone --depth 1 https://github.com/tpoechtrager/cctools-port.git \ - && cd cctools-port/cctools \ + && make clean \ && ./configure --prefix=/cctools --with-libtapi=/cctools \ --target=aarch64-apple-darwin \ && make install -j$(nproc) \ @@ -62,19 +60,12 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ rustup target add aarch64-unknown-linux-musl && \ rustup target add riscv64gc-unknown-linux-gnu -# NOTE: Seems like gcc-11 is too new for ubuntu20 repository # A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work): RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \ && apt-get update \ && apt-get install --yes \ binutils-riscv64-linux-gnu \ build-essential \ - g++-11 \ - gcc-11 \ - gcc-aarch64-linux-gnu \ - libc6 \ - libc6-dev \ - libc6-dev-arm64-cross \ python3-boto3 \ yasm \ zstd \ diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 6b6374d08c9..fd9bfcaabb2 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -22,6 +22,7 @@ if [ "$EXTRACT_TOOLCHAIN_DARWIN" = "1" ]; then fi fi + # Uncomment to debug ccache. 
Don't put ccache log in /output right away, or it # will be confusingly packed into the "performance" package. # export CCACHE_LOGFILE=/build/ccache.log @@ -32,6 +33,7 @@ mkdir -p /build/build_docker cd /build/build_docker rm -f CMakeCache.txt + if [ -n "$MAKE_DEB" ]; then rm -rf /build/packages/root # NOTE: this is for backward compatibility with previous releases, diff --git a/docker/packager/packager b/docker/packager/packager index e63a4912e7c..b5bcbada1da 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -236,16 +236,14 @@ def parse_env_variables( cc = compiler result.append("DEB_ARCH=amd64") - cxx = cc.replace("gcc", "g++").replace("clang", "clang++") + cxx = cc.replace("clang", "clang++") if package_type == "deb": - # NOTE: This are the env for packages/build script + # NOTE: This is the env for packages/build script result.append("MAKE_DEB=true") cmake_flags.append("-DENABLE_TESTS=0") cmake_flags.append("-DENABLE_UTILS=0") - cmake_flags.append("-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON") cmake_flags.append("-DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON") - cmake_flags.append("-DCMAKE_AUTOGEN_VERBOSE=ON") cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr") cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc") cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var") @@ -265,12 +263,7 @@ def parse_env_variables( elif package_type == "fuzzers": cmake_flags.append("-DENABLE_FUZZING=1") cmake_flags.append("-DENABLE_PROTOBUF=1") - cmake_flags.append("-DUSE_INTERNAL_PROTOBUF_LIBRARY=1") cmake_flags.append("-DWITH_COVERAGE=1") - cmake_flags.append("-DCMAKE_AUTOGEN_VERBOSE=ON") - # cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr") - # cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc") - # cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var") # Reduce linking and building time by avoid *install/all dependencies cmake_flags.append("-DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON") diff --git a/docker/server/.dockerignore b/docker/server/.dockerignore deleted file mode 100644 index d360712c18f..00000000000 --- a/docker/server/.dockerignore +++ /dev/null @@ -1,8 +0,0 @@ -# post / preinstall scripts (not needed, we do it in Dockerfile) -alpine-root/install/* - -# docs (looks useless) -alpine-root/usr/share/doc/* - -# packages, etc. (used by alpine-build.sh) -tgz-packages/* diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index d26bb344fef..d4498abda6a 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.10.4.25" +ARG VERSION="23.10.5.20" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. 
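The "user/group precreated explicitly with fixed uid/gid on purpose" note above refers to a pattern like the one sketched below. This is a minimal sketch only; the numeric IDs, home directory, and account name are assumptions, not taken from the Dockerfile.

```bash
# Sketch of precreating the service account with fixed IDs (values are illustrative).
# Pinning the uid/gid keeps file ownership stable across image rebuilds and matches
# the ownership of volumes mounted from the host.
addgroup -S -g 101 clickhouse
adduser -S -u 101 -G clickhouse -h /var/lib/clickhouse clickhouse
```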
diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 53a36818121..08e95cd535b 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -30,7 +30,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="23.10.4.25" +ARG VERSION="23.10.5.20" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docker/test/base/setup_export_logs.sh b/docker/test/base/setup_export_logs.sh index ec24b237752..6e3721956c0 100755 --- a/docker/test/base/setup_export_logs.sh +++ b/docker/test/base/setup_export_logs.sh @@ -126,6 +126,9 @@ function setup_logs_replication # It's doesn't make sense to try creating tables if SYNC fails echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client "${CONNECTION_ARGS[@]}" || return 0 + debug_or_sanitizer_build=$(clickhouse-client -q "WITH ((SELECT value FROM system.build_options WHERE name='BUILD_TYPE') AS build, (SELECT value FROM system.build_options WHERE name='CXX_FLAGS') as flags) SELECT build='Debug' OR flags LIKE '%fsanitize%'") + echo "Build is debug or sanitizer: $debug_or_sanitizer_build" + # For each system log table: echo 'Create %_log tables' clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table @@ -133,7 +136,14 @@ function setup_logs_replication if [[ "$table" = "trace_log" ]] then EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS_TRACE_LOG}" - EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_TRACE_LOG}" + # Do not try to resolve stack traces in case of debug/sanitizers + # build, since it is too slow (flushing of trace_log can take ~1min + # with such MV attached) + if [[ "$debug_or_sanitizer_build" = 1 ]]; then + EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}" + else + EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_TRACE_LOG}" + fi else EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS}" EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}" @@ -182,3 +192,13 @@ function setup_logs_replication " || continue done ) + +function stop_logs_replication +{ + echo "Detach all logs replication" + clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | { + tee /dev/stderr + } | { + xargs -n1 -r -i clickhouse-client --query "drop table {}" + } +} diff --git a/docker/test/fuzzer/generate-test-j2.py b/docker/test/fuzzer/generate-test-j2.py index 11525163ed8..6fd37d6bd02 100755 --- a/docker/test/fuzzer/generate-test-j2.py +++ b/docker/test/fuzzer/generate-test-j2.py @@ -3,6 +3,7 @@ from argparse import ArgumentParser import os import jinja2 +import itertools def removesuffix(text, suffix): @@ -47,6 +48,7 @@ def main(args): loader=jinja2.FileSystemLoader(suite_dir), keep_trailing_newline=True, ) + j2env.globals.update(product=itertools.product) test_names = os.listdir(suite_dir) for test_name in test_names: diff --git a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml index ecd7aae2e4a..023f257253a 100644 --- a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml +++ b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml @@ -23,11 +23,6 @@ 10G - - - - - 200 diff --git 
a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index af1ce0c4dd4..8aeb06ec27b 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -212,11 +212,11 @@ quit gdb -batch -command script.gdb -p $server_pid & sleep 5 - # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s) + # gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s) time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||: # Check connectivity after we attach gdb, because it might cause the server - # to freeze and the fuzzer will fail. In debug build it can take a lot of time. + # to freeze, and the fuzzer will fail. In debug build, it can take a lot of time. for _ in {1..180} do if clickhouse-client --query "select 1" @@ -226,14 +226,15 @@ quit sleep 1 done kill -0 $server_pid # This checks that it is our server that is started and not some other one - echo 'Server started and responded' + echo 'Server started and responded.' setup_logs_replication # SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric. - # SC2046: Quote this to prevent word splitting. Actually I need word splitting. + # SC2046: Quote this to prevent word splitting. Actually, I need word splitting. # shellcheck disable=SC2012,SC2046 timeout -s TERM --preserve-status 30m clickhouse-client \ + --max_memory_usage_in_client=1000000000 \ --receive_timeout=10 \ --receive_data_timeout_ms=10000 \ --stacktrace \ @@ -253,10 +254,10 @@ quit wait "$fuzzer_pid" || fuzzer_exit_code=$? echo "Fuzzer exit code is $fuzzer_exit_code" - # If the server dies, most often the fuzzer returns code 210: connetion + # If the server dies, most often the fuzzer returns Code 210: Connection # refused, and sometimes also code 32: attempt to read after eof. For - # simplicity, check again whether the server is accepting connections, using - # clickhouse-client. We don't check for the existence of the server process, because + # simplicity, check again whether the server is accepting connections using + # clickhouse-client. We don't check for the existence of the server process, because # the process is still present while the server is terminating and not # accepting the connections anymore.
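The comment block above describes the recovery check in prose; a minimal sketch of that probe is shown below. It is not part of run-fuzzer.sh, and the log messages are assumptions — only the use of clickhouse-client and the meaning of code 210 ("Connection refused") come from the comments above.

```bash
# Minimal sketch of the post-fuzzer probe described above (not part of run-fuzzer.sh).
# clickhouse-client exits with code 210 ("Connection refused") when the server is gone,
# so a single extra query distinguishes a crashed server from a fuzzer-side failure.
if clickhouse-client --query "SELECT 1" >/dev/null 2>&1; then
    echo "Server still accepts connections; treat the failure as fuzzer-side"
else
    echo "Server is not accepting connections; it most likely died"
fi
```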
diff --git a/docker/test/integration/runner/compose/docker_compose_rabbitmq.yml b/docker/test/integration/runner/compose/docker_compose_rabbitmq.yml index 2db9fb589d2..61b21e0e3d9 100644 --- a/docker/test/integration/runner/compose/docker_compose_rabbitmq.yml +++ b/docker/test/integration/runner/compose/docker_compose_rabbitmq.yml @@ -6,9 +6,13 @@ services: hostname: rabbitmq1 expose: - ${RABBITMQ_PORT:-5672} + - ${RABBITMQ_SECURE_PORT:-5671} volumes: - type: ${RABBITMQ_LOGS_FS:-tmpfs} source: ${RABBITMQ_LOGS:-} target: /rabbitmq_logs/ - "${RABBITMQ_COOKIE_FILE}:/var/lib/rabbitmq/.erlang.cookie" - - /misc/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf \ No newline at end of file + - /misc/rabbitmq/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf + - /misc/rabbitmq/ca-cert.pem:/etc/rabbitmq/ca-cert.pem + - /misc/rabbitmq/server-cert.pem:/etc/rabbitmq/server-cert.pem + - /misc/rabbitmq/server-key.pem:/etc/rabbitmq/server-key.pem diff --git a/docker/test/integration/runner/misc/rabbitmq.conf b/docker/test/integration/runner/misc/rabbitmq.conf deleted file mode 100644 index 3527c83880b..00000000000 --- a/docker/test/integration/runner/misc/rabbitmq.conf +++ /dev/null @@ -1,8 +0,0 @@ -loopback_users.guest = false -listeners.tcp.default = 5672 -default_pass = clickhouse -default_user = root -management.tcp.port = 15672 - -log.file = /rabbitmq_logs/rabbit.log -log.file.level = debug diff --git a/docker/test/integration/runner/misc/rabbitmq/ca-cert.pem b/docker/test/integration/runner/misc/rabbitmq/ca-cert.pem new file mode 100644 index 00000000000..4a7b88f7936 --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/ca-cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFhTCCA22gAwIBAgIUWhfjFfbwannH3KIqITDtgcvSItMwDQYJKoZIhvcNAQEL +BQAwUjELMAkGA1UEBhMCUlUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2EwHhcNMjMxMTE0 +MTgyODI2WhcNMzMxMTExMTgyODI2WjBSMQswCQYDVQQGEwJSVTETMBEGA1UECAwK +U29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQsw +CQYDVQQDDAJjYTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJfJegdC +gavNGYzSdva+5QMxGvqyLwZzjophMeyEzlW/Di4KFGPho+fVlVMB/EwaTRoBRLEu +SQusQwoFg71mGvUTOpgHzlsUz4vcVVFOiL4bJdzCWQKzdC8M8rUFoks9FMboVeSx +jhAnKAm/NpCLpm9VYnRjEq2KEbJp7VkPAHgZEXR7VABwCFvmDcztrfcWfmXxm6IH +o+AkF/nqdphLu7Q1yDQiF8Q8TuszuhqgQ7/1PrRcaSADrF15jJjQb05sILpGCT3e +lxJYId5RF0+fgTIqy03bAKB53+8V8cAkowI4rvPTmcFXhcG3rkDO6lyZixHhlpKi +PmXEzHh0kfsRjzkNBP0CKqPnu3D2iymROiPAH2cteaYe6jdD2HIjuVLk/TjX1ZFy +DlZCrJIwj0l8A2xAfLq8Gw5RSr0a9k5TiMD5nZtfd12Vd0K82vO32vmcjO2Igddc +VWccDDwUY/ZWV3uznkusOBrB8wba3ZsXA5hjJzs0KlTvQKPjX0y4lFMmZGbelwjt +pR5dRNLi5XTdMPzV0mAnvJhDTFEmME19Bh6AEsjuAz3gHUdwNTbSxUS3mF/hTL9k +v2wh5udUAOwqD1uEzqPJyG4JCJQozIDOEEZVixWqQ60b9wUHN8meqO4y9fxTdmHW +Vo5BAF1xEJhJJb0QY/O6GahPtWqb/Mr1rtPJAgMBAAGjUzBRMB0GA1UdDgQWBBSw +fQcOabXwX/v9F1hd2cmuIug56jAfBgNVHSMEGDAWgBSwfQcOabXwX/v9F1hd2cmu +Iug56jAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQAms8y6RVxl +mKSUbsU8JscYwOzcRUQJWETeIr4rtZvMHH+3vkdBU0yKxGpEm7U8J3+5oVTYPhbs +11ZAL+DvIZ6gT6pjDvECyVox1OkjNogz843fTMbNqjuuehjSKXwpMTy5/kmT2aLj +//nBi5UX1xo3RQ9vtmBwzZ3VFK99DFXraDOPS/yk43WV2uqdWsXCNvyEyCHmM1IB +9FQe2EFcO6s4/N+TarhIZ8Udhj5bl8d4eDd1yEckmTD4aHJBgMII2uEwrAxR5CT1 +tCqUKutvNrkXI5PIULvmy+Lwm7PJAC7grPtUHK6anSugpljd7bFj18fHH9APiC45 +Ou4OOK1BUZogCEo7rD36UlanxQO0GEzgDCVEoEdoe0WRdc6T9b4fM8vpQqwBdf9t +nkPB8oLCKerqqYwCiMuWm4BcRmExA7ypIkUCcluGO9/kTmdps3NqOvET9oLTjXuA +z5TPmaK5a3poKLoxBfv6WfRTgisOnMNTsjL1R8+xuhEn5hSlE2r3wAi8Cys9Z9PV +LhTj0SRTXILd2NW3lO8QfO0pGdjgk90GqkyUY9YjuiMVPvdUAFQsHm+0GEZEXjOD 
+Bw7tLSJQ4IKhfactg/Puxd15ahcWAxeelyED+w/zVGdHYblqbvfdtiGj370KVhoj +DL5HkdPa0IhTPqMBnmoVQ4C/WzKofXBjQQ== +-----END CERTIFICATE----- diff --git a/docker/test/integration/runner/misc/rabbitmq/generate_certs.sh b/docker/test/integration/runner/misc/rabbitmq/generate_certs.sh new file mode 100755 index 00000000000..442d2fe004f --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/generate_certs.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# 1. Generate CA's private key and self-signed certificate +openssl req -newkey rsa:4096 -x509 -days 3650 -nodes -batch -keyout ca-key.pem -out ca-cert.pem -subj "/C=RU/ST=Some-State/O=Internet Widgits Pty Ltd/CN=ca" + +# 2. Generate server's private key and certificate signing request (CSR) +openssl req -newkey rsa:4096 -nodes -batch -keyout server-key.pem -out server-req.pem -subj "/C=RU/ST=Some-State/O=Internet Widgits Pty Ltd/CN=server" + +# 3. Use CA's private key to sign server's CSR and get back the signed certificate +openssl x509 -req -days 3650 -in server-req.pem -CA ca-cert.pem -CAkey ca-key.pem -CAcreateserial -extfile server-ext.cnf -out server-cert.pem diff --git a/docker/test/integration/runner/misc/rabbitmq/rabbitmq.conf b/docker/test/integration/runner/misc/rabbitmq/rabbitmq.conf new file mode 100644 index 00000000000..258a282907a --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/rabbitmq.conf @@ -0,0 +1,15 @@ +loopback_users.guest = false +listeners.tcp.default = 5672 +default_pass = clickhouse +default_user = root +management.tcp.port = 15672 + +log.file = /rabbitmq_logs/rabbit.log +log.file.level = debug + +listeners.ssl.default = 5671 +ssl_options.verify = verify_none +ssl_options.fail_if_no_peer_cert = false +ssl_options.cacertfile = /etc/rabbitmq/ca-cert.pem +ssl_options.certfile = /etc/rabbitmq/server-cert.pem +ssl_options.keyfile = /etc/rabbitmq/server-key.pem diff --git a/docker/test/integration/runner/misc/rabbitmq/server-cert.pem b/docker/test/integration/runner/misc/rabbitmq/server-cert.pem new file mode 100644 index 00000000000..338de91aa0f --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/server-cert.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFpTCCA42gAwIBAgIUJvQslezZO09XgFGQCxOM6orIsWowDQYJKoZIhvcNAQEL +BQAwUjELMAkGA1UEBhMCUlUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2EwHhcNMjMxMTE0 +MTgyODI5WhcNMzMxMTExMTgyODI5WjBWMQswCQYDVQQGEwJSVTETMBEGA1UECAwK +U29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8w +DQYDVQQDDAZzZXJ2ZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCe +o/K71WdKpVpdDvhaZy6wBVhFlu7j7DhfTSYvcPpAJfExmzO8JK3vh5/yGyAO1t79 +gAjqyXLMCZKw7ajM2rez9YnGYqaFi70BlTcU2KQ8LbFEYRc3cYNDmmWIKBpwpSri +We5SQrRLnDXqAn6T8FG5ejQ/t+1IUMrtZENB4lp8fBmEOJb5yr1TE++6EhiDBQho +cLDWWWP8b55kyZhqP/VgmId4lvboGMRKxbiRJ6/SPr/i/pteBD8jTYfbJr6ceXov +/p5yxIp61z5ry1anU7W3B8jTl/gj7SqtFdSnRajZ0DGJJAUKpiiJSCSlp5YB5Ub2 +eBBMHmdA5R1MuiU9TOA35nUW5wkhEOJXnBR/WCsYioVmn/+5dm6JPYiwp/TefYnr +x9iLbb/Tyx7MnXzeyvKg781SwmnvS6Blhtr0zhAW9szZz8cVHPBqFs6PzGs/5mwE +C+tM3Zp85aHd28nIT4NQLHdMDwVmGwmPdy4uavtYWMDhsuIyEU8hCZymiHhPnuHU +VbmfZ8GOTIzUgQAvZb0fL1Xow2Tf6XuARnvuU9weRttg9jSOqPuUENRsFXv0mU8M +EpQjrxry88Wfz7bBEjN5JHC16PB/Nu7zTGJ4/slThbxNv0bIONzvTBPbXrKnxw7Z +d9WhGJI+LQxRqLTynQe6yzDwIuW9LRdBNTp7CtQRwQIDAQABo28wbTArBgNVHREE +JDAigiBpbnRlZ3JhdGlvbi10ZXN0cy5jbGlja2hvdXNlLmNvbTAdBgNVHQ4EFgQU +54GvBUYWvMADpTz/zglwMlaJuskwHwYDVR0jBBgwFoAUsH0HDmm18F/7/RdYXdnJ +riLoOeowDQYJKoZIhvcNAQELBQADggIBADfNH6O6ay+xg0XmV6sR0n4j6PwL9Cnc 
+VjuCmHQbpFXfMvgCdfHvbtT0Y/pG7IoeKmrrm0JPvKa2E9Ht0j6ZnowQ2m9mJk8U +5Fd/PbC1I4KgVCw6HRSOcwqANJxOGe7RyN9PTZZ8fxzmzIR3FiQ2bXfr+LaotZOK +aVS8F8xCOzoMvL9LFls2YpEn20p/1EATIf2MFX3j9vKfcJVOyDJV4i5BMImStFLM +g3sdC96de/59yxt9khM0PNucU1ldNFs/kZVEcNSwGOAIgQEPwULJtDY+ZSWeROpX +EpWndN6zQsv1pdNvLtXsDXfi4YoH9QVaA/k4aFFJ08CjSZfMYmwyPOGsf/wqT65i +ADID2yb1A/FIIe/fM+d2gXHBVFBDmydJ1JCdCoYrEJgfWj1LO/0jLi34ZZ17Hu7F +D33fLARF9nlLzlUiWjcQlOjNoCM48AgG/3wHk4eiSfc/3PIJDuDGDa0NdtDeKKhH +XkP2ll4cMUH6EQ9KO1jHPmf5RokX4QJgH+ofO4U5XQFwc3lOyJzEQnED+wame7do +R7TE4F/OXhxLqA6DFkzXe89/kSCoAF9bjzmUn/ilrg8NXKKgprgHg4DJHgvCQVVC +34ab7Xj7msUm4D9vI+GAeUbUqnqCaWxDF6vCMT0Qq7iSVDxa/SV8TX8Vp2Zh+PSh +4m23Did+KjLq +-----END CERTIFICATE----- diff --git a/docker/test/integration/runner/misc/rabbitmq/server-ext.cnf b/docker/test/integration/runner/misc/rabbitmq/server-ext.cnf new file mode 100644 index 00000000000..49859873222 --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/server-ext.cnf @@ -0,0 +1 @@ +subjectAltName=DNS:integration-tests.clickhouse.com diff --git a/docker/test/integration/runner/misc/rabbitmq/server-key.pem b/docker/test/integration/runner/misc/rabbitmq/server-key.pem new file mode 100644 index 00000000000..92e93e8fba5 --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/server-key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCeo/K71WdKpVpd +DvhaZy6wBVhFlu7j7DhfTSYvcPpAJfExmzO8JK3vh5/yGyAO1t79gAjqyXLMCZKw +7ajM2rez9YnGYqaFi70BlTcU2KQ8LbFEYRc3cYNDmmWIKBpwpSriWe5SQrRLnDXq +An6T8FG5ejQ/t+1IUMrtZENB4lp8fBmEOJb5yr1TE++6EhiDBQhocLDWWWP8b55k +yZhqP/VgmId4lvboGMRKxbiRJ6/SPr/i/pteBD8jTYfbJr6ceXov/p5yxIp61z5r +y1anU7W3B8jTl/gj7SqtFdSnRajZ0DGJJAUKpiiJSCSlp5YB5Ub2eBBMHmdA5R1M +uiU9TOA35nUW5wkhEOJXnBR/WCsYioVmn/+5dm6JPYiwp/TefYnrx9iLbb/Tyx7M +nXzeyvKg781SwmnvS6Blhtr0zhAW9szZz8cVHPBqFs6PzGs/5mwEC+tM3Zp85aHd +28nIT4NQLHdMDwVmGwmPdy4uavtYWMDhsuIyEU8hCZymiHhPnuHUVbmfZ8GOTIzU +gQAvZb0fL1Xow2Tf6XuARnvuU9weRttg9jSOqPuUENRsFXv0mU8MEpQjrxry88Wf +z7bBEjN5JHC16PB/Nu7zTGJ4/slThbxNv0bIONzvTBPbXrKnxw7Zd9WhGJI+LQxR +qLTynQe6yzDwIuW9LRdBNTp7CtQRwQIDAQABAoICAA0lev0T3z5xW36wueYL/PN7 +TehebKeYsMc9BngR/bsJKea5fN0PkRZzf865brusFMifLp3+WbQM6wocd8uaKHUS +WPuGu1P/04bpDap9lYajJriK7ziaAI2+osFYyXAiT954I2bPvk8xv8oHsOOjm7Iq +LWBGZrSCdX6cu3IfRu5f/mFVqzVCFtRmp4wc6ckZxquZAx6QQ9fsjAzAJBBSAoyh +t0BICmgLfWDQ582no0tiBdbS0J9G7NCJIUQI/uzKqFSH3iuWm/84DSUzsZemOT3U +uFDInDil885qK7g87pQ2S5SY1o4eXOebgeX0cFrx3CKaqocUUewv0HDGUEW3NDFs +KhUvlJZIFgk6bMend16U6kfRCUsjLA22Rfxzanl53cGVywCeIMirnLYuEu0TsxyK +CblBvyhcpjrGi7FQskzR+J9LpZPnmtn6TAb7JCAALRVHcAGKhGeh613SjPUfkWb0 +KpDps08x8MWGEAALuHbOK0nMLFm+PuMt7+krqCeJET+XM44GT+6ZstrDv0RufxUN ++pkLW7AsVZoXcFvaOWjuyBvX/f6UHCSfueo0mB3H80WoftDIfdhM+AI7/oBTYCBx +Z8BtW+g7Eq3pOUg/Um7S7Z2bybBWE14kpi95gRf3upEYPqHJUpJPdu20lk24iAt9 +LCXF4AjZBIdAuyJrYOJBAoIBAQDd/Bm14WvmBOablGLn6hmohi6M75D+/eQanlg9 +eJhXJUVd8FzOTjKi70EHWvkqswenNDbe/WGtImqG+9G+N/ol2qhi5xVSQ2XQmcVQ +U+k15Bzm9xKM0OqsStFvRgP1Cy6Ms3/jxr5JEEwUepmjvWTDGTlhTQASA/D7Uh2q +5HpPiHEVm4g5eTAYWeAbI6cGwVS0L4y6xkFGde37Kh2P8ZodWB+d3fglVu4Ok9Nf +wE2f8MK2ewQ0SbF/Nj2WjlVomvOvOJG/2CDLuiH/vc4YUvLAm8pNwvsmgtSh1Okt +E/HfXegrlPPEgw6owqoQFt+aGUITgEhiwEVAcYS0pXzzkQX5AoIBAQC28wJ8ueKr +fINpJM2pSc7WRDFduP5yGsRreSLBXLKMbvOlIVb3PaWp11Cg3+X5O90bPXYJ9mBI +WGR0g14/VD8edxs2D5TUZcP4/vKXGHaWRY9Z4A3jVpjzAxAaviNDHJ08tLXEMXZQ +lbA7dX8z6lpoQfwnPzjBwB01mVegwXPeIwIIfT/FmAiGzvSnAMXBGSGWRRdzof0M +/vPFbgllcQmM4AnEGcErCgFRpwcssO87T2jnvf6QVE5JCcnUcGIli1ThxCU9TRZM +5s6R7Nvk3/UjwcpRcqMtnGpTT2QXSnRwvWUfM+bKTwaxz4PjqKpgIc11kwJAjlxk 
+4CxYf1mDGLwJAoIBAGFJRTNS8ejDKRXyOE6PaGNVOz2FGLTILJoF34JBQfKfYQFE +gEfiOYry9Dr3AdBW2fnLhmi//3jTZoB2CHwnKDhC1h1STSPaadq8KZ+ExuZZbNlE +WxrfzJlpyNPNiZpxJht/54K57Vc0D0PCX2dFb82ZVm5wQqGinJBocpwcugX1NCpW +GaOmmw9xBCigvWjWffriA/kvPhhVQtEaqg4Vwoctwd18FG645Gf7HV4Pd3WrHIrA +6xzHV0T7To6XHpNTpYybbDT50ZW3o4LjellqsPz8yfK+izdbizjJiM+6t/w+uauw +Ag2Tqm8HsWSPwbtVaoIFbLPqs+8EUTaieFp+qnECggEAVuaTdd9uFfrtCNKchh8z +CoAV2uj2pAim6E3//k0j2qURQozVnFdCC6zk9aWkvYB8BGZrXUwUbAjgnp+P8xD3 +cmctG77G+STls66WWMMcAUFFWHGe5y/JMxVvXuSWJ1i+L4m/FVRRWPHhZjznkSdu +jjtZpOLY+N9igIU4JHn/qbKDUrj7w8X1tuMzPuiVBqYDWDe1bg2x/6xS6qLb/71z +xeDdgrKhGOqFud1XARmCaW/M6tdKxg/lp7fokOpZFHBcf2kGL1ogj6LK2HHj+ZGQ +Bc4VZh7H9/BmaPA7IP0S1kKAeBPVOp/TFD737Pm/BC7KQ2DzHusAZEI/jkHfqO/k +0QKCAQEAuiYLn9iLgk4uQO9oaSBGWKrJsR2L2dqI7IWU0X9xJlsQrJKcEeWg4LXt +djLsz0HrxZV/c+Pnh79hmFlBoEmH+hz32D/xd+/qrwwAcMkHAwMbznJu0IIuW2O9 +Uzma++7SvVmr9H0DkUwXFP3jn1A2n3uuI4czqtQ8N7GiH0UAWR5CsIP7azHvZTSj +s4Fzf8rTE6pNqVgQXjrVbI9H/h0uPP4alJbhnPba9mgB1cGmfBEnPkKgYNqSZse+ +95G2TlcK74sKBUSdBKqYBZ4ZUeTXV974Nva9guE9vzDQt1Cj6k0HWISVPUshPzIh +qrdHdxcM6yhA0Z0Gu6zj+Zsy4lU8gA== +-----END PRIVATE KEY----- diff --git a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml b/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml index cb591f1a184..e780a99ecde 100644 --- a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml +++ b/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml @@ -34,9 +34,4 @@ 0 - - - 1 - - diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 9951d79d6ac..07b40ea3b3d 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -217,6 +217,9 @@ ls -la / clickhouse-client -q "system flush logs" ||: +# stop logs replication to make it possible to dump logs tables via clickhouse-local +stop_logs_replication + # Stop server so we can safely read data with clickhouse-local. # Why do we read data with clickhouse-local? # Because it's the simplest way to read it when server has crashed. 
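The comments above explain why logs replication is stopped and the server is shut down before reading data with clickhouse-local. A rough sketch of that read is shown below; the data path, the use of --only-system-tables, and the query are assumptions for illustration, not lines from run.sh.

```bash
# Rough sketch of dumping a system log table with clickhouse-local after the server is stopped
# (path, flags, and query are illustrative). clickhouse-local reads the on-disk tables directly,
# which works even if the server has crashed.
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables \
    --query "SELECT count() FROM system.query_log"
```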
diff --git a/docker/test/stateless/stress_tests.lib b/docker/test/stateless/stress_tests.lib index 551461b6eca..8f89c1b80dd 100644 --- a/docker/test/stateless/stress_tests.lib +++ b/docker/test/stateless/stress_tests.lib @@ -140,21 +140,6 @@ EOL --> $PWD -EOL - - # Analyzer is not yet ready for testing - cat > /etc/clickhouse-server/users.d/no_analyzer.xml < - - - - - - - - - - EOL } diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh index 356e3f27728..57b683a16c3 100644 --- a/docker/test/upgrade/run.sh +++ b/docker/test/upgrade/run.sh @@ -78,6 +78,7 @@ remove_keeper_config "create_if_not_exists" "[01]" rm /etc/clickhouse-server/config.d/merge_tree.xml rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml rm /etc/clickhouse-server/users.d/nonconst_timezone.xml +rm /etc/clickhouse-server/users.d/s3_cache_new.xml start stop @@ -114,6 +115,7 @@ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_defau rm /etc/clickhouse-server/config.d/merge_tree.xml rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml rm /etc/clickhouse-server/users.d/nonconst_timezone.xml +rm /etc/clickhouse-server/users.d/s3_cache_new.xml start diff --git a/docs/changelogs/v23.10.5.20-stable.md b/docs/changelogs/v23.10.5.20-stable.md new file mode 100644 index 00000000000..03e8c47481b --- /dev/null +++ b/docs/changelogs/v23.10.5.20-stable.md @@ -0,0 +1,28 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.10.5.20-stable (e84001e5c61) FIXME as compared to v23.10.4.25-stable (330fd687d41) + +#### Improvement +* Backported in [#56924](https://github.com/ClickHouse/ClickHouse/issues/56924): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Build/Testing/Packaging Improvement +* Backported in [#57023](https://github.com/ClickHouse/ClickHouse/issues/57023): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix ON CLUSTER queries without database on initial node [#56484](https://github.com/ClickHouse/ClickHouse/pull/56484) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)). 
+* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Fix client suggestions for user without grants [#56234](https://github.com/ClickHouse/ClickHouse/pull/56234) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + diff --git a/docs/changelogs/v23.3.18.15-lts.md b/docs/changelogs/v23.3.18.15-lts.md new file mode 100644 index 00000000000..3bf993a0960 --- /dev/null +++ b/docs/changelogs/v23.3.18.15-lts.md @@ -0,0 +1,26 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.3.18.15-lts (7228475d77a) FIXME as compared to v23.3.17.13-lts (e867d59020f) + +#### Improvement +* Backported in [#56928](https://github.com/ClickHouse/ClickHouse/issues/56928): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Build/Testing/Packaging Improvement +* Backported in [#57019](https://github.com/ClickHouse/ClickHouse/issues/57019): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)). +* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+ diff --git a/docs/changelogs/v23.8.8.20-lts.md b/docs/changelogs/v23.8.8.20-lts.md new file mode 100644 index 00000000000..345cfcccf17 --- /dev/null +++ b/docs/changelogs/v23.8.8.20-lts.md @@ -0,0 +1,28 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.8.8.20-lts (5e012a03bf2) FIXME as compared to v23.8.7.24-lts (812b95e14ba) + +#### Improvement +* Backported in [#56509](https://github.com/ClickHouse/ClickHouse/issues/56509): Allow backup of materialized view with dropped inner table instead of failing the backup. [#56387](https://github.com/ClickHouse/ClickHouse/pull/56387) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#56929](https://github.com/ClickHouse/ClickHouse/issues/56929): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Build/Testing/Packaging Improvement +* Backported in [#57020](https://github.com/ClickHouse/ClickHouse/issues/57020): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix ON CLUSTER queries without database on initial node [#56484](https://github.com/ClickHouse/ClickHouse/pull/56484) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)). +* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+ diff --git a/docs/changelogs/v23.9.6.20-stable.md b/docs/changelogs/v23.9.6.20-stable.md new file mode 100644 index 00000000000..b4aed625fea --- /dev/null +++ b/docs/changelogs/v23.9.6.20-stable.md @@ -0,0 +1,28 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.9.6.20-stable (cf7e84bb8cf) FIXME as compared to v23.9.5.29-stable (f8554c1a1ff) + +#### Improvement +* Backported in [#56930](https://github.com/ClickHouse/ClickHouse/issues/56930): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Build/Testing/Packaging Improvement +* Backported in [#57022](https://github.com/ClickHouse/ClickHouse/issues/57022): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix ON CLUSTER queries without database on initial node [#56484](https://github.com/ClickHouse/ClickHouse/pull/56484) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)). +* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Fix client suggestions for user without grants [#56234](https://github.com/ClickHouse/ClickHouse/pull/56234) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + diff --git a/docs/en/engines/database-engines/materialized-mysql.md b/docs/en/engines/database-engines/materialized-mysql.md index b7e567c7b6c..f32698f84f6 100644 --- a/docs/en/engines/database-engines/materialized-mysql.md +++ b/docs/en/engines/database-engines/materialized-mysql.md @@ -7,7 +7,10 @@ sidebar_position: 70 # [experimental] MaterializedMySQL :::note -This is an experimental feature that should not be used in production. +This database engine is experimental. 
To use it, set `allow_experimental_database_materialized_mysql` to 1 in your configuration files or by using the `SET` command: +```sql +SET allow_experimental_database_materialized_mysql=1 +``` ::: Creates a ClickHouse database with all the tables existing in MySQL, and all the data in those tables. The ClickHouse server works as MySQL replica. It reads `binlog` and performs DDL and DML queries. diff --git a/docs/en/engines/database-engines/materialized-postgresql.md b/docs/en/engines/database-engines/materialized-postgresql.md index 4e978947e36..3aa6dd01ea3 100644 --- a/docs/en/engines/database-engines/materialized-postgresql.md +++ b/docs/en/engines/database-engines/materialized-postgresql.md @@ -8,7 +8,7 @@ sidebar_position: 60 Creates a ClickHouse database with tables from PostgreSQL database. Firstly, database with engine `MaterializedPostgreSQL` creates a snapshot of PostgreSQL database and loads required tables. Required tables can include any subset of tables from any subset of schemas from specified database. Along with the snapshot database engine acquires LSN and once initial dump of tables is performed - it starts pulling updates from WAL. After database is created, newly added tables to PostgreSQL database are not automatically added to replication. They have to be added manually with `ATTACH TABLE db.table` query. -Replication is implemented with PostgreSQL Logical Replication Protocol, which does not allow to replicate DDL, but allows to know whether replication breaking changes happened (column type changes, adding/removing columns). Such changes are detected and according tables stop receiving updates. In this case you should use `ATTACH`/ `DETACH` queries to reload table completely. If DDL does not break replication (for example, renaming a column) table will still receive updates (insertion is done by position). +Replication is implemented with PostgreSQL Logical Replication Protocol, which does not allow to replicate DDL, but allows to know whether replication breaking changes happened (column type changes, adding/removing columns). Such changes are detected and according tables stop receiving updates. In this case you should use `ATTACH`/ `DETACH PERMANENTLY` queries to reload table completely. If DDL does not break replication (for example, renaming a column) table will still receive updates (insertion is done by position). :::note This database engine is experimental. To use it, set `allow_experimental_database_materialized_postgresql` to 1 in your configuration files or by using the `SET` command: @@ -63,7 +63,7 @@ Before version 22.1, adding a table to replication left a non-removed temporary It is possible to remove specific tables from replication: ``` sql -DETACH TABLE postgres_database.table_to_remove; +DETACH TABLE postgres_database.table_to_remove PERMANENTLY; ``` ## PostgreSQL schema {#schema} diff --git a/docs/en/engines/table-engines/integrations/azureBlobStorage.md b/docs/en/engines/table-engines/integrations/azureBlobStorage.md index 3df08ee2ffb..c6525121667 100644 --- a/docs/en/engines/table-engines/integrations/azureBlobStorage.md +++ b/docs/en/engines/table-engines/integrations/azureBlobStorage.md @@ -47,6 +47,12 @@ SELECT * FROM test_table; └──────┴───────┘ ``` +## Virtual columns {#virtual-columns} + +- `_path` — Path to the file. Type: `LowCardinality(String)`. +- `_file` — Name of the file. Type: `LowCardinality(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
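As a rough illustration of the virtual columns listed above, they can be selected like ordinary columns; the sketch below reuses the `test_table` from the earlier example on this page and assumes the underlying blobs are readable:

```sql
-- Minimal sketch: virtual columns are not part of the table definition and
-- are only materialized when referenced explicitly in the query.
SELECT _path, _file, _size, *
FROM test_table;
```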
+ ## See also [Azure Blob Storage Table Function](/docs/en/sql-reference/table-functions/azureBlobStorage) diff --git a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md index 23ab89e1983..9af857b0835 100644 --- a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md @@ -85,6 +85,10 @@ You can also change any [rocksdb options](https://github.com/facebook/rocksdb/wi ``` +By default, the trivial approximate count optimization is turned off, which might affect the performance of `count()` queries. To enable this +optimization, set `optimize_trivial_approximate_count_query = 1`. Also, this setting affects `system.tables` for the EmbeddedRocksDB engine; +turn on the setting to see approximate values for `total_rows` and `total_bytes`. + ## Supported operations {#supported-operations} ### Inserts diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md index c677123a8d0..19221c256f9 100644 --- a/docs/en/engines/table-engines/integrations/hdfs.md +++ b/docs/en/engines/table-engines/integrations/hdfs.md @@ -230,8 +230,9 @@ libhdfs3 support HDFS namenode HA. ## Virtual Columns {#virtual-columns} -- `_path` — Path to the file. -- `_file` — Name of the file. +- `_path` — Path to the file. Type: `LowCardinality(String)`. +- `_file` — Name of the file. Type: `LowCardinality(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. ## Storage Settings {#storage-settings} diff --git a/docs/en/engines/table-engines/integrations/materialized-postgresql.md b/docs/en/engines/table-engines/integrations/materialized-postgresql.md index 02afec5cfd6..4d83ca79d5c 100644 --- a/docs/en/engines/table-engines/integrations/materialized-postgresql.md +++ b/docs/en/engines/table-engines/integrations/materialized-postgresql.md @@ -8,6 +8,14 @@ sidebar_label: MaterializedPostgreSQL Creates ClickHouse table with an initial data dump of PostgreSQL table and starts replication process, i.e. executes background job to apply new changes as they happen on PostgreSQL table in the remote PostgreSQL database. +:::note +This table engine is experimental. To use it, set `allow_experimental_materialized_postgresql_table` to 1 in your configuration files or by using the `SET` command: +```sql +SET allow_experimental_materialized_postgresql_table=1 +``` +::: + + If more than one table is required, it is highly recommended to use the [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md) database engine instead of the table engine and use the `materialized_postgresql_tables_list` setting, which specifies the tables to be replicated (will also be possible to add database `schema`). It will be much better in terms of CPU, fewer connections and fewer replication slots inside the remote PostgreSQL database. ## Creating a Table {#creating-a-table} diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index 2967a15494c..3144bdd32fa 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -142,8 +142,9 @@ Code: 48. DB::Exception: Received from localhost:9000. DB::Exception: Reading fr ## Virtual columns {#virtual-columns} -- `_path` — Path to the file. -- `_file` — Name of the file. +- `_path` — Path to the file.
Type: `LowCardinality(String)`. +- `_file` — Name of the file. Type: `LowCardinality(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns). diff --git a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md index 7e564b23676..97d37e476ae 100644 --- a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -14,7 +14,7 @@ You should never use too granular of partitioning. Don't partition your data by Partitioning is available for the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). [Materialized views](../../../engines/table-engines/special/materializedview.md#materializedview) based on MergeTree tables support partitioning, as well. -A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible. +A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible. Partitions improve performance for queries containing a partitioning key because ClickHouse will filter for that partition before selecting the parts and granules within the partition. The partition is specified in the `PARTITION BY expr` clause when [creating a table](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table). The partition key can be any expression from the table columns. For example, to specify partitioning by month, use the expression `toYYYYMM(date_column)`: diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index e615c9ad9d3..89b002da192 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -6,7 +6,7 @@ sidebar_label: MergeTree # MergeTree -The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most robust ClickHouse table engines. +The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most commonly used and most robust ClickHouse table engines. Engines in the `MergeTree` family are designed for inserting a very large amount of data into a table. The data is quickly written to the table part by part, then rules are applied for merging the parts in the background. This method is much more efficient than continually rewriting the data in storage during insert. @@ -32,13 +32,15 @@ Main features: The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does not belong to the `*MergeTree` family.
::: +If you need to update rows frequently, we recommend using the [`ReplacingMergeTree`](/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md) table engine. Using `ALTER TABLE my_table UPDATE` to update rows triggers a mutation, which causes parts to be re-written and uses IO/resources. With `ReplacingMergeTree`, you can simply insert the updated rows and the old rows will be replaced according to the table sorting key. + ## Creating a Table {#table_engine-mergetree-creating-a-table} ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( - name1 [type1] [[NOT] NULL] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr1] [COMMENT ...] [CODEC(codec1)] [TTL expr1] [PRIMARY KEY], - name2 [type2] [[NOT] NULL] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr2] [COMMENT ...] [CODEC(codec2)] [TTL expr2] [PRIMARY KEY], + name1 [type1] [[NOT] NULL] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr1] [COMMENT ...] [CODEC(codec1)] [STATISTIC(stat1)] [TTL expr1] [PRIMARY KEY], + name2 [type2] [[NOT] NULL] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr2] [COMMENT ...] [CODEC(codec2)] [STATISTIC(stat2)] [TTL expr2] [PRIMARY KEY], ... INDEX index_name1 expr1 TYPE type1(...) [GRANULARITY value1], INDEX index_name2 expr2 TYPE type2(...) [GRANULARITY value2], @@ -502,8 +504,8 @@ Indexes of type `set` can be utilized by all functions. The other index types ar | Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | inverted | |------------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|----------| -| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, <>)](/docs/en/sql-reference/functions/comparison-functions.md/#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | +| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, <>)](/docs/en/sql-reference/functions/comparison-functions.md/#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | | [like](/docs/en/sql-reference/functions/string-search-functions.md/#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ | | [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ | | [startsWith](/docs/en/sql-reference/functions/string-functions.md/#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ | @@ -511,10 +513,10 @@ Indexes of type `set` can be utilized by all functions. 
The other index types ar | [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ | | [in](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | | [notIn](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | +| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | +| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | +| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | +| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | | [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | | [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | | [has](/docs/en/sql-reference/functions/array-functions#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ | @@ -1356,3 +1358,33 @@ In this sample configuration: - `_partition_value` — Values (a tuple) of a `partition by` expression. - `_sample_factor` — Sample factor (from the query). - `_block_number` — Block number of the row, it is persisted on merges when `allow_experimental_block_number_column` is set to true. + +## Column Statistics (Experimental) {#column-statistics} + +The statistic declaration is in the columns section of the `CREATE` query for tables from the `*MergeTree*` Family when we enable `set allow_experimental_statistic = 1`. + +``` sql +CREATE TABLE example_table +( + a Int64 STATISTIC(tdigest), + b Float64 +) +ENGINE = MergeTree +ORDER BY a +``` + +We can also manipulate statistics with `ALTER` statements. + +```sql +ALTER TABLE example_table ADD STATISTIC b TYPE tdigest; +ALTER TABLE example_table DROP STATISTIC a TYPE tdigest; +``` + +These lightweight statistics aggregate information about distribution of values in columns. +They can be used for query optimization when we enable `set allow_statistic_optimize = 1`. + +#### Available Types of Column Statistics {#available-types-of-column-statistics} + +- `tdigest` + + Stores distribution of values from numeric columns in [TDigest](https://github.com/tdunning/t-digest) sketch. diff --git a/docs/en/engines/table-engines/special/file.md b/docs/en/engines/table-engines/special/file.md index 27945b30c03..6e3897398a5 100644 --- a/docs/en/engines/table-engines/special/file.md +++ b/docs/en/engines/table-engines/special/file.md @@ -87,12 +87,18 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64 - Indices - Replication -## PARTITION BY +## PARTITION BY {#partition-by} `PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. 
Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression). For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format. +## Virtual Columns {#virtual-columns} + +- `_path` — Path to the file. Type: `LowCardinality(String)`. +- `_file` — Name of the file. Type: `LowCardinality(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. + ## Settings {#settings} - [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default. diff --git a/docs/en/engines/table-engines/special/url.md b/docs/en/engines/table-engines/special/url.md index 5a5e1564180..f6183a779ae 100644 --- a/docs/en/engines/table-engines/special/url.md +++ b/docs/en/engines/table-engines/special/url.md @@ -103,6 +103,12 @@ SELECT * FROM url_engine_table For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format. +## Virtual Columns {#virtual-columns} + +- `_path` — Path to the `URL`. Type: `LowCardinality(String)`. +- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`. +- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. + ## Storage Settings {#storage-settings} - [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default. diff --git a/docs/en/operations/optimizing-performance/profile-guided-optimization.md b/docs/en/operations/optimizing-performance/profile-guided-optimization.md index 3d36bb2cc14..340ae31d7c4 100644 --- a/docs/en/operations/optimizing-performance/profile-guided-optimization.md +++ b/docs/en/operations/optimizing-performance/profile-guided-optimization.md @@ -16,9 +16,9 @@ More information about PGO in ClickHouse you can read in the corresponding GitHu There are two major kinds of PGO: [Instrumentation](https://clang.llvm.org/docs/UsersManual.html#using-sampling-profilers) and [Sampling](https://clang.llvm.org/docs/UsersManual.html#using-sampling-profilers) (also known as AutoFDO). In this guide is described the Instrumentation PGO with ClickHouse. -1. Build ClickHouse in Instrumented mode. In Clang it can be done via passing `-fprofile-instr-generate` option to `CXXFLAGS`. +1. Build ClickHouse in Instrumented mode. In Clang it can be done via passing `-fprofile-generate` option to `CXXFLAGS`. 2. Run instrumented ClickHouse on a sample workload. Here you need to use your usual workload. One of the approaches could be using [ClickBench](https://github.com/ClickHouse/ClickBench) as a sample workload. ClickHouse in the instrumentation mode could work slowly so be ready for that and do not run instrumented ClickHouse in performance-critical environments. -3. Recompile ClickHouse once again with `-fprofile-instr-use` compiler flags and profiles that are collected from the previous step. +3.
Recompile ClickHouse once again with `-fprofile-use` compiler flags and profiles that are collected from the previous step. A more detailed guide on how to apply PGO is in the Clang [documentation](https://clang.llvm.org/docs/UsersManual.html#profile-guided-optimization). diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index cfc5a939a0e..ec59cfeee73 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -74,7 +74,7 @@ The maximum number of threads that will be used for fetching data parts from ano Type: UInt64 -Default: 8 +Default: 16 ## background_merges_mutations_concurrency_ratio @@ -136,7 +136,7 @@ The maximum number of threads that will be used for constantly executing some li Type: UInt64 -Default: 128 +Default: 512 ## backup_threads @@ -963,11 +963,9 @@ Lazy loading of dictionaries. If `true`, then each dictionary is loaded on the first use. If the loading is failed, the function that was using the dictionary throws an exception. -If `false`, then the server starts loading all dictionaries at startup. -Dictionaries are loaded in background. -The server doesn't wait at startup until all the dictionaries finish their loading -(exception: if `wait_dictionaries_load_at_startup` is set to `true` - see below). -When a dictionary is used in a query for the first time then the query waits until the dictionary is loaded if it's not loaded yet. +If `false`, then the server loads all dictionaries at startup. +The server will wait at startup until all the dictionaries finish their loading before receiving any connections +(exception: if `wait_dictionaries_load_at_startup` is set to `false` - see below). The default is `true`. @@ -1837,9 +1835,10 @@ Settings: - `endpoint` – HTTP endpoint for scraping metrics by prometheus server. Start from ‘/’. - `port` – Port for `endpoint`. -- `metrics` – Flag that sets to expose metrics from the [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) table. -- `events` – Flag that sets to expose metrics from the [system.events](../../operations/system-tables/events.md#system_tables-events) table. -- `asynchronous_metrics` – Flag that sets to expose current metrics values from the [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) table. +- `metrics` – Expose metrics from the [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) table. +- `events` – Expose metrics from the [system.events](../../operations/system-tables/events.md#system_tables-events) table. +- `asynchronous_metrics` – Expose current metrics values from the [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) table. +- `errors` - Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the [system.errors](../../operations/system-tables/asynchronous_metrics.md#system_tables-errors) as well. **Example** @@ -1855,6 +1854,7 @@ Settings: true true true + true @@ -2352,7 +2352,7 @@ Path on the local filesystem to store temporary data for processing large querie ## user_files_path {#user_files_path} -The directory with user files. Used in the table function [file()](../../sql-reference/table-functions/file.md). +The directory with user files. 
Used in the table function [file()](../../sql-reference/table-functions/file.md), [fileCluster()](../../sql-reference/table-functions/fileCluster.md). **Example** @@ -2397,20 +2397,24 @@ Path to the file that contains: ## wait_dictionaries_load_at_startup {#wait_dictionaries_load_at_startup} -If `false`, then the server will not wait at startup until all the dictionaries finish their loading. -This allows to start ClickHouse faster. +This setting allows to specify behavior if `dictionaries_lazy_load` is `false`. +(If `dictionaries_lazy_load` is `true` this setting doesn't affect anything.) -If `true`, then the server will wait at startup until all the dictionaries finish their loading (successfully or not) -before listening to any connections. -This can make ClickHouse start slowly, however after that some queries can be executed faster -(because they won't have to wait for the used dictionaries to be load). +If `wait_dictionaries_load_at_startup` is `false`, then the server +will start loading all the dictionaries at startup and it will receive connections in parallel with that loading. +When a dictionary is used in a query for the first time then the query will wait until the dictionary is loaded if it's not loaded yet. +Setting `wait_dictionaries_load_at_startup` to `false` can make ClickHouse start faster, however some queries can be executed slower +(because they will have to wait for some dictionaries to be loaded). -The default is `false`. +If `wait_dictionaries_load_at_startup` is `true`, then the server will wait at startup +until all the dictionaries finish their loading (successfully or not) before receiving any connections. + +The default is `true`. **Example** ``` xml -false +true ``` ## zookeeper {#server-settings_zookeeper} @@ -2740,7 +2744,7 @@ ClickHouse will use it to form the proxy URI using the following template: `{pro 10 - + http://resolver:8080/hostname diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index d0acad7b557..c8d54d76704 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -731,11 +731,13 @@ Default value: LZ4. ## max_block_size {#setting-max_block_size} -In ClickHouse, data is processed by blocks (sets of column parts). The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block. The `max_block_size` setting is a recommendation for what size of the block (in a count of rows) to load from tables. The block size shouldn’t be too small, so that the expenditures on each block are still noticeable, but not too large so that the query with LIMIT that is completed after the first block is processed quickly. The goal is to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality. +In ClickHouse, data is processed by blocks, which are sets of column parts. The internal processing cycles for a single block are efficient but there are noticeable costs when processing each block. -Default value: 65,536. +The `max_block_size` setting indicates the recommended maximum number of rows to include in a single block when loading data from tables. Blocks the size of `max_block_size` are not always loaded from the table: if ClickHouse determines that less data needs to be retrieved, a smaller block is processed. -Blocks the size of `max_block_size` are not always loaded from the table. 
If it is obvious that less data needs to be retrieved, a smaller block is processed. +The block size should not be too small to avoid noticeable costs when processing each block. It should also not be too large to ensure that queries with a LIMIT clause execute quickly after processing the first block. When setting `max_block_size`, the goal should be to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality. + +Default value: `65,409` ## preferred_block_size_bytes {#preferred-block-size-bytes} @@ -2714,6 +2716,10 @@ Default value: `0`. - [Distributed Table Engine](../../engines/table-engines/special/distributed.md/#distributed) - [Managing Distributed Tables](../../sql-reference/statements/system.md/#query-language-system-distributed) +## insert_distributed_sync {#insert_distributed_sync} + +Alias for [`distributed_foreground_insert`](#distributed_foreground_insert). + ## insert_shard_id {#insert_shard_id} If not `0`, specifies the shard of [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table into which the data will be inserted synchronously. @@ -4795,10 +4801,255 @@ a Tuple( ) ``` +## allow_experimental_statistic {#allow_experimental_statistic} + +Allows defining columns with [statistics](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) and [manipulate statistics](../../engines/table-engines/mergetree-family/mergetree.md#column-statistics). + +## allow_statistic_optimize {#allow_statistic_optimize} + +Allows using statistic to optimize the order of [prewhere conditions](../../sql-reference/statements/select/prewhere.md). + ## analyze_index_with_space_filling_curves If a table has a space-filling curve in its index, e.g. `ORDER BY mortonEncode(x, y)`, and the query has conditions on its arguments, e.g. `x >= 10 AND x <= 20 AND y >= 20 AND y <= 30`, use the space-filling curve for index analysis. +## query_plan_enable_optimizations {#query_plan_enable_optimizations} + +Toggles query optimization at the query plan level. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable all optimizations at the query plan level +- 1 - Enable optimizations at the query plan level (but individual optimizations may still be disabled via their individual settings) + +Default value: `1`. + +## query_plan_max_optimizations_to_apply + +Limits the total number of optimizations applied to query plan, see setting [query_plan_enable_optimizations](#query_plan_enable_optimizations). +Useful to avoid long optimization times for complex queries. +If the actual number of optimizations exceeds this setting, an exception is thrown. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). + +Default value: '10000' + +## query_plan_lift_up_array_join + +Toggles a query-plan-level optimization which moves ARRAY JOINs up in the execution plan. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. 
The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_push_down_limit + +Toggles a query-plan-level optimization which moves LIMITs down in the execution plan. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_split_filter + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Toggles a query-plan-level optimization which splits filters into expressions. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_merge_expressions + +Toggles a query-plan-level optimization which merges consecutive filters. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_filter_push_down + +Toggles a query-plan-level optimization which moves filters down in the execution plan. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_execute_functions_after_sorting + +Toggles a query-plan-level optimization which moves expressions after sorting steps. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_reuse_storage_ordering_for_window_functions + +Toggles a query-plan-level optimization which uses storage sorting when sorting for window functions. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_lift_up_union + +Toggles a query-plan-level optimization which moves larger subtrees of the query plan into union to enable further optimizations. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. 
+::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_distinct_in_order + +Toggles the distinct in-order query-plan-level optimization. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_read_in_order + +Toggles the read in-order query-plan-level optimization. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_aggregation_in_order + +Toggles the aggregation in-order query-plan-level optimization. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `0`. + +## query_plan_remove_redundant_sorting + +Toggles a query-plan-level optimization which removes redundant sorting steps, e.g. in subqueries. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_remove_redundant_distinct + +Toggles a query-plan-level optimization which removes redundant DISTINCT steps. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + ## dictionary_use_async_executor {#dictionary_use_async_executor} Execute a pipeline for reading dictionary source in several threads. It's supported only by dictionaries with local CLICKHOUSE source. @@ -4820,3 +5071,10 @@ When set to `true` the metadata files are written with `VERSION_FULL_OBJECT_KEY` When set to `false` the metadata files are written with the previous format version, `VERSION_INLINE_DATA`. With that format only suffixes of object storage key names are written to the metadata files. The prefix for all of object storage key names is set in configuration files at `storage_configuration.disks` section. Default value: `false`. + +## s3_use_adaptive_timeouts {#s3_use_adaptive_timeouts} + +When set to `true`, then for all s3 requests the first two attempts are made with low send and receive timeouts. +When set to `false`, then all attempts are made with identical timeouts. + +Default value: `true`.
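As a hedged sketch of how the query-plan settings documented above can be exercised, the queries below toggle one of them (`query_plan_push_down_limit`) in a `SETTINGS` clause and compare the plans printed by `EXPLAIN`; the exact plan text varies between ClickHouse versions, so treat the output as illustrative only:

```sql
-- With the optimization enabled (the default), the LIMIT is expected to be
-- pushed down towards the sorting step of the inner query.
EXPLAIN PLAN
SELECT number
FROM (SELECT number FROM numbers(1000000) ORDER BY number DESC)
LIMIT 5
SETTINGS query_plan_push_down_limit = 1;

-- With the optimization disabled, the LIMIT stays above the subquery.
EXPLAIN PLAN
SELECT number
FROM (SELECT number FROM numbers(1000000) ORDER BY number DESC)
LIMIT 5
SETTINGS query_plan_push_down_limit = 0;
```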
diff --git a/docs/en/operations/system-tables/blob_storage_log.md b/docs/en/operations/system-tables/blob_storage_log.md new file mode 100644 index 00000000000..2328f7f0346 --- /dev/null +++ b/docs/en/operations/system-tables/blob_storage_log.md @@ -0,0 +1,59 @@ +--- +slug: /en/operations/system-tables/blob_storage_log +--- +# blob_storage_log + +Contains logging entries with information about various blob storage operations such as uploads and deletes. + +Columns: + +- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Date of the event. +- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the event. +- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Time of the event with microseconds precision. +- `event_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of the event. Possible values: + - `'Upload'` + - `'Delete'` + - `'MultiPartUploadCreate'` + - `'MultiPartUploadWrite'` + - `'MultiPartUploadComplete'` + - `'MultiPartUploadAbort'` +- `query_id` ([String](../../sql-reference/data-types/string.md)) — Identifier of the query associated with the event, if any. +- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Identifier of the thread performing the operation. +- `thread_name` ([String](../../sql-reference/data-types/string.md)) — Name of the thread performing the operation. +- `disk_name` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — Name of the associated disk. +- `bucket` ([String](../../sql-reference/data-types/string.md)) — Name of the bucket. +- `remote_path` ([String](../../sql-reference/data-types/string.md)) — Path to the remote resource. +- `local_path` ([String](../../sql-reference/data-types/string.md)) — Path to the metadata file on the local system, which references the remote resource. +- `data_size` ([UInt32](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Size of the data involved in the upload event. +- `error` ([String](../../sql-reference/data-types/string.md)) — Error message associated with the event, if any. + +**Example** + +Suppose a blob storage operation uploads a file, and an event is logged: + +```sql +SELECT * FROM system.blob_storage_log WHERE query_id = '7afe0450-504d-4e4b-9a80-cd9826047972' ORDER BY event_date, event_time_microseconds \G +``` + +```text +Row 1: +────── +event_date: 2023-10-31 +event_time: 2023-10-31 16:03:40 +event_time_microseconds: 2023-10-31 16:03:40.481437 +event_type: Upload +query_id: 7afe0450-504d-4e4b-9a80-cd9826047972 +thread_id: 2381740 +disk_name: disk_s3 +bucket: bucket1 +remote_path: rrr/kxo/tbnqtrghgtnxkzgtcrlutwuslgawe +local_path: store/654/6549e8b3-d753-4447-8047-d462df6e6dbe/tmp_insert_all_1_1_0/checksums.txt +data_size: 259 +error: +``` + +In this example, upload operation was associated with the `INSERT` query with ID `7afe0450-504d-4e4b-9a80-cd9826047972`. The local metadata file `store/654/6549e8b3-d753-4447-8047-d462df6e6dbe/tmp_insert_all_1_1_0/checksums.txt` refers to remote path `rrr/kxo/tbnqtrghgtnxkzgtcrlutwuslgawe` in bucket `bucket1` on disk `disk_s3`, with a size of 259 bytes. 
+ +**See Also** + +- [External Disks for Storing Data](../../operations/storing-data.md) diff --git a/docs/en/operations/system-tables/dashboards.md b/docs/en/operations/system-tables/dashboards.md new file mode 100644 index 00000000000..1d6876b9f8d --- /dev/null +++ b/docs/en/operations/system-tables/dashboards.md @@ -0,0 +1,68 @@ +--- +slug: /en/operations/system-tables/dashboards +--- +# dashboards + +Contains queries used by the `/dashboard` page accessible through the [HTTP interface](/docs/en/interfaces/http.md). +This table can be useful for monitoring and troubleshooting. The table contains a row for every chart in a dashboard. + +:::note +The `/dashboard` page can render queries not only from `system.dashboards`, but from any table with the same schema. +This can be useful to create custom dashboards. +::: + +Example: + +``` sql +SELECT * +FROM system.dashboards +WHERE title ILIKE '%CPU%' +``` + +``` text +Row 1: +────── +dashboard: overview +title: CPU Usage (cores) +query: SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_OSCPUVirtualTimeMicroseconds) / 1000000 +FROM system.metric_log +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t +ORDER BY t WITH FILL STEP {rounding:UInt32} + +Row 2: +────── +dashboard: overview +title: CPU Wait +query: SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_OSCPUWaitMicroseconds) / 1000000 +FROM system.metric_log +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t +ORDER BY t WITH FILL STEP {rounding:UInt32} + +Row 3: +────── +dashboard: overview +title: OS CPU Usage (Userspace) +query: SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(value) +FROM system.asynchronous_metric_log +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'OSUserTimeNormalized' +GROUP BY t +ORDER BY t WITH FILL STEP {rounding:UInt32} + +Row 4: +────── +dashboard: overview +title: OS CPU Usage (Kernel) +query: SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(value) +FROM system.asynchronous_metric_log +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'OSSystemTimeNormalized' +GROUP BY t +ORDER BY t WITH FILL STEP {rounding:UInt32} +``` + +Columns: + +- `dashboard` (`String`) - The dashboard name. +- `title` (`String`) - The title of a chart. +- `query` (`String`) - The query to obtain data to be displayed. diff --git a/docs/en/operations/system-tables/databases.md b/docs/en/operations/system-tables/databases.md index f3d3d388c36..e3b0ded96e8 100644 --- a/docs/en/operations/system-tables/databases.md +++ b/docs/en/operations/system-tables/databases.md @@ -14,6 +14,7 @@ Columns: - `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Database UUID. - `comment` ([String](../../sql-reference/data-types/enum.md)) — Database comment. - `engine_full` ([String](../../sql-reference/data-types/enum.md)) — Parameters of the database engine. +- `database` ([String](../../sql-reference/data-types/string.md)) – Alias for `name`. The `name` column from this system table is used for implementing the `SHOW DATABASES` query.
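A small sketch for the `database` alias column documented above; since it is an alias for `name`, both columns are expected to return the same values (the `system` database is used here only as an example):

```sql
-- `database` mirrors `name`, so the two columns should match for every row.
SELECT name, database, engine
FROM system.databases
WHERE name = 'system';
```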
diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md index a98c8e50174..a40108a331a 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/count.md +++ b/docs/en/sql-reference/aggregate-functions/reference/count.md @@ -34,6 +34,10 @@ The `SELECT count() FROM table` query is optimized by default using metadata fro However `SELECT count(nullable_column) FROM table` query can be optimized by enabling the [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [null](../../../sql-reference/data-types/nullable.md#finding-null) subcolumn instead of reading and processing the whole column data. The query `SELECT count(n) FROM table` transforms to `SELECT sum(NOT n.null) FROM table`. +**Improving COUNT(DISTINCT expr) performance** + +If your `COUNT(DISTINCT expr)` query is slow, consider adding a [`GROUP BY`](../../../sql-reference/statements/select/group-by.md) clause as this improves parallelization. You can also use a [projection](../../../sql-reference/statements/alter/projection.md) to create an index on the target column used with `COUNT(DISTINCT target_col)`. + **Examples** Example 1: diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md new file mode 100644 index 00000000000..cc601c097fe --- /dev/null +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md @@ -0,0 +1,48 @@ + --- + toc_priority: 112 + --- + + # groupArraySorted {#groupArraySorted} + + Returns an array with the first N items in ascending order. + + ``` sql + groupArraySorted(N)(column) + ``` + + **Arguments** + + - `N` – The number of elements to return. + + If the parameter is omitted, default value is the size of input. + + - `column` – The value (Integer, String, Float and other Generic types). 
+ + **Example** + + Gets the first 10 numbers: + + ``` sql + SELECT groupArraySorted(10)(number) FROM numbers(100) + ``` + + ``` text + ┌─groupArraySorted(10)(number)─┐ + │ [0,1,2,3,4,5,6,7,8,9] │ + └──────────────────────────────┘ + ``` + + + Gets all the String implementations of all numbers in column: + + ``` sql +SELECT groupArraySorted(str) FROM (SELECT toString(number) as str FROM numbers(5)); + + ``` + + ``` text + ┌─groupArraySorted(str)────────┐ + │ ['0','1','2','3','4'] │ + └──────────────────────────────┘ + ``` + \ No newline at end of file diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md index b1f2c5bacbb..3bf0e070cae 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/index.md +++ b/docs/en/sql-reference/aggregate-functions/reference/index.md @@ -54,6 +54,7 @@ ClickHouse-specific aggregate functions: - [groupArrayMovingAvg](/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md) - [groupArrayMovingSum](/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md) - [groupArraySample](./grouparraysample.md) +- [groupArraySorted](/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md) - [groupBitAnd](/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md) - [groupBitOr](/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md) - [groupBitXor](/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md) diff --git a/docs/en/sql-reference/data-types/lowcardinality.md b/docs/en/sql-reference/data-types/lowcardinality.md index 7810f4c5324..db10103282d 100644 --- a/docs/en/sql-reference/data-types/lowcardinality.md +++ b/docs/en/sql-reference/data-types/lowcardinality.md @@ -56,7 +56,7 @@ Functions: ## Related content -- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/) +- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://altinity.com/blog/2020-5-20-reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer) - [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf) - Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema) - Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) diff --git a/docs/en/sql-reference/functions/comparison-functions.md b/docs/en/sql-reference/functions/comparison-functions.md index 297d84eb8a5..abe923adeb3 100644 --- a/docs/en/sql-reference/functions/comparison-functions.md +++ b/docs/en/sql-reference/functions/comparison-functions.md @@ -20,7 +20,7 @@ Strings are compared byte-by-byte. Note that this may lead to unexpected results A string S1 which has another string S2 as prefix is considered longer than S2. 
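Two consequences of byte-wise comparison are worth keeping in mind, shown in this small sketch (added for illustration, not part of the changed hunks): a string compares as smaller than any string that extends it, and ASCII uppercase letters compare as smaller than lowercase ones.

```sql
SELECT
    'abc' < 'abcd' AS prefix_is_smaller,      -- 1: 'abcd' has 'abc' as a prefix, so it is greater
    'B' < 'a'      AS uppercase_sorts_first;  -- 1: byte 0x42 ('B') is less than 0x61 ('a')
```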
-## equals, `=`, `==` operators +## equals, `=`, `==` operators {#equals} **Syntax** @@ -32,7 +32,7 @@ Alias: - `a = b` (operator) - `a == b` (operator) -## notEquals, `!=`, `<>` operators +## notEquals, `!=`, `<>` operators {#notequals} **Syntax** @@ -44,7 +44,7 @@ Alias: - `a != b` (operator) - `a <> b` (operator) -## less, `<` operator +## less, `<` operator {#less} **Syntax** @@ -55,7 +55,7 @@ less(a, b) Alias: - `a < b` (operator) -## greater, `>` operator +## greater, `>` operator {#greater} **Syntax** @@ -66,7 +66,7 @@ greater(a, b) Alias: - `a > b` (operator) -## lessOrEquals, `<=` operator +## lessOrEquals, `<=` operator {#lessorequals} **Syntax** @@ -77,7 +77,7 @@ lessOrEquals(a, b) Alias: - `a <= b` (operator) -## greaterOrEquals, `>=` operator +## greaterOrEquals, `>=` operator {#greaterorequals} **Syntax** diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 989b39e46c1..1c4f6a678be 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -2518,13 +2518,14 @@ formatDateTime(Time, Format[, Timezone]) Returns time and date values according to the determined format. **Replacement fields** + Using replacement fields, you can define a pattern for the resulting string. “Example” column shows formatting result for `2018-01-02 22:33:44`. -| Placeholder | Description | Example | +| Placeholder | Description | Example | |----------|---------------------------------------------------------|------------| | %a | abbreviated weekday name (Mon-Sun) | Mon | | %b | abbreviated month name (Jan-Dec) | Jan | -| %c | month as an integer number (01-12) | 01 | +| %c | month as an integer number (01-12), see 'Note 3' below | 01 | | %C | year divided by 100 and truncated to integer (00-99) | 20 | | %d | day of the month, zero-padded (01-31) | 02 | | %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 01/02/18 | @@ -2538,8 +2539,8 @@ Using replacement fields, you can define a pattern for the resulting string. “ | %i | minute (00-59) | 33 | | %I | hour in 12h format (01-12) | 10 | | %j | day of the year (001-366) | 002 | -| %k | hour in 24h format (00-23) | 22 | -| %l | hour in 12h format (01-12) | 09 | +| %k | hour in 24h format (00-23), see 'Note 3' below | 14 | +| %l | hour in 12h format (01-12), see 'Note 3' below | 09 | | %m | month as an integer number (01-12) | 01 | | %M | full month name (January-December), see 'Note 2' below | January | | %n | new-line character (‘’) | | @@ -2564,6 +2565,8 @@ Note 1: In ClickHouse versions earlier than v23.4, `%f` prints a single zero (0) Note 2: In ClickHouse versions earlier than v23.4, `%M` prints the minute (00-59) instead of the full month name (January-December). The previous behavior can be restored using setting `formatdatetime_parsedatetime_m_is_month_name = 0`. +Note 3: In ClickHouse versions earlier than v23.11, function `parseDateTime()` required leading zeros for formatters `%c` (month) and `%l`/`%k` (hour), e.g. `07`. In later versions, the leading zero may be omitted, e.g. `7`. The previous behavior can be restored using setting `parsedatetime_parse_without_leading_zeros = 0`. Note that function `formatDateTime()` by default still prints leading zeros for `%c` and `%l`/`%k` to not break existing use cases. This behavior can be changed by setting `formatdatetime_format_without_leading_zeros = 1`. 
+ **Example** ``` sql diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md index bde2a8a9505..91c04cfded3 100644 --- a/docs/en/sql-reference/functions/functions-for-nulls.md +++ b/docs/en/sql-reference/functions/functions-for-nulls.md @@ -164,7 +164,7 @@ Consider a list of contacts that may specify multiple ways to contact a customer └──────────┴──────┴───────────┴───────────┘ ``` -The `mail` and `phone` fields are of type String, but the `icq` field is `UInt32`, so it needs to be converted to `String`. +The `mail` and `phone` fields are of type String, but the `telegram` field is `UInt32`, so it needs to be converted to `String`. Get the first available contact method for the customer from the contact list: diff --git a/docs/en/sql-reference/functions/math-functions.md b/docs/en/sql-reference/functions/math-functions.md index 9eab2274210..b27668caf0c 100644 --- a/docs/en/sql-reference/functions/math-functions.md +++ b/docs/en/sql-reference/functions/math-functions.md @@ -6,11 +6,9 @@ sidebar_label: Mathematical # Mathematical Functions -All the functions return a Float64 number. Results are generally as close to the actual result as possible, but in some cases less precise than the machine-representable number. - ## e -Returns e. +Returns e ([Euler's constant](https://en.wikipedia.org/wiki/Euler%27s_constant)) **Syntax** @@ -18,15 +16,22 @@ Returns e. e() ``` +**Returned value** + +Type: [Float64](../../sql-reference/data-types/float.md). + ## pi -Returns π. +Returns π ([Pi](https://en.wikipedia.org/wiki/Pi)). **Syntax** ```sql pi() ``` +**Returned value** + +Type: [Float64](../../sql-reference/data-types/float.md). ## exp @@ -38,6 +43,14 @@ Returns e to the power of the given argument. exp(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## log Returns the natural logarithm of the argument. @@ -50,6 +63,14 @@ log(x) Alias: `ln(x)` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## exp2 Returns 2 to the power of the given argument @@ -60,6 +81,14 @@ Returns 2 to the power of the given argument exp2(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## intExp2 Like `exp` but returns a UInt64. @@ -80,6 +109,14 @@ Returns the binary logarithm of the argument. log2(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## exp10 Returns 10 to the power of the given argument. @@ -90,6 +127,14 @@ Returns 10 to the power of the given argument. 
exp10(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## intExp10 Like `exp10` but returns a UInt64. @@ -110,6 +155,14 @@ Returns the decimal logarithm of the argument. log10(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## sqrt Returns the square root of the argument. @@ -118,6 +171,14 @@ Returns the square root of the argument. sqrt(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## cbrt Returns the cubic root of the argument. @@ -126,6 +187,14 @@ Returns the cubic root of the argument. cbrt(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## erf If `x` is non-negative, then `erf(x / σ√2)` is the probability that a random variable having a normal distribution with standard deviation `σ` takes the value that is separated from the expected value by more than `x`. @@ -136,6 +205,14 @@ If `x` is non-negative, then `erf(x / σ√2)` is the probability that a random erf(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + **Example** (three sigma rule) @@ -160,6 +237,14 @@ Returns a number close to `1 - erf(x)` without loss of precision for large ‘x erfc(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## lgamma Returns the logarithm of the gamma function. @@ -170,6 +255,14 @@ Returns the logarithm of the gamma function. lgamma(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## tgamma Returns the gamma function. @@ -180,6 +273,14 @@ Returns the gamma function. gamma(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). 
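As a quick sanity check of the argument and return types documented above, several of these functions can be combined in one query (a sketch added here for illustration; the results are approximate Float64 values):

```sql
SELECT
    e()           AS euler_number,    -- ≈ 2.718281828459045
    log(exp(3))   AS three,           -- ≈ 3
    exp2(log2(8)) AS eight,           -- ≈ 8
    sqrt(16)      AS four,            -- 4
    cbrt(27)      AS cube_root,       -- 3
    tgamma(5)     AS factorial_of_4;  -- Γ(5) = 4! = 24
```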
+ ## sin Returns the sine of the argument @@ -190,6 +291,14 @@ Returns the sine of the argument sin(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## cos Returns the cosine of the argument. @@ -200,6 +309,14 @@ Returns the cosine of the argument. cos(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## tan Returns the tangent of the argument. @@ -210,6 +327,14 @@ Returns the tangent of the argument. tan(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## asin Returns the arc sine of the argument. @@ -220,6 +345,14 @@ Returns the arc sine of the argument. asin(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## acos Returns the arc cosine of the argument. @@ -230,6 +363,14 @@ Returns the arc cosine of the argument. acos(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## atan Returns the arc tangent of the argument. @@ -240,6 +381,14 @@ Returns the arc tangent of the argument. atan(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## pow Returns `x` to the power of `y`. @@ -252,6 +401,15 @@ pow(x, y) Alias: `power(x, y)` +**Arguments** + +- `x` - [(U)Int8/16/32/64](../../sql-reference/data-types/int-uint.md) or [Float*](../../sql-reference/data-types/float.md) +- `y` - [(U)Int8/16/32/64](../../sql-reference/data-types/int-uint.md) or [Float*](../../sql-reference/data-types/float.md) + +**Returned value** + +Type: [Float64](../../sql-reference/data-types/float.md). + ## cosh Returns the [hyperbolic cosine](https://in.mathworks.com/help/matlab/ref/cosh.html) of the argument. diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 35fd5089bf0..b2a1d5066bb 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -67,7 +67,45 @@ WHERE macro = 'test'; │ test │ Value │ └───────┴──────────────┘ ``` + +## getClientHTTPHeader +Returns the value of specified http header.If there is no such header or the request method is not http, it will throw an exception. 
+**Syntax** + +```sql +getClientHTTPHeader(name); +``` + +**Arguments** + +- `name` — HTTP header name. [String](../../sql-reference/data-types/string.md#string) + +**Returned value** + +Value of the specified header. +Type: [String](../../sql-reference/data-types/string.md#string). + + +When this function is executed from `clickhouse-client`, it always returns an empty string, because the client does not use the HTTP protocol. +```sql +SELECT getClientHTTPHeader('test') +``` +Result: + +```text +┌─getClientHTTPHeader('test')─┐ +│ │ +└─────────────────────────────┘ +``` +Using an HTTP request: +```shell +echo "select getClientHTTPHeader('X-Clickhouse-User')" | curl -H 'X-ClickHouse-User: default' -H 'X-ClickHouse-Key: ' 'http://localhost:8123/' -d @- + +#result +default +``` + ## FQDN Returns the fully qualified domain name of the ClickHouse server. @@ -1556,7 +1594,7 @@ initializeAggregation (aggregate_function, arg1, arg2, ..., argN) - Result of aggregation for every row passed to the function. -The return type is the same as the return type of function, that `initializeAgregation` takes as first argument. +The return type is the same as the return type of the function that `initializeAggregation` takes as its first argument. **Example** diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index 4df987b5e2a..1940993ce0b 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -429,7 +429,7 @@ SELECT format('{} {}', 'Hello', 'World') ## concat -Concatenates the strings listed in the arguments without separator. +Concatenates the given arguments. **Syntax** @@ -439,7 +439,9 @@ concat(s1, s2, ...) **Arguments** -Values of type String or FixedString. +At least one value of arbitrary type. + +Arguments which are not of types [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md) are converted to strings using their default serialization. As this decreases performance, it is not recommended to use non-String/FixedString arguments. **Returned values** @@ -449,6 +451,8 @@ If any of arguments is `NULL`, the function returns `NULL`. **Example** +Query: + ``` sql SELECT concat('Hello, ', 'World!'); ``` @@ -461,6 +465,20 @@ Result: └─────────────────────────────┘ ``` +Query: + +```sql +SELECT concat(42, 144); +``` + +Result: + +```result +┌─concat(42, 144)─┐ +│ 42144 │ +└─────────────────┘ +``` + ## concatAssumeInjective Like [concat](#concat) but assumes that `concat(s1, s2, ...) → sn` is injective. Can be used for optimization of GROUP BY. @@ -526,6 +544,8 @@ Concatenates the given strings with a given separator. concatWithSeparator(sep, expr1, expr2, expr3...) ``` +Alias: `concat_ws` + **Arguments** - sep — separator. Const [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md). diff --git a/docs/en/sql-reference/functions/time-series-functions.md b/docs/en/sql-reference/functions/time-series-functions.md new file mode 100644 index 00000000000..e183fdcdcd7 --- /dev/null +++ b/docs/en/sql-reference/functions/time-series-functions.md @@ -0,0 +1,47 @@ +--- +slug: /en/sql-reference/functions/time-series-functions +sidebar_position: 172 +sidebar_label: Time Series +--- + +# Time Series Functions + +The functions below are used for time series analysis.
+ +## seriesPeriodDetectFFT + +Finds the period of the given time series data using FFT ([Fast Fourier transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform)). + +**Syntax** + +``` sql +seriesPeriodDetectFFT(series); +``` + +**Arguments** + +- `series` - An array of numeric values. + +**Returned value** + +- A real value equal to the period of the time series. + +Type: [Float64](../../sql-reference/data-types/float.md). + +**Examples** + +Query: + +``` sql +SELECT seriesPeriodDetectFFT([1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6]) AS print_0; +``` + +Result: + +``` text +┌─print_0─┐ +│       3 │ +└─────────┘ +``` diff --git a/docs/en/sql-reference/operators/exists.md b/docs/en/sql-reference/operators/exists.md index 5e96e11b924..86ff422aa5b 100644 --- a/docs/en/sql-reference/operators/exists.md +++ b/docs/en/sql-reference/operators/exists.md @@ -5,7 +5,7 @@ slug: /en/sql-reference/operators/exists The `EXISTS` operator checks how many records are in the result of a subquery. If it is empty, then the operator returns `0`. Otherwise, it returns `1`. -`EXISTS` can be used in a [WHERE](../../sql-reference/statements/select/where.md) clause. +`EXISTS` can also be used in a [WHERE](../../sql-reference/statements/select/where.md) clause. :::tip References to main query tables and columns are not supported in a subquery. @@ -13,12 +13,26 @@ References to main query tables and columns are not supported in a subquery. **Syntax** -```sql -WHERE EXISTS(subquery) +``` sql +EXISTS(subquery) ``` **Example** +Query checking existence of values in a subquery: + +``` sql +SELECT EXISTS(SELECT * FROM numbers(10) WHERE number > 8), EXISTS(SELECT * FROM numbers(10) WHERE number > 11) +``` + +Result: + +``` text +┌─in(1, _subquery1)─┬─in(1, _subquery2)─┐ +│ 1 │ 0 │ +└───────────────────┴───────────────────┘ +``` + Query with a subquery returning several rows: ``` sql diff --git a/docs/en/sql-reference/statements/alter/index.md b/docs/en/sql-reference/statements/alter/index.md index dca34d16f25..d28542e0a43 100644 --- a/docs/en/sql-reference/statements/alter/index.md +++ b/docs/en/sql-reference/statements/alter/index.md @@ -16,6 +16,7 @@ Most `ALTER TABLE` queries modify table settings or data: - [INDEX](/docs/en/sql-reference/statements/alter/skipping-index.md) - [CONSTRAINT](/docs/en/sql-reference/statements/alter/constraint.md) - [TTL](/docs/en/sql-reference/statements/alter/ttl.md) +- [STATISTIC](/docs/en/sql-reference/statements/alter/statistic.md) :::note Most `ALTER TABLE` queries are supported only for [\*MergeTree](/docs/en/engines/table-engines/mergetree-family/index.md) tables, as well as [Merge](/docs/en/engines/table-engines/special/merge.md) and [Distributed](/docs/en/engines/table-engines/special/distributed.md). diff --git a/docs/en/sql-reference/statements/alter/statistic.md b/docs/en/sql-reference/statements/alter/statistic.md new file mode 100644 index 00000000000..1c2e45b23fd --- /dev/null +++ b/docs/en/sql-reference/statements/alter/statistic.md @@ -0,0 +1,25 @@ +--- +slug: /en/sql-reference/statements/alter/statistic +sidebar_position: 45 +sidebar_label: STATISTIC +--- + +# Manipulating Column Statistics + +The following operations are available: + +- `ALTER TABLE [db.]table ADD STATISTIC (columns list) TYPE type` - Adds a statistic description to the table's metadata.
+ +- `ALTER TABLE [db.]table DROP STATISTIC (columns list) TYPE type` - Removes the statistic description from the table's metadata and deletes statistic files from disk. + +- `ALTER TABLE [db.]table CLEAR STATISTIC (columns list) TYPE type` - Deletes statistic files from disk. + +- `ALTER TABLE [db.]table MATERIALIZE STATISTIC (columns list) TYPE type` - Rebuilds the statistic for columns. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). + +The first two commands are lightweight in the sense that they only change metadata or remove files. + +Also, they are replicated, syncing statistics metadata via ZooKeeper. + +:::note +Statistic manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants). +::: diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md index 49843eaff9a..07b5a196096 100644 --- a/docs/en/sql-reference/statements/optimize.md +++ b/docs/en/sql-reference/statements/optimize.md @@ -5,7 +5,7 @@ sidebar_label: OPTIMIZE title: "OPTIMIZE Statement" --- -This query tries to initialize an unscheduled merge of data parts for tables. +This query tries to initialize an unscheduled merge of data parts for tables. Note that we generally recommend against using `OPTIMIZE TABLE ... FINAL` (see these [docs](/docs/en/optimize/avoidoptimizefinal)) as its use case is meant for administration, not for daily operations. :::note `OPTIMIZE` can’t fix the `Too many parts` error. diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index 59c92e1327e..1510489ce83 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -67,6 +67,12 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam └─────────┘ ``` +## Virtual Columns {#virtual-columns} + +- `_path` — Path to the file. Type: `LowCardinality(String)`. +- `_file` — Name of the file. Type: `LowCardinality(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. + **See Also** - [AzureBlobStorage Table Engine](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index a871bdaafa9..ad1feb87c60 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -191,12 +191,13 @@ Query the total number of rows from all files `file002` inside any folder in dir SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32'); ``` -## Virtual Columns +## Virtual Columns {#virtual-columns} -- `_path` — Path to the file. -- `_file` — Name of the file. +- `_path` — Path to the file. Type: `LowCardinality(String)`. +- `_file` — Name of the file. Type: `LowCardinality(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. -## Settings +## Settings {#settings} - [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. diff --git a/docs/en/sql-reference/table-functions/fileCluster.md b/docs/en/sql-reference/table-functions/fileCluster.md new file mode 100644 index 00000000000..22ca132f136 --- /dev/null +++ b/docs/en/sql-reference/table-functions/fileCluster.md @@ -0,0 +1,85 @@ +--- +slug: /en/sql-reference/table-functions/fileCluster +sidebar_position: 61 +sidebar_label: fileCluster +--- + +# fileCluster Table Function + +Enables simultaneous processing of files matching a specified path across multiple nodes within a cluster. The initiator establishes connections to worker nodes, expands globs in the file path, and delegates file-reading tasks to worker nodes. Each worker node is querying the initiator for the next file to process, repeating until all tasks are completed (all files are read). + +:::note +This function will operate _correctly_ only in case the set of files matching the initially specified path is identical across all nodes, and their content is consistent among different nodes. +In case these files differ between nodes, the return value cannot be predetermined and depends on the order in which worker nodes request tasks from the initiator. +::: + +**Syntax** + +``` sql +fileCluster(cluster_name, path[, format, structure, compression_method]) +``` + +**Arguments** + +- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers. +- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Path to file also supports [globs](#globs_in_path). +- `format` — [Format](../../interfaces/formats.md#formats) of the files. Type: [String](../../sql-reference/data-types/string.md). +- `structure` — Table structure in `'UserID UInt64, Name String'` format. Determines column names and types. Type: [String](../../sql-reference/data-types/string.md). +- `compression_method` — Compression method. Supported compression types are `gz`, `br`, `xz`, `zst`, `lz4`, and `bz2`. + +**Returned value** + +A table with the specified format and structure and with data from files matching the specified path. 
+ +**Example** + +Given a cluster named `my_cluster` and given the following value of setting `user_files_path`: + +``` bash +$ grep user_files_path /etc/clickhouse-server/config.xml + /var/lib/clickhouse/user_files/ +``` +Also, given there are files `file1.csv` and `file2.csv` inside `user_files_path` of each cluster node, and their content is identical across different nodes: +```bash +$ cat /var/lib/clickhouse/user_files/file1.csv + 1,"file1" + 11,"file11" + +$ cat /var/lib/clickhouse/user_files/file2.csv + 2,"file2" + 22,"file22" +``` + +For example, one can create these files by executing these two queries on every cluster node: +```sql +INSERT INTO TABLE FUNCTION file('file1.csv', 'CSV', 'i UInt32, s String') VALUES (1,'file1'), (11,'file11'); +INSERT INTO TABLE FUNCTION file('file2.csv', 'CSV', 'i UInt32, s String') VALUES (2,'file2'), (22,'file22'); +``` + +Now, read the contents of `file1.csv` and `file2.csv` via the `fileCluster` table function: + +```sql +SELECT * FROM fileCluster( + 'my_cluster', 'file{1,2}.csv', 'CSV', 'i UInt32, s String' +) ORDER BY (i, s) +``` + +``` +┌──i─┬─s──────┐ +│ 1 │ file1 │ +│ 11 │ file11 │ +└────┴────────┘ +┌──i─┬─s──────┐ +│ 2 │ file2 │ +│ 22 │ file22 │ +└────┴────────┘ +``` + + +## Globs in Path {#globs_in_path} + +All patterns supported by [File](../../sql-reference/table-functions/file.md#globs-in-path) table function are supported by FileCluster. + +**See Also** + +- [File table function](../../sql-reference/table-functions/file.md) diff --git a/docs/en/sql-reference/table-functions/fuzzJSON.md b/docs/en/sql-reference/table-functions/fuzzJSON.md new file mode 100644 index 00000000000..74ccb0bcb8a --- /dev/null +++ b/docs/en/sql-reference/table-functions/fuzzJSON.md @@ -0,0 +1,86 @@ +--- +slug: /en/sql-reference/table-functions/fuzzJSON +sidebar_position: 75 +sidebar_label: fuzzJSON +--- + +# fuzzJSON + +Perturbs a JSON string with random variations. + +``` sql +fuzzJSON({ named_collection [option=value [,..]] | json_str[, random_seed] }) +``` + +**Arguments** + +- `named_collection` - A [NAMED COLLECTION](/docs/en/sql-reference/statements/create/named-collection.md). +- `option=value` - Named collection optional parameters and their values. + - `json_str` (String) - The source string representing structured data in JSON format. + - `random_seed` (UInt64) - Manual random seed for producing stable results. + - `reuse_output` (boolean) - Reuse the output from a fuzzing process as input for the next fuzzer. + - `max_output_length` (UInt64) - Maximum allowable length of the generated or perturbed JSON string. + - `probability` (Float64) - The probability to fuzz a JSON field (a key-value pair). Must be within [0, 1] range. + - `max_nesting_level` (UInt64) - The maximum allowed depth of nested structures within the JSON data. + - `max_array_size` (UInt64) - The maximum allowed size of a JSON array. + - `max_object_size` (UInt64) - The maximum allowed number of fields on a single level of a JSON object. + - `max_string_value_length` (UInt64) - The maximum length of a String value. + - `min_key_length` (UInt64) - The minimum key length. Should be at least 1. + - `max_key_length` (UInt64) - The maximum key length. Should be greater than or equal to `min_key_length`, if specified. + +**Returned Value** + +A table object with a single column containing perturbed JSON strings.
+ +## Usage Example + +``` sql +CREATE NAMED COLLECTION json_fuzzer AS json_str='{}'; +SELECT * FROM fuzzJSON(json_fuzzer) LIMIT 3; +``` + +``` text +{"52Xz2Zd4vKNcuP2":true} +{"UPbOhOQAdPKIg91":3405264103600403024} +{"X0QUWu8yT":[]} +``` + +``` sql +SELECT * FROM fuzzJSON(json_fuzzer, json_str='{"name" : "value"}', random_seed=1234) LIMIT 3; +``` + +``` text +{"key":"value", "mxPG0h1R5":"L-YQLv@9hcZbOIGrAn10%GA"} +{"BRE3":true} +{"key":"value", "SWzJdEJZ04nrpSfy":[{"3Q23y":[]}]} +``` + +``` sql +SELECT * FROM fuzzJSON(json_fuzzer, json_str='{"students" : ["Alice", "Bob"]}', reuse_output=true) LIMIT 3; +``` + +``` text +{"students":["Alice", "Bob"], "nwALnRMc4pyKD9Krv":[]} +{"students":["1rNY5ZNs0wU&82t_P", "Bob"], "wLNRGzwDiMKdw":[{}]} +{"xeEk":["1rNY5ZNs0wU&82t_P", "Bob"], "wLNRGzwDiMKdw":[{}, {}]} +``` + +``` sql +SELECT * FROM fuzzJSON(json_fuzzer, json_str='{"students" : ["Alice", "Bob"]}', max_output_length=512) LIMIT 3; +``` + +``` text +{"students":["Alice", "Bob"], "BREhhXj5":true} +{"NyEsSWzJdeJZ04s":["Alice", 5737924650575683711, 5346334167565345826], "BjVO2X9L":true} +{"NyEsSWzJdeJZ04s":["Alice", 5737924650575683711, 5346334167565345826], "BjVO2X9L":true, "k1SXzbSIz":[{}]} +``` + +``` sql +SELECT * FROM fuzzJSON('{"id":1}', 1234) LIMIT 3; +``` + +``` text +{"id":1, "mxPG0h1R5":"L-YQLv@9hcZbOIGrAn10%GA"} +{"BRjE":16137826149911306846} +{"XjKE":15076727133550123563} +``` diff --git a/docs/en/sql-reference/table-functions/gcs.md b/docs/en/sql-reference/table-functions/gcs.md index c49ae6a8501..5ffc20189da 100644 --- a/docs/en/sql-reference/table-functions/gcs.md +++ b/docs/en/sql-reference/table-functions/gcs.md @@ -9,6 +9,10 @@ keywords: [gcs, bucket] Provides a table-like interface to `SELECT` and `INSERT` data from [Google Cloud Storage](https://cloud.google.com/storage/). Requires the [`Storage Object User` IAM role](https://cloud.google.com/storage/docs/access-control/iam-roles). +This is an alias of the [s3 table function](../../sql-reference/table-functions/s3.md). + +If you have multiple replicas in your cluster, you can use the [s3Cluster function](../../sql-reference/table-functions/s3Cluster.md) (which works with GCS) instead to parallelize inserts. + **Syntax** ``` sql diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 678470e9150..31780e30e8e 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -94,8 +94,9 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin ## Virtual Columns -- `_path` — Path to the file. -- `_file` — Name of the file. +- `_path` — Path to the file. Type: `LowCardinalty(String)`. +- `_file` — Name of the file. Type: `LowCardinalty(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. ## Storage Settings {#storage-settings} diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 8649295e815..dc11259c626 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -9,6 +9,10 @@ keywords: [s3, gcs, bucket] Provides a table-like interface to select/insert files in [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/). This table function is similar to the [hdfs function](../../sql-reference/table-functions/hdfs.md), but provides S3-specific features. 
+If you have multiple replicas in your cluster, you can use the [s3Cluster function](../../sql-reference/table-functions/s3Cluster.md) instead to parallelize inserts. + +When using the `s3` table function with [`INSERT INTO...SELECT`](../../sql-reference/statements/insert-into#inserting-the-results-of-select), data is read and inserted in a streaming fashion. Only a few blocks of data reside in memory while the blocks are continuously read from S3 and pushed into the destination table. + +**Syntax** + +``` sql @@ -224,6 +228,12 @@ FROM s3( LIMIT 5; ``` +## Virtual Columns {#virtual-columns} + +- `_path` — Path to the file. Type: `LowCardinality(String)`. +- `_file` — Name of the file. Type: `LowCardinality(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. + ## Storage Settings {#storage-settings} - [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index 675aef54d34..799eb31446a 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -5,7 +5,7 @@ sidebar_label: s3Cluster title: "s3Cluster Table Function" --- -Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On initiator it creates a connection to all nodes in the cluster, discloses asterisks in S3 file path, and dispatches each file dynamically. On the worker node it asks the initiator about the next task to process and processes it. This is repeated until all tasks are finished. +Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/) in parallel from many nodes in a specified cluster. On initiator it creates a connection to all nodes in the cluster, discloses asterisks in S3 file path, and dispatches each file dynamically. On the worker node it asks the initiator about the next task to process and processes it. This is repeated until all tasks are finished. **Syntax** diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md index 859de86f019..4dc6e435b50 100644 --- a/docs/en/sql-reference/table-functions/url.md +++ b/docs/en/sql-reference/table-functions/url.md @@ -50,8 +50,9 @@ Character `|` inside patterns is used to specify failover addresses. They are it ## Virtual Columns -- `_path` — Path to the `URL`. -- `_file` — Resource name of the `URL`. +- `_path` — Path to the `URL`. Type: `LowCardinality(String)`. +- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`. +- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. ## Storage Settings {#storage-settings} diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index 00eb830c9ef..7195ee38af6 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -337,7 +337,7 @@ SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 Поддерживаемые типы данных: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`.
- Фильтром могут пользоваться функции: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions.md#hasany), [hasAll](../../../sql-reference/functions/array-functions.md#hasall). + Фильтром могут пользоваться функции: [equals](../../../sql-reference/functions/comparison-functions.md#equals), [notEquals](../../../sql-reference/functions/comparison-functions.md#notequals), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions.md#hasany), [hasAll](../../../sql-reference/functions/array-functions.md#hasall). **Примеры** @@ -354,8 +354,8 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT | Функция (оператор) / Индекс | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | |------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------| -| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | | [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ | | [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | | [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | @@ -363,10 +363,10 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT | [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | | [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | | [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [less (\<)](../../../sql-reference/functions/comparison-functions.md#less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | | 
[empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | | [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | | hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index 99ea7894ef8..a5e55e655f9 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -275,12 +275,11 @@ ClickHouse проверяет условия для `min_part_size` и `min_part Отложенная загрузка словарей. -Если `true`, то каждый словарь создаётся при первом использовании. Если словарь не удалось создать, то вызов функции, использующей словарь, сгенерирует исключение. +Если `true`, то каждый словарь загружается при первом использовании. Если словарь не удалось загрузить, то вызов функции, использующей словарь, сгенерирует исключение. -Если `false`, сервер начнет загрузку всех словарей на старте сервера. -Словари загружаются в фоне. Сервер не ждет на старте, пока словари закончат загружаться -(исключение: если `wait_dictionaries_load_at_startup` установлена в `true` - см. ниже). -Когда словарь используется в запросе первый раз, этот запрос будет ждать окончания загрузки словаря, если он еще не загрузился. +Если `false`, все словари будут загружаться на старте сервера. +Сервер будет ждать на старте окончания загрузки всех словарей перед началом обработки соединений +(исключение: если `wait_dictionaries_load_at_startup` установлена в `false` - см. ниже). По умолчанию - `true`. @@ -995,7 +994,7 @@ ClickHouse использует потоки из глобального пул - Положительное целое число. -Значение по умолчанию: 128. +Значение по умолчанию: 512. ## background_fetches_pool_size {#background_fetches_pool_size} @@ -1005,7 +1004,7 @@ ClickHouse использует потоки из глобального пул - Положительное целое число. -Значение по умолчанию: 8. +Значение по умолчанию: 16. ## background_distributed_schedule_pool_size {#background_distributed_schedule_pool_size} @@ -1216,6 +1215,7 @@ ClickHouse использует потоки из глобального пул - `metrics` – флаг для экспорта текущих значений метрик из таблицы [system.metrics](../system-tables/metrics.md#system_tables-metrics). - `events` – флаг для экспорта текущих значений метрик из таблицы [system.events](../system-tables/events.md#system_tables-events). - `asynchronous_metrics` – флаг для экспорта текущих значений значения метрик из таблицы [system.asynchronous_metrics](../system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics). +- `errors` - флаг для экспорта количества ошибок (по кодам) случившихся с момента последнего рестарта сервера. Эта информация может быть получена из таблицы [system.errors](../system-tables/asynchronous_metrics.md#system_tables-errors) **Пример** @@ -1226,6 +1226,7 @@ ClickHouse использует потоки из глобального пул true true true + true ``` @@ -1677,7 +1678,7 @@ TCP порт для защищённого обмена данными с кли ## user_files_path {#server_configuration_parameters-user_files_path} -Каталог с пользовательскими файлами. Используется в табличной функции [file()](../../operations/server-configuration-parameters/settings.md). +Каталог с пользовательскими файлами. Используется в табличных функциях [file()](../../sql-reference/table-functions/fileCluster.md) и [fileCluster()](../../sql-reference/table-functions/fileCluster.md). 
**Пример** @@ -1722,20 +1723,23 @@ TCP порт для защищённого обмена данными с кли ## wait_dictionaries_load_at_startup {#wait_dictionaries_load_at_startup} -Если `false`, то сервер не будет ждать на старте, пока словари закончат загружаться. -Это позволяет ClickHouse стартовать быстрее. +Эта настройка позволяет указать поведение если `dictionaries_lazy_load` установлено в `false`. +(Если `dictionaries_lazy_load` установлено в `true`, то эта настройка ни на что не влияет.) -Если `true`, то ClickHouse будет ждать на старте до окончания загрузки всех словарей (успешно или нет) -перед тем, как начать принимать соединения. -Это может привести к медленному старту ClickHouse, однако после этого некоторые запросы могут выполняться быстрее -(потому что им не придется ждать окончания загрузки используемых словарей). +Если `wait_dictionaries_load_at_startup` установлено в `false`, то сервер начнет загрузку всех словарей на старте +и будет обрабатывать соединения, не дожидаясь окончания загрузки словарей. +Когда словарь первый раз используется в запросе, запрос будет ждать окончания загрузки этого словаря, если он еще не загрузился. +Установка `wait_dictionaries_load_at_startup` в `false` может помочь ClickHouse стартовать быстрее, однако некоторые запросы могут выполняться медленее (потому что они будут ждать окончания загрузки используемых в них словарей). -По умолчанию - `false`. +Если `wait_dictionaries_load_at_startup` установлено в `true`, то сервер будет ждать окончания загрузки всех словарей на старте +до начала обработки соединений. + +По умолчанию - `true`. **Пример** ``` xml -false +true ``` ## zookeeper {#server-settings_zookeeper} diff --git a/docs/ru/sql-reference/functions/comparison-functions.md b/docs/ru/sql-reference/functions/comparison-functions.md index f66b42977cc..bb9322d5a82 100644 --- a/docs/ru/sql-reference/functions/comparison-functions.md +++ b/docs/ru/sql-reference/functions/comparison-functions.md @@ -23,14 +23,14 @@ sidebar_label: "Функции сравнения" Замечание. До версии 1.1.54134 сравнение знаковых и беззнаковых целых чисел производилось также, как в C++. То есть, вы могли получить неверный результат в таких случаях: SELECT 9223372036854775807 \> -1. С версии 1.1.54134 поведение изменилось и стало математически корректным. -## equals, оператор a = b и a == b {#function-equals} +## equals, оператор a = b и a == b {#equals} -## notEquals, оператор a != b и a `<>` b {#function-notequals} +## notEquals, оператор a != b и a `<>` b {#notequals} -## less, оператор `<` {#function-less} +## less, оператор `<` {#less} -## greater, оператор `>` {#function-greater} +## greater, оператор `>` {#greater} -## lessOrEquals, оператор `<=` {#function-lessorequals} +## lessOrEquals, оператор `<=` {#lessorequals} -## greaterOrEquals, оператор `>=` {#function-greaterorequals} +## greaterOrEquals, оператор `>=` {#greaterorequals} diff --git a/docs/ru/sql-reference/table-functions/file.md b/docs/ru/sql-reference/table-functions/file.md index f698554dcf9..7c709619679 100644 --- a/docs/ru/sql-reference/table-functions/file.md +++ b/docs/ru/sql-reference/table-functions/file.md @@ -13,7 +13,7 @@ sidebar_label: file **Синтаксис** ``` sql -file(path [,format] [,structure]) +file(path [,format] [,structure] [,compression]) ``` **Параметры** @@ -21,6 +21,7 @@ file(path [,format] [,structure]) - `path` — относительный путь до файла от [user_files_path](../../sql-reference/table-functions/file.md#server_configuration_parameters-user_files_path). 
Путь к файлу поддерживает следующие шаблоны в режиме доступа только для чтения `*`, `?`, `{abc,def}` и `{N..M}`, где `N`, `M` — числа, `'abc', 'def'` — строки. - `format` — [формат](../../interfaces/formats.md#formats) файла. - `structure` — структура таблицы. Формат: `'colunmn1_name column1_ype, column2_name column2_type, ...'`. +- `compression` — Используемый тип сжатия для запроса SELECT или желаемый тип сжатия для запроса INSERT. Поддерживаемые типы сжатия: `gz`, `br`, `xz`, `zst`, `lz4` и `bz2`. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/table-functions/fileCluster.md b/docs/ru/sql-reference/table-functions/fileCluster.md new file mode 100644 index 00000000000..7385f4859dc --- /dev/null +++ b/docs/ru/sql-reference/table-functions/fileCluster.md @@ -0,0 +1,84 @@ +--- +slug: /ru/sql-reference/table-functions/fileCluster +sidebar_position: 38 +sidebar_label: fileCluster +--- + +# fileCluster + +Позволяет одновременно обрабатывать файлы, находящиеся по указанному пути, на нескольких узлах внутри кластера. Узел-инициатор устанавливает соединения с рабочими узлами (worker nodes), раскрывает шаблоны в пути к файлам и отдаёт задачи по чтению файлов рабочим узлам. Рабочий узел запрашивает у инициатора путь к следующему файлу для обработки, повторяя до тех пор, пока не завершатся все задачи (то есть пока не будут обработаны все файлы). + +:::note +Эта табличная функция будет работать _корректно_ только в случае, если набор файлов, соответствующих изначально указанному пути, одинаков на всех узлах и содержание этих файлов идентично на различных узлах. В случае, если эти файлы различаются между узлами, результат не предопределён и зависит от очерёдности, с которой рабочие узлы будут запрашивать задачи у инициатора. +::: + +**Синтаксис** + +``` sql +fileCluster(cluster_name, path[, format, structure, compression_method]) +``` + +**Аргументы** + +- `cluster_name` — имя кластера, используемое для создания набора адресов и параметров подключения к удаленным и локальным серверам. +- `path` — относительный путь до файла от [user_files_path](../../sql-reference/table-functions/file.md#server_configuration_parameters-user_files_path). Путь к файлу поддерживает [шаблоны поискаglobs](#globs_in_path). +- `format` — [формат](../../interfaces/formats.md#formats) файла. +- `structure` — структура таблицы. Формат: `'colunmn1_name column1_ype, column2_name column2_type, ...'`. +- `compression_method` — Используемый тип сжатия. Поддерживаемые типы: `gz`, `br`, `xz`, `zst`, `lz4` и `bz2`. + +**Возвращаемое значение** + +Таблица с указанным форматом и структурой, содержащая данные из файлов, соответствующих указанному пути. 
+ +**Пример** +Пусть есть кластер с именем `my_cluster`, а также установлено нижеследующее значение параметра `user_files_path`: + +``` bash +$ grep user_files_path /etc/clickhouse-server/config.xml + /var/lib/clickhouse/user_files/ +``` + +Пусть также на каждом узле кластера в директории `user_files_path` находятся файлы `test1.csv` и `test2.csv`, и их содержимое идентично на разных узлах: +```bash +$ cat /var/lib/clickhouse/user_files/test1.csv + 1,"file1" + 11,"file11" + +$ cat /var/lib/clickhouse/user_files/test1.csv + 2,"file2" + 22,"file22" +``` + +Например, эти файлы можно создать, выполнив на каждом узле два запроса: +```sql +INSERT INTO TABLE FUNCTION file('file1.csv', 'CSV', 'i UInt32, s String') VALUES (1,'file1'), (11,'file11'); +INSERT INTO TABLE FUNCTION file('file2.csv', 'CSV', 'i UInt32, s String') VALUES (2,'file2'), (22,'file22'); +``` + +Прочитаем содержимое файлов `test1.csv` и `test2.csv` с помощью табличной функции `fileCluster`: + +```sql +SELECT * from fileCluster( + 'my_cluster', 'file{1,2}.csv', 'CSV', 'i UInt32, s String') ORDER BY (i, s)""" +) +``` + +``` +┌──i─┬─s──────┐ +│ 1 │ file1 │ +│ 11 │ file11 │ +└────┴────────┘ +┌──i─┬─s──────┐ +│ 2 │ file2 │ +│ 22 │ file22 │ +└────┴────────┘ +``` + + +## Шаблоны поиска в компонентах пути {#globs_in_path} + +Поддерживаются все шаблоны поиска, что поддерживаются табличной функцией [File](../../sql-reference/table-functions/file.md#globs-in-path). + +**Смотрите также** + +- [File (табличная функция)](../../sql-reference/table-functions/file.md) diff --git a/docs/ru/sql-reference/table-functions/numbers.md b/docs/ru/sql-reference/table-functions/numbers.md index 5a6edc0e988..f7e52793a3c 100644 --- a/docs/ru/sql-reference/table-functions/numbers.md +++ b/docs/ru/sql-reference/table-functions/numbers.md @@ -7,7 +7,7 @@ sidebar_label: numbers # numbers {#numbers} `numbers(N)` - возвращает таблицу с единственным столбцом `number` (UInt64), содержащим натуральные числа от `0` до `N-1`. -`numbers(N, M)` - возвращает таблицу с единственным столбцом `number` (UInt64), содержащим натуральные числа от `N` to `(N + M - 1)`. +`numbers(N, M)` - возвращает таблицу с единственным столбцом `number` (UInt64), содержащим натуральные числа от `N` до `(N + M - 1)`. Так же как и таблица `system.numbers` может использоваться для тестов и генерации последовательных значений. Функция `numbers(N, M)` работает более эффективно, чем выборка из `system.numbers`. 
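To make the `numbers(N, M)` behaviour described above concrete, a minimal sketch (added here only for illustration, not part of the original page):

```sql
-- numbers(10, 3) returns the 3 consecutive numbers starting at 10
SELECT * FROM numbers(10, 3);   -- 10, 11, 12
```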
diff --git a/docs/zh/engines/table-engines/mergetree-family/mergetree.md b/docs/zh/engines/table-engines/mergetree-family/mergetree.md index cec4cb09047..c738ae0f24c 100644 --- a/docs/zh/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/mergetree.md @@ -66,7 +66,7 @@ ORDER BY expr - `PARTITION BY` — [分区键](custom-partitioning-key.md) ,可选项。 - 大多数情况下,不需要分使用区键。即使需要使用,也不需要使用比月更细粒度的分区键。分区不会加快查询(这与 ORDER BY 表达式不同)。永远也别使用过细粒度的分区键。不要使用客户端指定分区标识符或分区字段名称来对数据进行分区(而是将分区字段标识或名称作为 ORDER BY 表达式的第一列来指定分区)。 + 大多数情况下,不需要使用分区键。即使需要使用,也不需要使用比月更细粒度的分区键。分区不会加快查询(这与 ORDER BY 表达式不同)。永远也别使用过细粒度的分区键。不要使用客户端指定分区标识符或分区字段名称来对数据进行分区(而是将分区字段标识或名称作为 ORDER BY 表达式的第一列来指定分区)。 要按月分区,可以使用表达式 `toYYYYMM(date_column)` ,这里的 `date_column` 是一个 [Date](../../../engines/table-engines/mergetree-family/mergetree.md) 类型的列。分区名的格式会是 `"YYYYMM"` 。 @@ -349,8 +349,8 @@ WHERE 子句中的条件可以包含对某列数据进行运算的函数表达 | 函数 (操作符) / 索引 | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | | ------------------------------------------------------------ | ----------- | ------ | ---------- | ---------- | ------------ | -| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | | [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✔ | | [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✗ | ✗ | ✗ | | [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | @@ -358,10 +358,10 @@ WHERE 子句中的条件可以包含对某列数据进行运算的函数表达 | [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | | [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | | [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [less (\<)](../../../sql-reference/functions/comparison-functions.md#less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | | [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | | [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | | hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | diff --git a/docs/zh/sql-reference/functions/comparison-functions.md 
b/docs/zh/sql-reference/functions/comparison-functions.md index ef3adf427f1..ed33dc40531 100644 --- a/docs/zh/sql-reference/functions/comparison-functions.md +++ b/docs/zh/sql-reference/functions/comparison-functions.md @@ -21,14 +21,14 @@ sidebar_label: 比较函数 字符串按字节进行比较。较短的字符串小于以其开头并且至少包含一个字符的所有字符串。 -## 等于,a=b和a==b 运算符 {#equals-a-b-and-a-b-operator} +## 等于,a=b和a==b 运算符 {#equals} -## 不等于,a!=b和a<>b 运算符 {#notequals-a-operator-b-and-a-b} +## 不等于,a!=b和a<>b 运算符 {#notequals} -## 少, < 运算符 {#less-operator} +## 少, < 运算符 {#less} -## 大于, > 运算符 {#greater-operator} +## 大于, > 运算符 {#greater} -## 小于等于, <= 运算符 {#lessorequals-operator} +## 小于等于, <= 运算符 {#lessorequals} -## 大于等于, >= 运算符 {#greaterorequals-operator} +## 大于等于, >= 运算符 {#greaterorequals} diff --git a/packages/clickhouse-common-static.yaml b/packages/clickhouse-common-static.yaml index 95532726d94..238126f95fd 100644 --- a/packages/clickhouse-common-static.yaml +++ b/packages/clickhouse-common-static.yaml @@ -44,6 +44,8 @@ contents: dst: /usr/bin/clickhouse-odbc-bridge - src: root/usr/share/bash-completion/completions dst: /usr/share/bash-completion/completions +- src: root/usr/share/clickhouse + dst: /usr/share/clickhouse # docs - src: ../AUTHORS dst: /usr/share/doc/clickhouse-common-static/AUTHORS diff --git a/packages/clickhouse-server.yaml b/packages/clickhouse-server.yaml index 5e2bc7c7412..7894129b8e3 100644 --- a/packages/clickhouse-server.yaml +++ b/packages/clickhouse-server.yaml @@ -52,8 +52,6 @@ contents: dst: /lib/systemd/system/clickhouse-server.service - src: root/usr/bin/clickhouse-copier dst: /usr/bin/clickhouse-copier -- src: root/usr/bin/clickhouse-report - dst: /usr/bin/clickhouse-report - src: root/usr/bin/clickhouse-server dst: /usr/bin/clickhouse-server # clickhouse-keeper part diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index fcb52d0fc0f..b3a5af6d6c9 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -63,8 +63,6 @@ option (ENABLE_CLICKHOUSE_SU "A tool similar to 'su'" ${ENABLE_CLICKHOUSE_ALL}) option (ENABLE_CLICKHOUSE_DISKS "A tool to manage disks" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_REPORT "A tiny tool to collect a clickhouse-server state" ${ENABLE_CLICKHOUSE_ALL}) - if (NOT ENABLE_NURAFT) # RECONFIGURE_MESSAGE_LEVEL should not be used here, # since ENABLE_NURAFT is set to OFF for FreeBSD and Darwin. 
@@ -390,9 +388,6 @@ if (ENABLE_CLICKHOUSE_SU) install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-su" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-su) endif () -if (ENABLE_CLICKHOUSE_REPORT) - include(${ClickHouse_SOURCE_DIR}/utils/report/CMakeLists.txt) -endif () if (ENABLE_CLICKHOUSE_KEEPER) if (NOT BUILD_STANDALONE_KEEPER AND CREATE_KEEPER_SYMLINK) @@ -462,3 +457,10 @@ endif() if (ENABLE_FUZZING) add_compile_definitions(FUZZING_MODE=1) endif () + +if (TARGET ch_contrib::protobuf) + get_property(google_proto_files TARGET ch_contrib::protobuf PROPERTY google_proto_files) + foreach (proto_file IN LISTS google_proto_files) + install(FILES ${proto_file} DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/clickhouse/protos/google/protobuf) + endforeach() +endif () diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index ed3d4a1ea69..d6b8b38d84d 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -46,6 +46,7 @@ namespace CurrentMetrics { extern const Metric LocalThread; extern const Metric LocalThreadActive; + extern const Metric LocalThreadScheduled; } namespace DB @@ -107,7 +108,7 @@ public: settings(settings_), shared_context(Context::createShared()), global_context(Context::createGlobal(shared_context.get())), - pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, concurrency) + pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, concurrency) { const auto secure = secure_ ? Protocol::Secure::Enable : Protocol::Secure::Disable; size_t connections_cnt = std::max(ports_.size(), hosts_.size()); diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index cc213b19b71..3233e40de31 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -306,6 +306,10 @@ void Client::initialize(Poco::Util::Application & self) /// Set path for format schema files if (config().has("format_schema_path")) global_context->setFormatSchemaPath(fs::weakly_canonical(config().getString("format_schema_path"))); + + /// Set the path for google proto files + if (config().has("google_protos_path")) + global_context->setGoogleProtosPath(fs::weakly_canonical(config().getString("google_protos_path"))); } @@ -325,7 +329,7 @@ try processConfig(); adjustSettings(); - initTtyBuffer(toProgressOption(config().getString("progress", "default"))); + initTTYBuffer(toProgressOption(config().getString("progress", "default"))); { // All that just to set DB::CurrentThread::get().getGlobalContext() @@ -1238,7 +1242,6 @@ void Client::processConfig() global_context->setCurrentQueryId(query_id); } print_stack_trace = config().getBool("stacktrace", false); - logging_initialized = true; if (config().has("multiquery")) is_multiquery = true; @@ -1460,7 +1463,6 @@ int mainEntryClickHouseClient(int argc, char ** argv) DB::Client client; // Initialize command line options client.init(argc, argv); - /// Initialize config file return client.run(); } catch (const DB::Exception & e) diff --git a/programs/client/clickhouse-client.xml b/programs/client/clickhouse-client.xml index dbfb267d778..d0deb818c1e 100644 --- a/programs/client/clickhouse-client.xml +++ b/programs/client/clickhouse-client.xml @@ -37,7 +37,7 @@ {display_name} \e[1;31m:)\e[0m - + + /usr/share/clickhouse/protos/ settings_push; - auto connection = task_table.cluster_push->getAnyShardInfo().pool->get(timeouts, &settings_push, true); + auto connection = 
task_table.cluster_push->getAnyShardInfo().pool->get(timeouts, settings_push, true); String create_query = getRemoteCreateTable(task_shard.task_table.table_push, *connection, settings_push); ParserCreateQuery parser_create_query; @@ -1785,7 +1786,7 @@ String ClusterCopier::getRemoteCreateTable(const DatabaseAndTableName & table, C ASTPtr ClusterCopier::getCreateTableForPullShard(const ConnectionTimeouts & timeouts, TaskShard & task_shard) { /// Fetch and parse (possibly) new definition - auto connection_entry = task_shard.info.pool->get(timeouts, &task_cluster->settings_pull, true); + auto connection_entry = task_shard.info.pool->get(timeouts, task_cluster->settings_pull, true); String create_query_pull_str = getRemoteCreateTable( task_shard.task_table.table_pull, *connection_entry, diff --git a/programs/disks/CommandCopy.cpp b/programs/disks/CommandCopy.cpp index 296fc708411..421e4038d12 100644 --- a/programs/disks/CommandCopy.cpp +++ b/programs/disks/CommandCopy.cpp @@ -57,7 +57,7 @@ public: String relative_path_from = validatePathAndGetAsRelative(path_from); String relative_path_to = validatePathAndGetAsRelative(path_to); - disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to, /* read_settings= */ {}, /* write_settings= */ {}); + disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to, /* read_settings= */ {}, /* write_settings= */ {}, /* cancellation_hook= */ {}); } }; } diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index 6df1bbaa329..e04e669abae 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -556,7 +556,8 @@ catch (...) { /// Poco does not provide stacktrace. tryLogCurrentException("Application"); - throw; + auto code = getCurrentExceptionCode(); + return code ? code : -1; } diff --git a/programs/keeper/keeper_config.xml b/programs/keeper/keeper_config.xml index 3d728e2bfdf..4cf84cffc86 100644 --- a/programs/keeper/keeper_config.xml +++ b/programs/keeper/keeper_config.xml @@ -41,6 +41,7 @@ 10000 100000 information + false diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 9337dcadce9..f3b84fa3eb1 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -495,7 +495,7 @@ try processConfig(); adjustSettings(); - initTtyBuffer(toProgressOption(config().getString("progress", "default"))); + initTTYBuffer(toProgressOption(config().getString("progress", "default"))); applyCmdSettings(global_context); @@ -563,9 +563,6 @@ catch (...) void LocalServer::updateLoggerLevel(const String & logs_level) { - if (!logging_initialized) - return; - config().setString("logger.level", logs_level); updateLevels(config(), logger()); } @@ -607,21 +604,13 @@ void LocalServer::processConfig() Poco::AutoPtr pf = new OwnPatternFormatter; Poco::AutoPtr log = new OwnFormattingChannel(pf, new Poco::SimpleFileChannel(server_logs_file)); Poco::Logger::root().setChannel(log); - logging_initialized = true; - } - else if (logging || is_interactive) - { - config().setString("logger", "logger"); - auto log_level_default = is_interactive && !logging ? 
"none" : level; - config().setString("logger.level", config().getString("log-level", config().getString("send_logs_level", log_level_default))); - buildLoggers(config(), logger(), "clickhouse-local"); - logging_initialized = true; } else { - Poco::Logger::root().setLevel("none"); - Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::NullChannel())); - logging_initialized = false; + config().setString("logger", "logger"); + auto log_level_default = logging ? level : "fatal"; + config().setString("logger.level", config().getString("log-level", config().getString("send_logs_level", log_level_default))); + buildLoggers(config(), logger(), "clickhouse-local"); } shared_context = Context::createShared(); diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index efb5ccb3203..8519532f788 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1163,6 +1163,8 @@ try CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_max_size_in_bytes, compiled_expression_cache_max_elements); #endif + NamedCollectionUtils::loadIfNot(); + /// Initialize main config reloader. std::string include_from_path = config().getString("include_from", "/etc/metrika.xml"); @@ -1277,6 +1279,8 @@ try global_context->setHTTPHeaderFilter(*config); global_context->setMaxTableSizeToDrop(server_settings_.max_table_size_to_drop); + global_context->setClientHTTPHeaderForbiddenHeaders(server_settings_.get_client_http_header_forbidden_headers); + global_context->setAllowGetHTTPHeaderFunction(server_settings_.allow_get_client_http_header); global_context->setMaxPartitionSizeToDrop(server_settings_.max_partition_size_to_drop); ConcurrencyControl::SlotCount concurrent_threads_soft_limit = ConcurrencyControl::Unlimited; @@ -1573,6 +1577,10 @@ try global_context->setFormatSchemaPath(format_schema_path); fs::create_directories(format_schema_path); + /// Set the path for google proto files + if (config().has("google_protos_path")) + global_context->setGoogleProtosPath(fs::weakly_canonical(config().getString("google_protos_path"))); + /// Set path for filesystem caches fs::path filesystem_caches_path(config().getString("filesystem_caches_path", "")); if (!filesystem_caches_path.empty()) @@ -1823,7 +1831,7 @@ try { global_context->loadOrReloadDictionaries(config()); - if (config().getBool("wait_dictionaries_load_at_startup", false)) + if (!config().getBool("dictionaries_lazy_load", true) && config().getBool("wait_dictionaries_load_at_startup", true)) global_context->waitForDictionariesLoad(); } catch (...) @@ -1970,7 +1978,8 @@ catch (...) { /// Poco does not provide stacktrace. tryLogCurrentException("Application"); - throw; + auto code = getCurrentExceptionCode(); + return code ? code : -1; } std::unique_ptr Server::buildProtocolStackFromConfig( diff --git a/programs/server/config.d/path.xml b/programs/server/config.d/path.xml index 46af5bfb64b..7afada689d7 100644 --- a/programs/server/config.d/path.xml +++ b/programs/server/config.d/path.xml @@ -3,6 +3,7 @@ ./tmp/ ./user_files/ ./format_schemas/ + ../../contrib/google-protobuf/src/ ./access/ ./top_level_domains/ diff --git a/programs/server/config.xml b/programs/server/config.xml index 7800aa51166..f367b97cec1 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -830,13 +830,13 @@ And also (and which is more important), the initial_user will be used as current user for the query. 
- Right now the protocol is pretty simple and it only takes into account: + Right now the protocol is pretty simple, and it only takes into account: - cluster name - query - Also it will be nice if the following will be implemented: - - source hostname (see interserver_http_host), but then it will depends from DNS, - it can use IP address instead, but then the you need to get correct on the initiator node. + Also, it will be nice if the following will be implemented: + - source hostname (see interserver_http_host), but then it will depend on DNS, + it can use IP address instead, but then you need to get correct on the initiator node. - target hostname / ip address (same notes as for source hostname) - time-based security tokens --> @@ -1248,6 +1248,25 @@ 7500 + + + system + s3queue_log
+ toYYYYMM(event_date) + 7500 +
+ + + + system + blob_storage_log
+ toYYYYMM(event_date) + 7500 + event_date + INTERVAL 30 DAY +
+ - false + true *_function.*ml @@ -1409,6 +1428,10 @@ --> /var/lib/clickhouse/format_schemas/ + + /usr/share/clickhouse/protos/ + - + 1 + + + 1 10 + + + /usr/share/clickhouse/protos/ + diff --git a/tests/config/config.d/blob_storage_log.xml b/tests/config/config.d/blob_storage_log.xml new file mode 100644 index 00000000000..474c163b937 --- /dev/null +++ b/tests/config/config.d/blob_storage_log.xml @@ -0,0 +1,9 @@ + + + system + blob_storage_log
+ toYYYYMM(event_date) + 7500 + event_date + INTERVAL 30 DAY +
+
diff --git a/tests/config/config.d/forbidden_get_client_http_headers.xml b/tests/config/config.d/forbidden_get_client_http_headers.xml new file mode 100644 index 00000000000..cfecb015260 --- /dev/null +++ b/tests/config/config.d/forbidden_get_client_http_headers.xml @@ -0,0 +1,4 @@ + + FORBIDDEN-KEY1,FORBIDDEN-KEY2 + 1 + diff --git a/tests/config/config.d/keeper_port.xml b/tests/config/config.d/keeper_port.xml index 1e646cd07a7..b87014d2485 100644 --- a/tests/config/config.d/keeper_port.xml +++ b/tests/config/config.d/keeper_port.xml @@ -20,6 +20,8 @@ 0 0 + 0 + 1 diff --git a/tests/config/config.d/s3_storage_policy_by_default.xml b/tests/config/config.d/s3_storage_policy_by_default.xml index dd93a317a77..4e3d9636daf 100644 --- a/tests/config/config.d/s3_storage_policy_by_default.xml +++ b/tests/config/config.d/s3_storage_policy_by_default.xml @@ -25,4 +25,5 @@ s3 + cached_s3 diff --git a/tests/config/install.sh b/tests/config/install.sh index c31275cdcf2..96f35219bc6 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -15,9 +15,11 @@ mkdir -p $DEST_SERVER_PATH/config.d/ mkdir -p $DEST_SERVER_PATH/users.d/ mkdir -p $DEST_CLIENT_PATH +ln -sf $SRC_PATH/config.d/forbidden_get_client_http_headers.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/zookeeper_write.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/listen.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/text_log.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/blob_storage_log.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/custom_settings_prefixes.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/enable_access_control_improvements.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/macros.xml $DEST_SERVER_PATH/config.d/ @@ -152,6 +154,7 @@ if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/users.d/s3_cache.xml $DEST_SERVER_PATH/users.d/ + ln -sf $SRC_PATH/users.d/s3_cache_new.xml $DEST_SERVER_PATH/users.d/ fi if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then diff --git a/tests/config/users.d/s3_cache_new.xml b/tests/config/users.d/s3_cache_new.xml new file mode 100644 index 00000000000..0afa3d68fc6 --- /dev/null +++ b/tests/config/users.d/s3_cache_new.xml @@ -0,0 +1,7 @@ + + + + 10 + + + diff --git a/tests/integration/CMakeLists.txt b/tests/integration/CMakeLists.txt deleted file mode 100644 index ed12cf5b4a3..00000000000 --- a/tests/integration/CMakeLists.txt +++ /dev/null @@ -1,20 +0,0 @@ -set (TEST_USE_BINARIES CLICKHOUSE_TESTS_SERVER_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse CLICKHOUSE_TESTS_CLIENT_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse) - -find_program(DOCKER_CMD docker) -find_program(DOCKER_COMPOSE_CMD docker-compose) -find_program(PYTEST_CMD pytest) -find_program(SUDO_CMD sudo) - -# will mount only one binary to docker container - build with .so cant work -if(DOCKER_CMD) - if(INTEGRATION_USE_RUNNER AND SUDO_CMD) - add_test(NAME integration-runner WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND ${SUDO_CMD} ${CMAKE_CURRENT_SOURCE_DIR}/runner --binary ${ClickHouse_BINARY_DIR}/programs/clickhouse --configs-dir ${ClickHouse_SOURCE_DIR}/programs/server/) - message(STATUS "Using tests in docker with runner SUDO=${SUDO_CMD}; DOCKER=${DOCKER_CMD};") - endif() - if(NOT INTEGRATION_USE_RUNNER AND DOCKER_COMPOSE_CMD AND PYTEST_CMD) - # To run one test with debug: - # cmake . 
-DPYTEST_OPT="-ss;test_cluster_copier" - add_test(NAME integration-pytest WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND env ${TEST_USE_BINARIES} "CLICKHOUSE_TESTS_BASE_CONFIG_DIR=${ClickHouse_SOURCE_DIR}/programs/server/" "CLICKHOUSE_TESTS_CONFIG_DIR=${ClickHouse_SOURCE_DIR}/tests/config/" ${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT}) - message(STATUS "Using tests in docker DOCKER=${DOCKER_CMD}; DOCKER_COMPOSE=${DOCKER_COMPOSE_CMD}; PYTEST=${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT}") - endif() -endif() diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 729b30ba934..5e4bb32cf94 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -583,6 +583,7 @@ class ClickHouseCluster: self.rabbitmq_host = "rabbitmq1" self.rabbitmq_ip = None self.rabbitmq_port = 5672 + self.rabbitmq_secure_port = 5671 self.rabbitmq_dir = p.abspath(p.join(self.instances_dir, "rabbitmq")) self.rabbitmq_cookie_file = os.path.join(self.rabbitmq_dir, "erlang.cookie") self.rabbitmq_logs_dir = os.path.join(self.rabbitmq_dir, "logs") @@ -1316,6 +1317,7 @@ class ClickHouseCluster: self.with_rabbitmq = True env_variables["RABBITMQ_HOST"] = self.rabbitmq_host env_variables["RABBITMQ_PORT"] = str(self.rabbitmq_port) + env_variables["RABBITMQ_SECURE_PORT"] = str(self.rabbitmq_secure_port) env_variables["RABBITMQ_LOGS"] = self.rabbitmq_logs_dir env_variables["RABBITMQ_LOGS_FS"] = "bind" env_variables["RABBITMQ_COOKIE_FILE"] = self.rabbitmq_cookie_file @@ -4128,14 +4130,14 @@ class ClickHouseInstance: [ "bash", "-c", - "echo 'ATTACH DATABASE system ENGINE=Ordinary' > /var/lib/clickhouse/metadata/system.sql", + "if [ ! -f /var/lib/clickhouse/metadata/system.sql ]; then echo 'ATTACH DATABASE system ENGINE=Ordinary' > /var/lib/clickhouse/metadata/system.sql; fi", ] ) self.exec_in_container( [ "bash", "-c", - "echo 'ATTACH DATABASE system ENGINE=Ordinary' > /var/lib/clickhouse/metadata/default.sql", + "if [ ! 
-f /var/lib/clickhouse/metadata/default.sql ]; then echo 'ATTACH DATABASE system ENGINE=Ordinary' > /var/lib/clickhouse/metadata/default.sql; fi", ] ) self.exec_in_container( diff --git a/tests/integration/helpers/corrupt_part_data_on_disk.py b/tests/integration/helpers/corrupt_part_data_on_disk.py index e253ce23d83..a84a6e825e6 100644 --- a/tests/integration/helpers/corrupt_part_data_on_disk.py +++ b/tests/integration/helpers/corrupt_part_data_on_disk.py @@ -1,19 +1,21 @@ -def corrupt_part_data_on_disk(node, table, part_name): +def corrupt_part_data_on_disk(node, table, part_name, file_ext=".bin", database=None): part_path = node.query( - "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format( - table, part_name + "SELECT path FROM system.parts WHERE table = '{}' and name = '{}' {}".format( + table, + part_name, + f"AND database = '{database}'" if database is not None else "", ) ).strip() - corrupt_part_data_by_path(node, part_path) + corrupt_part_data_by_path(node, part_path, file_ext) -def corrupt_part_data_by_path(node, part_path): +def corrupt_part_data_by_path(node, part_path, file_ext=".bin"): print("Corrupting part", part_path, "at", node.name) print( "Will corrupt: ", node.exec_in_container( - ["bash", "-c", "cd {p} && ls *.bin | head -n 1".format(p=part_path)] + ["bash", "-c", f"cd {part_path} && ls *{file_ext} | head -n 1"] ), ) @@ -21,9 +23,7 @@ def corrupt_part_data_by_path(node, part_path): [ "bash", "-c", - "cd {p} && ls *.bin | head -n 1 | xargs -I{{}} sh -c 'echo \"1\" >> $1' -- {{}}".format( - p=part_path - ), + f"cd {part_path} && ls *{file_ext} | head -n 1 | xargs -I{{}} sh -c 'truncate -s -1 $1' -- {{}}", ], privileged=True, ) diff --git a/tests/integration/helpers/keeper_config1.xml b/tests/integration/helpers/keeper_config1.xml index 7702aecba9c..12c6c0b78b6 100644 --- a/tests/integration/helpers/keeper_config1.xml +++ b/tests/integration/helpers/keeper_config1.xml @@ -11,6 +11,9 @@ 2181 + + az-zoo1 + 1 diff --git a/tests/integration/helpers/keeper_config2.xml b/tests/integration/helpers/keeper_config2.xml index 2a1a1c1003c..2afff2f5e59 100644 --- a/tests/integration/helpers/keeper_config2.xml +++ b/tests/integration/helpers/keeper_config2.xml @@ -12,6 +12,10 @@ 2181 2 + + az-zoo2 + 1 + 10000 diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index d056225fee4..33dd85aceaf 100644 --- a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -92,5 +92,9 @@ "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_grpc", "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_tcp_and_others", "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_setting_in_query", - "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_client_suggestions_load" + "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_client_suggestions_load", + + "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_stop_moves_query", + "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_table_detach", + "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_zookeeper_disconnect" ] diff --git a/tests/queries/0_stateless/01414_optimize_any_bug.reference b/tests/integration/test_backup_restore_keeper_map/__init__.py similarity index 100% rename from tests/queries/0_stateless/01414_optimize_any_bug.reference 
rename to tests/integration/test_backup_restore_keeper_map/__init__.py diff --git a/tests/integration/test_backup_restore_keeper_map/configs/backups_disk.xml b/tests/integration/test_backup_restore_keeper_map/configs/backups_disk.xml new file mode 100644 index 00000000000..b99a51cd56d --- /dev/null +++ b/tests/integration/test_backup_restore_keeper_map/configs/backups_disk.xml @@ -0,0 +1,13 @@ + + + + + local + /backups/ + + + + + backups + + diff --git a/tests/integration/test_backup_restore_keeper_map/configs/keeper_map_path_prefix.xml b/tests/integration/test_backup_restore_keeper_map/configs/keeper_map_path_prefix.xml new file mode 100644 index 00000000000..91d7b9d3f8f --- /dev/null +++ b/tests/integration/test_backup_restore_keeper_map/configs/keeper_map_path_prefix.xml @@ -0,0 +1,3 @@ + + /keeper_map_tables + diff --git a/tests/integration/test_backup_restore_keeper_map/configs/remote_servers.xml b/tests/integration/test_backup_restore_keeper_map/configs/remote_servers.xml new file mode 100644 index 00000000000..5cf07c69fd6 --- /dev/null +++ b/tests/integration/test_backup_restore_keeper_map/configs/remote_servers.xml @@ -0,0 +1,22 @@ + + + + + + node1 + 9000 + + + node2 + 9000 + + + + + node3 + 9000 + + + + + diff --git a/tests/integration/test_backup_restore_keeper_map/configs/zookeeper_retries.xml b/tests/integration/test_backup_restore_keeper_map/configs/zookeeper_retries.xml new file mode 100644 index 00000000000..1283f28a8cb --- /dev/null +++ b/tests/integration/test_backup_restore_keeper_map/configs/zookeeper_retries.xml @@ -0,0 +1,11 @@ + + + + 1000 + 1 + 1 + 42 + 0.002 + + + diff --git a/tests/integration/test_backup_restore_keeper_map/test.py b/tests/integration/test_backup_restore_keeper_map/test.py new file mode 100644 index 00000000000..c401f482c3f --- /dev/null +++ b/tests/integration/test_backup_restore_keeper_map/test.py @@ -0,0 +1,136 @@ +from time import sleep +import pytest +from helpers.cluster import ClickHouseCluster + + +cluster = ClickHouseCluster(__file__) + +main_configs = [ + "configs/remote_servers.xml", + "configs/backups_disk.xml", + "configs/keeper_map_path_prefix.xml", +] + +user_configs = [ + "configs/zookeeper_retries.xml", +] + +node1 = cluster.add_instance( + "node1", + main_configs=main_configs, + user_configs=user_configs, + external_dirs=["/backups/"], + macros={"replica": "node1", "shard": "shard1"}, + with_zookeeper=True, + stay_alive=True, +) + +node2 = cluster.add_instance( + "node2", + main_configs=main_configs, + user_configs=user_configs, + external_dirs=["/backups/"], + macros={"replica": "node2", "shard": "shard1"}, + with_zookeeper=True, + stay_alive=True, +) + + +node3 = cluster.add_instance( + "node3", + main_configs=main_configs, + user_configs=user_configs, + external_dirs=["/backups/"], + macros={"replica": "node3", "shard": "shard2"}, + with_zookeeper=True, + stay_alive=True, +) + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +backup_id_counter = 0 + + +def new_backup_name(base_name): + global backup_id_counter + backup_id_counter += 1 + return f"Disk('backups', '{base_name}{backup_id_counter}')" + + +@pytest.mark.parametrize("deduplicate_files", [0, 1]) +def test_on_cluster(deduplicate_files): + database_name = f"keeper_backup{deduplicate_files}" + node1.query_with_retry(f"CREATE DATABASE {database_name} ON CLUSTER cluster") + node1.query_with_retry( + f"CREATE TABLE {database_name}.keeper1 ON CLUSTER cluster (key UInt64, value 
String) Engine=KeeperMap('/{database_name}/test_on_cluster1') PRIMARY KEY key" + ) + node1.query_with_retry( + f"CREATE TABLE {database_name}.keeper2 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/{database_name}/test_on_cluster1') PRIMARY KEY key" + ) + node1.query_with_retry( + f"CREATE TABLE {database_name}.keeper3 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/{database_name}/test_on_cluster2') PRIMARY KEY key" + ) + node1.query_with_retry( + f"INSERT INTO {database_name}.keeper2 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5" + ) + node1.query_with_retry( + f"INSERT INTO {database_name}.keeper3 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5" + ) + + expected_result = "".join(f"{i}\ttest{i}\n" for i in range(5)) + + def verify_data(): + for node in [node1, node2, node3]: + for i in range(1, 4): + result = node.query_with_retry( + f"SELECT key, value FROM {database_name}.keeper{i} ORDER BY key FORMAT TSV" + ) + assert result == expected_result + + verify_data() + + backup_name = new_backup_name("test_on_cluster") + node1.query( + f"BACKUP DATABASE {database_name} ON CLUSTER cluster TO {backup_name} SETTINGS async = false, deduplicate_files = {deduplicate_files};" + ) + + node1.query(f"DROP DATABASE {database_name} ON CLUSTER cluster SYNC;") + + def apply_for_all_nodes(f): + for node in [node1, node2, node3]: + f(node) + + def change_keeper_map_prefix(node): + node.replace_config( + "/etc/clickhouse-server/config.d/keeper_map_path_prefix.xml", + """ + + /different_path/keeper_map + +""", + ) + + apply_for_all_nodes(lambda node: node.stop_clickhouse()) + apply_for_all_nodes(change_keeper_map_prefix) + apply_for_all_nodes(lambda node: node.start_clickhouse()) + + node1.query( + f"RESTORE DATABASE {database_name} ON CLUSTER cluster FROM {backup_name} SETTINGS async = false;" + ) + + verify_data() + + node1.query(f"DROP TABLE {database_name}.keeper3 ON CLUSTER cluster SYNC;") + node1.query( + f"RESTORE TABLE {database_name}.keeper3 ON CLUSTER cluster FROM {backup_name} SETTINGS async = false;" + ) + + verify_data() diff --git a/tests/integration/test_backup_restore_on_cluster/test_concurrency.py b/tests/integration/test_backup_restore_on_cluster/test_concurrency.py index aea82c6b559..ab37846db9a 100644 --- a/tests/integration/test_backup_restore_on_cluster/test_concurrency.py +++ b/tests/integration/test_backup_restore_on_cluster/test_concurrency.py @@ -214,7 +214,13 @@ def test_create_or_drop_tables_during_backup(db_engine, table_engine): while time.time() < end_time: table_name = f"mydb.tbl{randint(1, num_nodes)}" node = nodes[randint(0, num_nodes - 1)] - node.query(f"DROP TABLE IF EXISTS {table_name} SYNC") + # "DROP TABLE IF EXISTS" still can throw some errors (e.g. "WRITE locking attempt on node0 has timed out!") + # So we use query_and_get_answer_with_error() to ignore any errors. + # `lock_acquire_timeout` is also reduced because we don't wait our test to wait too long. + node.query_and_get_answer_with_error( + f"DROP TABLE IF EXISTS {table_name} SYNC", + settings={"lock_acquire_timeout": 10}, + ) def rename_tables(): while time.time() < end_time: diff --git a/tests/integration/test_backup_restore_s3/configs/blob_log.xml b/tests/integration/test_backup_restore_s3/configs/blob_log.xml new file mode 100644 index 00000000000..474c163b937 --- /dev/null +++ b/tests/integration/test_backup_restore_s3/configs/blob_log.xml @@ -0,0 +1,9 @@ + + + system + blob_storage_log
+ toYYYYMM(event_date) + 7500 + event_date + INTERVAL 30 DAY +
+
diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index f8ec39d240b..55d40b14ea7 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -12,6 +12,7 @@ node = cluster.add_instance( "configs/disk_s3.xml", "configs/named_collection_s3_backups.xml", "configs/s3_settings.xml", + "configs/blob_log.xml", ], user_configs=[ "configs/zookeeper_retries.xml", @@ -51,10 +52,12 @@ def get_events_for_query(query_id: str) -> Dict[str, int]: """ ) ) - return { + result = { event: int(value) for event, value in [line.split("\t") for line in events.lines] } + result["query_id"] = query_id + return result def format_settings(settings): @@ -118,7 +121,7 @@ def check_backup_and_restore( ) -def check_system_tables(): +def check_system_tables(backup_query_id=None): disks = [ tuple(disk.split("\t")) for disk in node.query("SELECT name, type FROM system.disks").split("\n") @@ -136,6 +139,14 @@ def check_system_tables(): if expected_disk not in disks: raise AssertionError(f"Missed {expected_disk} in {disks}") + if backup_query_id is not None: + blob_storage_log = node.query( + f"SELECT count() FROM system.blob_storage_log WHERE query_id = '{backup_query_id}' AND error = '' AND event_type = 'Upload'" + ).strip() + assert int(blob_storage_log) >= 1, node.query( + "SELECT * FROM system.blob_storage_log FORMAT PrettyCompactMonoBlock" + ) + @pytest.mark.parametrize( "storage_policy, to_disk", @@ -179,8 +190,8 @@ def test_backup_to_s3(): backup_destination = ( f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')" ) - check_backup_and_restore(storage_policy, backup_destination) - check_system_tables() + (backup_events, _) = check_backup_and_restore(storage_policy, backup_destination) + check_system_tables(backup_events["query_id"]) def test_backup_to_s3_named_collection(): @@ -203,6 +214,15 @@ def test_backup_to_s3_multipart(): f"copyDataToS3File: Multipart upload has completed. 
Bucket: root, Key: data/backups/multipart/{backup_name}" ) + backup_query_id = backup_events["query_id"] + blob_storage_log = node.query( + f"SELECT countIf(event_type == 'MultiPartUploadCreate') * countIf(event_type == 'MultiPartUploadComplete') * countIf(event_type == 'MultiPartUploadWrite') " + f"FROM system.blob_storage_log WHERE query_id = '{backup_query_id}' AND error = ''" + ).strip() + assert int(blob_storage_log) >= 1, node.query( + "SELECT * FROM system.blob_storage_log FORMAT PrettyCompactMonoBlock" + ) + s3_backup_events = ( "WriteBufferFromS3Microseconds", "WriteBufferFromS3Bytes", diff --git a/tests/integration/test_backward_compatibility/test.py b/tests/integration/test_backward_compatibility/test.py index 6f21b184a95..847483f2b9b 100644 --- a/tests/integration/test_backward_compatibility/test.py +++ b/tests/integration/test_backward_compatibility/test.py @@ -7,7 +7,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=True, image="yandex/clickhouse-server", - tag="19.17.8.54", + tag="19.16.9.37", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py index cf258987cbf..94bc1d3bfc9 100644 --- a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py +++ b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py @@ -7,7 +7,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=True, image="yandex/clickhouse-server", - tag="21.3", + tag="20.8.11.17", with_installed_binary=True, allow_analyzer=False, ) diff --git a/tests/integration/test_backward_compatibility/test_convert_ordinary.py b/tests/integration/test_backward_compatibility/test_convert_ordinary.py index 36facdd59b1..034a68e0f30 100644 --- a/tests/integration/test_backward_compatibility/test_convert_ordinary.py +++ b/tests/integration/test_backward_compatibility/test_convert_ordinary.py @@ -5,7 +5,7 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", image="yandex/clickhouse-server", - tag="19.17.8.54", + tag="19.16.9.37", stay_alive=True, with_zookeeper=True, with_installed_binary=True, diff --git a/tests/integration/test_backward_compatibility/test_cte_distributed.py b/tests/integration/test_backward_compatibility/test_cte_distributed.py index c68468aad75..d47ae3aa255 100644 --- a/tests/integration/test_backward_compatibility/test_cte_distributed.py +++ b/tests/integration/test_backward_compatibility/test_cte_distributed.py @@ -8,7 +8,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=False, image="yandex/clickhouse-server", - tag="21.7.3.14", + tag="21.6", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_functions.py b/tests/integration/test_backward_compatibility/test_functions.py index 94771a624e2..b6b6ef28de5 100644 --- a/tests/integration/test_backward_compatibility/test_functions.py +++ b/tests/integration/test_backward_compatibility/test_functions.py @@ -153,6 +153,9 @@ def test_string_functions(start_cluster): # mandatory or optional). The former lib produces a value based on implicit padding, the latter lib throws an error. "FROM_BASE64", "base64Decode", + # PR #56913 (in v23.11) corrected the way tryBase64Decode() behaved with invalid inputs. Old versions return garbage, new versions + # return an empty string (as it was always documented). 
+ "tryBase64Decode", # Removed in 23.9 "meiliMatch", ] diff --git a/tests/integration/test_backward_compatibility/test_insert_profile_events.py b/tests/integration/test_backward_compatibility/test_insert_profile_events.py index 8564c6b5952..d38bece7855 100644 --- a/tests/integration/test_backward_compatibility/test_insert_profile_events.py +++ b/tests/integration/test_backward_compatibility/test_insert_profile_events.py @@ -11,7 +11,7 @@ upstream_node = cluster.add_instance("upstream_node", allow_analyzer=False) old_node = cluster.add_instance( "old_node", image="clickhouse/clickhouse-server", - tag="22.5.1.2079", + tag="22.6", with_installed_binary=True, allow_analyzer=False, ) diff --git a/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py b/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py index 96b41c81384..5261a279a4f 100644 --- a/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py +++ b/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py @@ -7,7 +7,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=False, image="yandex/clickhouse-server", - tag="21.1", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, @@ -16,7 +16,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=False, image="yandex/clickhouse-server", - tag="21.1", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py b/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py index 3cd708d5029..cf7a25e8dc1 100644 --- a/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py +++ b/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py @@ -8,7 +8,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=False, image="yandex/clickhouse-server", - tag="21.7.2.7", + tag="21.6", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py index 7e10b6ab430..ec1d7fedac5 100644 --- a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py +++ b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py @@ -8,7 +8,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=False, image="yandex/clickhouse-server", - tag="21.7.2.7", + tag="21.6", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_check_table/test.py b/tests/integration/test_check_table/test.py index 70cadbc97e2..021977fb6b6 100644 --- a/tests/integration/test_check_table/test.py +++ b/tests/integration/test_check_table/test.py @@ -3,6 +3,7 @@ import pytest import concurrent from helpers.cluster import ClickHouseCluster from helpers.client import QueryRuntimeException +from helpers.corrupt_part_data_on_disk import corrupt_part_data_on_disk cluster = ClickHouseCluster(__file__) @@ -21,22 +22,6 @@ def started_cluster(): cluster.shutdown() -def corrupt_data_part_on_disk(node, database, table, part_name): - part_path = node.query( - f"SELECT path FROM system.parts WHERE database = '{database}' AND table = '{table}' AND name = '{part_name}'" - ).strip() - node.exec_in_container( - [ - "bash", - "-c", - "cd {p} && ls *.bin | head 
-n 1 | xargs -I{{}} sh -c 'echo \"1\" >> $1' -- {{}}".format( - p=part_path - ), - ], - privileged=True, - ) - - def remove_checksums_on_disk(node, database, table, part_name): part_path = node.query( f"SELECT path FROM system.parts WHERE database = '{database}' AND table = '{table}' AND name = '{part_name}'" @@ -59,14 +44,15 @@ def remove_part_from_disk(node, table, part_name): ) -def test_check_normal_table_corruption(started_cluster): +@pytest.mark.parametrize("merge_tree_settings", [""]) +def test_check_normal_table_corruption(started_cluster, merge_tree_settings): node1.query("DROP TABLE IF EXISTS non_replicated_mt") node1.query( - """ + f""" CREATE TABLE non_replicated_mt(date Date, id UInt32, value Int32) ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id - SETTINGS min_bytes_for_wide_part=0; + {merge_tree_settings}; """ ) @@ -105,7 +91,9 @@ def test_check_normal_table_corruption(started_cluster): assert node1.query("SELECT COUNT() FROM non_replicated_mt") == "2\n" - corrupt_data_part_on_disk(node1, "default", "non_replicated_mt", "201902_1_1_0") + corrupt_part_data_on_disk( + node1, "non_replicated_mt", "201902_1_1_0", database="default" + ) assert node1.query( "CHECK TABLE non_replicated_mt", @@ -129,7 +117,9 @@ def test_check_normal_table_corruption(started_cluster): == "201901_2_2_0\t1\t\n" ) - corrupt_data_part_on_disk(node1, "default", "non_replicated_mt", "201901_2_2_0") + corrupt_part_data_on_disk( + node1, "non_replicated_mt", "201901_2_2_0", database="default" + ) remove_checksums_on_disk(node1, "default", "non_replicated_mt", "201901_2_2_0") @@ -139,16 +129,23 @@ def test_check_normal_table_corruption(started_cluster): ).strip().split("\t")[0:2] == ["201901_2_2_0", "0"] -def test_check_replicated_table_simple(started_cluster): +@pytest.mark.parametrize("merge_tree_settings, zk_path_suffix", [("", "_0")]) +def test_check_replicated_table_simple( + started_cluster, merge_tree_settings, zk_path_suffix +): for node in [node1, node2]: - node.query("DROP TABLE IF EXISTS replicated_mt") + node.query("DROP TABLE IF EXISTS replicated_mt SYNC") node.query( """ CREATE TABLE replicated_mt(date Date, id UInt32, value Int32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') PARTITION BY toYYYYMM(date) ORDER BY id; + ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt_{zk_path_suffix}', '{replica}') + PARTITION BY toYYYYMM(date) ORDER BY id + {merge_tree_settings} """.format( - replica=node.name + replica=node.name, + zk_path_suffix=zk_path_suffix, + merge_tree_settings=merge_tree_settings, ) ) @@ -220,16 +217,32 @@ def test_check_replicated_table_simple(started_cluster): ) -def test_check_replicated_table_corruption(started_cluster): +@pytest.mark.parametrize( + "merge_tree_settings, zk_path_suffix, part_file_ext", + [ + ( + "", + "_0", + ".bin", + ) + ], +) +def test_check_replicated_table_corruption( + started_cluster, merge_tree_settings, zk_path_suffix, part_file_ext +): for node in [node1, node2]: - node.query_with_retry("DROP TABLE IF EXISTS replicated_mt_1") + node.query_with_retry("DROP TABLE IF EXISTS replicated_mt_1 SYNC") node.query_with_retry( """ CREATE TABLE replicated_mt_1(date Date, id UInt32, value Int32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt_1', '{replica}') PARTITION BY toYYYYMM(date) ORDER BY id; + ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt_1_{zk_path_suffix}', '{replica}') + PARTITION BY toYYYYMM(date) ORDER BY id + {merge_tree_settings} """.format( - replica=node.name 
+ replica=node.name, + merge_tree_settings=merge_tree_settings, + zk_path_suffix=zk_path_suffix, ) ) @@ -248,7 +261,10 @@ def test_check_replicated_table_corruption(started_cluster): "SELECT name from system.parts where table = 'replicated_mt_1' and partition_id = '201901' and active = 1" ).strip() - corrupt_data_part_on_disk(node1, "default", "replicated_mt_1", part_name) + corrupt_part_data_on_disk( + node1, "replicated_mt_1", part_name, part_file_ext, database="default" + ) + assert node1.query( "CHECK TABLE replicated_mt_1 PARTITION 201901", settings={"check_query_single_value_result": 0, "max_threads": 1}, diff --git a/tests/integration/test_checking_s3_blobs_paranoid/configs/inf_s3_retries.xml b/tests/integration/test_checking_s3_blobs_paranoid/configs/inf_s3_retries.xml index 206eb4f2bad..4210c13b727 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/configs/inf_s3_retries.xml +++ b/tests/integration/test_checking_s3_blobs_paranoid/configs/inf_s3_retries.xml @@ -4,6 +4,7 @@ 1000000 + 1 diff --git a/tests/integration/test_checking_s3_blobs_paranoid/configs/s3_retries.xml b/tests/integration/test_checking_s3_blobs_paranoid/configs/s3_retries.xml index 556bf60d385..95a313ea4f2 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/configs/s3_retries.xml +++ b/tests/integration/test_checking_s3_blobs_paranoid/configs/s3_retries.xml @@ -4,6 +4,7 @@ 5 + 0 diff --git a/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml b/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml index b77e72d808b..7b1f503ed55 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml +++ b/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml @@ -7,11 +7,18 @@ + + s3 + http://minio1:9001/root/data/ + minio + minio123 + s3 http://resolver:8083/root/data/ minio minio123 + 1 @@ -23,9 +30,16 @@ + + +
+ s3 +
+
+
- broken_s3 + s3 diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index d6bcb3fb8f4..1391f1af6f1 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -64,6 +64,8 @@ def test_upload_after_check_works(cluster, broken_s3): data String ) ENGINE=MergeTree() ORDER BY id + SETTINGS + storage_policy='broken_s3' """ ) @@ -78,7 +80,7 @@ def test_upload_after_check_works(cluster, broken_s3): assert "suddenly disappeared" in error, error -def get_counters(node, query_id, log_type="ExceptionWhileProcessing"): +def get_multipart_counters(node, query_id, log_type="ExceptionWhileProcessing"): node.query("SYSTEM FLUSH LOGS") return [ int(x) @@ -87,7 +89,25 @@ def get_counters(node, query_id, log_type="ExceptionWhileProcessing"): SELECT ProfileEvents['S3CreateMultipartUpload'], ProfileEvents['S3UploadPart'], - ProfileEvents['S3WriteRequestsErrors'] + ProfileEvents['S3WriteRequestsErrors'], + FROM system.query_log + WHERE query_id='{query_id}' + AND type='{log_type}' + """ + ).split() + if x + ] + + +def get_put_counters(node, query_id, log_type="ExceptionWhileProcessing"): + node.query("SYSTEM FLUSH LOGS") + return [ + int(x) + for x in node.query( + f""" + SELECT + ProfileEvents['S3PutObject'], + ProfileEvents['S3WriteRequestsErrors'], FROM system.query_log WHERE query_id='{query_id}' AND type='{log_type}' @@ -97,9 +117,8 @@ def get_counters(node, query_id, log_type="ExceptionWhileProcessing"): ] -# Add "lz4" compression method in the list after https://github.com/ClickHouse/ClickHouse/issues/50975 is fixed @pytest.mark.parametrize( - "compression", ["none", "gzip", "br", "xz", "zstd", "bz2", "deflate"] + "compression", ["none", "gzip", "br", "xz", "zstd", "bz2", "deflate", "lz4"] ) def test_upload_s3_fail_create_multi_part_upload(cluster, broken_s3, compression): node = cluster.instances["node"] @@ -129,17 +148,16 @@ def test_upload_s3_fail_create_multi_part_upload(cluster, broken_s3, compression assert "Code: 499" in error, error assert "mock s3 injected error" in error, error - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts == 0 - assert count_s3_errors == 1 + assert create_multipart == 1 + assert upload_parts == 0 + assert s3_errors == 1 -# Add "lz4" compression method in the list after https://github.com/ClickHouse/ClickHouse/issues/50975 is fixed @pytest.mark.parametrize( - "compression", ["none", "gzip", "br", "xz", "zstd", "bz2", "deflate"] + "compression", ["none", "gzip", "br", "xz", "zstd", "bz2", "deflate", "lz4"] ) def test_upload_s3_fail_upload_part_when_multi_part_upload( cluster, broken_s3, compression @@ -172,12 +190,12 @@ def test_upload_s3_fail_upload_part_when_multi_part_upload( assert "Code: 499" in error, error assert "mock s3 injected error" in error, error - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts >= 2 - assert count_s3_errors >= 2 + assert create_multipart == 1 + assert upload_parts >= 2 + assert s3_errors >= 2 def test_when_s3_connection_refused_is_retried(cluster, broken_s3): @@ -207,12 +225,12 @@ def 
test_when_s3_connection_refused_is_retried(cluster, broken_s3): query_id=insert_query_id, ) - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id, log_type="QueryFinish" ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts == 39 - assert count_s3_errors == 3 + assert create_multipart == 1 + assert upload_parts == 39 + assert s3_errors == 3 broken_s3.setup_at_part_upload(count=1000, after=2, action="connection_refused") insert_query_id = f"INSERT_INTO_TABLE_FUNCTION_CONNECTION_REFUSED_RETRIED_1" @@ -279,13 +297,13 @@ def test_when_s3_connection_reset_by_peer_at_upload_is_retried( query_id=insert_query_id, ) - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id, log_type="QueryFinish" ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts == 39 - assert count_s3_errors == 3 + assert create_multipart == 1 + assert upload_parts == 39 + assert s3_errors == 3 broken_s3.setup_at_part_upload( count=1000, @@ -361,13 +379,13 @@ def test_when_s3_connection_reset_by_peer_at_create_mpu_retried( query_id=insert_query_id, ) - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id, log_type="QueryFinish" ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts == 39 - assert count_s3_errors == 3 + assert create_multipart == 1 + assert upload_parts == 39 + assert s3_errors == 3 broken_s3.setup_at_create_multi_part_upload( count=1000, @@ -438,13 +456,13 @@ def test_when_s3_broken_pipe_at_upload_is_retried(cluster, broken_s3): query_id=insert_query_id, ) - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id, log_type="QueryFinish" ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts == 7 - assert count_s3_errors == 3 + assert create_multipart == 1 + assert upload_parts == 7 + assert s3_errors == 3 broken_s3.setup_at_part_upload( count=1000, @@ -533,3 +551,60 @@ def test_query_is_canceled_with_inf_retries(cluster, broken_s3): retry_count=120, sleep_time=1, ) + + +@pytest.mark.parametrize("node_name", ["node", "node_with_inf_s3_retries"]) +def test_adaptive_timeouts(cluster, broken_s3, node_name): + node = cluster.instances[node_name] + + broken_s3.setup_fake_puts(part_length=1) + broken_s3.setup_slow_answers( + timeout=5, + count=1000000, + ) + + insert_query_id = f"TEST_ADAPTIVE_TIMEOUTS_{node_name}" + node.query( + f""" + INSERT INTO + TABLE FUNCTION s3( + 'http://resolver:8083/root/data/adaptive_timeouts', + 'minio', 'minio123', + 'CSV', auto, 'none' + ) + SELECT + * + FROM system.numbers + LIMIT 1 + SETTINGS + s3_request_timeout_ms=30000, + s3_check_objects_after_upload=0 + """, + query_id=insert_query_id, + ) + + broken_s3.reset() + + put_objects, s3_errors = get_put_counters( + node, insert_query_id, log_type="QueryFinish" + ) + + assert put_objects == 1 + + s3_use_adaptive_timeouts = node.query( + f""" + SELECT + value + FROM system.settings + WHERE + name='s3_use_adaptive_timeouts' + """ + ).strip() + + if node_name == "node_with_inf_s3_retries": + # first 2 attempts failed + assert s3_use_adaptive_timeouts == "1" + assert 
s3_errors == 1 + else: + assert s3_use_adaptive_timeouts == "0" + assert s3_errors == 0 diff --git a/tests/integration/test_ddl_alter_query/configs/remote_servers.xml b/tests/integration/test_ddl_alter_query/configs/remote_servers.xml index 791af83a2d6..c505345cf7f 100644 --- a/tests/integration/test_ddl_alter_query/configs/remote_servers.xml +++ b/tests/integration/test_ddl_alter_query/configs/remote_servers.xml @@ -25,4 +25,6 @@ + + 1 diff --git a/tests/integration/test_ddl_alter_query/test.py b/tests/integration/test_ddl_alter_query/test.py index f87d943622c..b9f464b3774 100644 --- a/tests/integration/test_ddl_alter_query/test.py +++ b/tests/integration/test_ddl_alter_query/test.py @@ -5,7 +5,10 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( - "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True + "node1", + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, + stay_alive=True, ) node2 = cluster.add_instance( "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True @@ -67,3 +70,53 @@ def test_alter(started_cluster): assert node2.query("SELECT somecolumn FROM testdb.test_table LIMIT 1") == "0\n" assert node3.query("SELECT somecolumn FROM testdb.test_table LIMIT 1") == "0\n" assert node4.query("SELECT somecolumn FROM testdb.test_table LIMIT 1") == "0\n" + + +def test_ddl_queue_hostname_change(started_cluster): + node1.query( + "create table hostname_change on cluster test_cluster (n int) engine=Log" + ) + + # There's no easy way to change hostname of a container, so let's update values in zk + query_znode = node1.query( + "select max(name) from system.zookeeper where path='/clickhouse/task_queue/ddl'" + )[:-1] + + value = ( + node1.query( + "select value from system.zookeeper where path='/clickhouse/task_queue/ddl' and name='{}' format TSVRaw".format( + query_znode + ) + )[:-1] + .replace("\\'", "#") + .replace("'", "\\'") + .replace("\n", "\\n") + .replace("#", "\\'") + ) + + finished_znode = node1.query( + "select name from system.zookeeper where path='/clickhouse/task_queue/ddl/{}/finished' and name like '%node1%'".format( + query_znode + ) + )[:-1] + + node1.query( + "insert into system.zookeeper (name, path, value) values ('{}', '/clickhouse/task_queue/ddl', '{}')".format( + query_znode, value.replace("node1", "imaginary.old.hostname") + ) + ) + started_cluster.get_kazoo_client("zoo1").delete( + "/clickhouse/task_queue/ddl/{}/finished/{}".format(query_znode, finished_znode) + ) + + node1.query( + "insert into system.zookeeper (name, path, value) values ('{}', '/clickhouse/task_queue/ddl/{}/finished', '0\\n')".format( + finished_znode.replace("node1", "imaginary.old.hostname"), query_znode + ) + ) + + node1.restart_clickhouse(kill=True) + + node1.query( + "create table hostname_change2 on cluster test_cluster (n int) engine=Log" + ) diff --git a/tests/integration/test_default_compression_codec/test.py b/tests/integration/test_default_compression_codec/test.py index 82d5eb04d2a..ffe22c62325 100644 --- a/tests/integration/test_default_compression_codec/test.py +++ b/tests/integration/test_default_compression_codec/test.py @@ -27,9 +27,9 @@ node2 = cluster.add_instance( ) node3 = cluster.add_instance( "node3", - main_configs=["configs/default_compression.xml", "configs/wide_parts_only.xml"], + main_configs=["configs/default_compression.xml"], image="yandex/clickhouse-server", - tag="20.3.16", + tag="19.16.9.37", stay_alive=True, with_installed_binary=True, 
allow_analyzer=False, diff --git a/tests/integration/test_dictionaries_wait_for_load/configs/no_dictionaries_lazy_load.xml b/tests/integration/test_dictionaries_wait_for_load/configs/no_dictionaries_lazy_load.xml new file mode 100644 index 00000000000..aaae3e0c4c1 --- /dev/null +++ b/tests/integration/test_dictionaries_wait_for_load/configs/no_dictionaries_lazy_load.xml @@ -0,0 +1,3 @@ + + 0 + diff --git a/tests/integration/test_dictionaries_wait_for_load/configs/wait_for_dictionaries_load.xml b/tests/integration/test_dictionaries_wait_for_load/configs/wait_for_dictionaries_load.xml deleted file mode 100644 index a446b730123..00000000000 --- a/tests/integration/test_dictionaries_wait_for_load/configs/wait_for_dictionaries_load.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 1 - diff --git a/tests/integration/test_dictionaries_wait_for_load/test.py b/tests/integration/test_dictionaries_wait_for_load/test.py index 975e9ca3e56..b30cc61abce 100644 --- a/tests/integration/test_dictionaries_wait_for_load/test.py +++ b/tests/integration/test_dictionaries_wait_for_load/test.py @@ -10,11 +10,14 @@ DICTIONARY_FILES = [ cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", - main_configs=["configs/wait_for_dictionaries_load.xml"], + main_configs=["configs/no_dictionaries_lazy_load.xml"], dictionaries=DICTIONARY_FILES, ) -node0 = cluster.add_instance("node0", dictionaries=DICTIONARY_FILES) +node0 = cluster.add_instance( + "node0", + dictionaries=DICTIONARY_FILES, +) @pytest.fixture(scope="module", autouse=True) @@ -33,9 +36,13 @@ def get_status(instance, dictionary_name): def test_wait_for_dictionaries_load(): - assert get_status(node0, "long_loading_dictionary") == "NOT_LOADED" - assert get_status(node1, "long_loading_dictionary") == "LOADED" assert node1.query("SELECT * FROM dictionary(long_loading_dictionary)") == TSV( [[1, "aa"], [2, "bb"]] ) + + assert get_status(node0, "long_loading_dictionary") == "NOT_LOADED" + assert node0.query("SELECT * FROM dictionary(long_loading_dictionary)") == TSV( + [[1, "aa"], [2, "bb"]] + ) + assert get_status(node0, "long_loading_dictionary") == "LOADED" diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py index 7695d235425..a71fdeff302 100644 --- a/tests/integration/test_disk_over_web_server/test.py +++ b/tests/integration/test_disk_over_web_server/test.py @@ -38,7 +38,7 @@ def cluster(): stay_alive=True, with_installed_binary=True, image="clickhouse/clickhouse-server", - tag="22.8.14.53", + tag="22.6", allow_analyzer=False, ) diff --git a/tests/integration/test_distributed_backward_compatability/test.py b/tests/integration/test_distributed_backward_compatability/test.py index c48a7ad1fa1..319a4c08e60 100644 --- a/tests/integration/test_distributed_backward_compatability/test.py +++ b/tests/integration/test_distributed_backward_compatability/test.py @@ -8,7 +8,7 @@ node_old = cluster.add_instance( "node1", main_configs=["configs/remote_servers.xml"], image="yandex/clickhouse-server", - tag="20.8.9.6", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_distributed_insert_backward_compatibility/test.py b/tests/integration/test_distributed_insert_backward_compatibility/test.py index 1e566d5e2da..7cfea61ffff 100644 --- a/tests/integration/test_distributed_insert_backward_compatibility/test.py +++ b/tests/integration/test_distributed_insert_backward_compatibility/test.py @@ -11,7 +11,7 @@ node_dist = 
cluster.add_instance( "node2", main_configs=["configs/remote_servers.xml"], image="yandex/clickhouse-server", - tag="21.11.9.1", + tag="21.6", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index 6e3f1e6e416..a5b353cc030 100644 --- a/tests/integration/test_distributed_inter_server_secret/test.py +++ b/tests/integration/test_distributed_inter_server_secret/test.py @@ -31,7 +31,7 @@ backward = make_instance( "configs/remote_servers_backward.xml", image="clickhouse/clickhouse-server", # version without DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET_V2 - tag="23.2.3", + tag="22.6", with_installed_binary=True, allow_analyzer=False, ) diff --git a/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml b/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml index 91a22a81a22..e1e2444992a 100644 --- a/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml +++ b/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml @@ -27,14 +27,14 @@ - +
disk1 disk2
-
+
diff --git a/tests/integration/test_distributed_storage_configuration/test.py b/tests/integration/test_distributed_storage_configuration/test.py index b0e17da37b2..00620668bd9 100644 --- a/tests/integration/test_distributed_storage_configuration/test.py +++ b/tests/integration/test_distributed_storage_configuration/test.py @@ -53,7 +53,7 @@ def test_insert(start_cluster): test, foo, key%2, - 'default' + 'jbod_policy' ) """ ) diff --git a/tests/integration/test_file_cluster/__init__.py b/tests/integration/test_file_cluster/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_file_cluster/configs/cluster.xml b/tests/integration/test_file_cluster/configs/cluster.xml new file mode 100644 index 00000000000..6a0446f5f82 --- /dev/null +++ b/tests/integration/test_file_cluster/configs/cluster.xml @@ -0,0 +1,23 @@ + + + + + + + s0_0_0 + 9000 + + + s0_0_1 + 9000 + + + + + s0_1_0 + 9000 + + + + + diff --git a/tests/integration/test_file_cluster/configs/users.xml b/tests/integration/test_file_cluster/configs/users.xml new file mode 100644 index 00000000000..c12eb2f79f4 --- /dev/null +++ b/tests/integration/test_file_cluster/configs/users.xml @@ -0,0 +1,8 @@ + + + + + default + + + diff --git a/tests/integration/test_file_cluster/test.py b/tests/integration/test_file_cluster/test.py new file mode 100644 index 00000000000..d75cd6c7d23 --- /dev/null +++ b/tests/integration/test_file_cluster/test.py @@ -0,0 +1,125 @@ +import logging +import csv +import time + +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + + +logging.getLogger().setLevel(logging.INFO) +logging.getLogger().addHandler(logging.StreamHandler()) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster = ClickHouseCluster(__file__) + cluster.add_instance( + "s0_0_0", + main_configs=["configs/cluster.xml"], + user_configs=["configs/users.xml"], + macros={"replica": "node1", "shard": "shard1"}, + with_zookeeper=True, + ) + cluster.add_instance( + "s0_0_1", + main_configs=["configs/cluster.xml"], + user_configs=["configs/users.xml"], + macros={"replica": "replica2", "shard": "shard1"}, + with_zookeeper=True, + ) + cluster.add_instance( + "s0_1_0", + main_configs=["configs/cluster.xml"], + user_configs=["configs/users.xml"], + macros={"replica": "replica1", "shard": "shard2"}, + with_zookeeper=True, + ) + + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + for node_name in ("s0_0_0", "s0_0_1", "s0_1_0"): + for i in range(1, 3): + cluster.instances[node_name].query( + f""" + INSERT INTO TABLE FUNCTION file( + 'file{i}.csv', 'CSV', 's String, i UInt32') VALUES ('file{i}',{i}) + """ + ) + + yield cluster + finally: + cluster.shutdown() + + +def get_query(select: str, cluster: bool, files_nums: str, order_by="ORDER BY (i, s)"): + if cluster: + return f"SELECT {select} from fileCluster('my_cluster', 'file{{{files_nums}}}.csv', 'CSV', 's String, i UInt32') {order_by}" + else: + return f"SELECT {select} from file('file{{{files_nums}}}.csv', 'CSV', 's String, i UInt32') {order_by}" + + +def test_select_all(started_cluster): + node = started_cluster.instances["s0_0_0"] + + local = node.query(get_query("*", False, "1,2")) + distributed = node.query(get_query("*", True, "1,2")) + + assert TSV(local) == TSV(distributed) + + +def test_count(started_cluster): + node = started_cluster.instances["s0_0_0"] + + local = node.query(get_query("count(*)", False, "1,2", "")) + distributed = 
node.query(get_query("count(*)", True, "1,2", "")) + + assert TSV(local) == TSV(distributed) + + +def test_non_existent_cluster(started_cluster): + node = started_cluster.instances["s0_0_0"] + error = node.query_and_get_error( + """ + SELECT count(*) from fileCluster( + 'non_existent_cluster', 'file{1,2}.csv', 'CSV', 's String, i UInt32') + UNION ALL + SELECT count(*) from fileCluster( + 'non_existent_cluster', 'file{1,2}.csv', 'CSV', 's String, i UInt32') + """ + ) + + assert "not found" in error + + +def test_missing_file(started_cluster): + """ + Select from a list of files, _some_ of them don't exist + """ + node = started_cluster.instances["s0_0_0"] + + local_with_missing_file = node.query(get_query("*", False, "1,2,3")) + local_wo_missing_file = node.query(get_query("*", False, "1,2")) + + distributed_with_missing_file = node.query(get_query("*", True, "1,2,3")) + distributed_wo_missing_file = node.query(get_query("*", True, "1,2")) + + assert TSV(local_with_missing_file) == TSV(distributed_with_missing_file) + assert TSV(local_wo_missing_file) == TSV(distributed_wo_missing_file) + assert TSV(local_with_missing_file) == TSV(distributed_wo_missing_file) + assert TSV(local_wo_missing_file) == TSV(distributed_with_missing_file) + + +def test_no_such_files(started_cluster): + """ + Select from a list of files, _none_ of them exist + """ + node = started_cluster.instances["s0_0_0"] + + local = node.query(get_query("*", False, "3,4")) + distributed = node.query(get_query("*", True, "3,4")) + + assert TSV(local) == TSV(distributed) diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index be7b12946a7..3a6a1ef76eb 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -46,7 +46,7 @@ def test_parallel_cache_loading_on_startup(cluster, node_name): path = 'paralel_loading_test', disk = 'hdd_blob', max_file_segment_size = '1Ki', - boundary_alignemt = '1Ki', + boundary_alignment = '1Ki', max_size = '1Gi', max_elements = 10000000, load_metadata_threads = 30); diff --git a/tests/integration/test_groupBitmapAnd_on_distributed/test.py b/tests/integration/test_groupBitmapAnd_on_distributed/test.py index 8cf7e0fb2c1..5d3dda8ecf2 100644 --- a/tests/integration/test_groupBitmapAnd_on_distributed/test.py +++ b/tests/integration/test_groupBitmapAnd_on_distributed/test.py @@ -26,7 +26,7 @@ node4 = cluster.add_instance( "node4", main_configs=["configs/clusters.xml"], image="yandex/clickhouse-server", - tag="21.5", + tag="21.6", with_zookeeper=True, allow_analyzer=False, ) diff --git a/tests/integration/test_keeper_auth/test.py b/tests/integration/test_keeper_auth/test.py index e247984cc6a..78fbf84bbe2 100644 --- a/tests/integration/test_keeper_auth/test.py +++ b/tests/integration/test_keeper_auth/test.py @@ -1,6 +1,7 @@ import pytest import time from helpers.cluster import ClickHouseCluster +from helpers import keeper_utils from kazoo.client import KazooClient, KazooState from kazoo.security import ACL, make_digest_acl, make_acl from kazoo.exceptions import ( @@ -26,6 +27,7 @@ SUPERAUTH = "super:admin" def started_cluster(): try: cluster.start() + keeper_utils.wait_until_connected(cluster, node) yield cluster diff --git a/tests/integration/test_keeper_availability_zone/__init__.py b/tests/integration/test_keeper_availability_zone/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_keeper_availability_zone/configs/keeper_config.xml
b/tests/integration/test_keeper_availability_zone/configs/keeper_config.xml new file mode 100644 index 00000000000..3cbf717bb67 --- /dev/null +++ b/tests/integration/test_keeper_availability_zone/configs/keeper_config.xml @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/test_keeper_availability_zone/test.py b/tests/integration/test_keeper_availability_zone/test.py new file mode 100644 index 00000000000..a2003f8539e --- /dev/null +++ b/tests/integration/test_keeper_availability_zone/test.py @@ -0,0 +1,38 @@ +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.keeper_utils import KeeperClient + + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance( + "node", + main_configs=["configs/keeper_config.xml"], + with_zookeeper=True, + stay_alive=True, +) + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def test_get_availability_zone(): + with KeeperClient.from_cluster(cluster, "zoo1") as client1: + assert client1.get("/keeper/availability_zone") == "az-zoo1" + + # Keeper2 set enable_auto_detection_on_cloud to true, but is ignored and az-zoo2 is used. + with KeeperClient.from_cluster(cluster, "zoo2") as client2: + assert client2.get("/keeper/availability_zone") == "az-zoo2" + assert "availability_zone" in client2.ls("/keeper") + + # keeper3 is not configured with availability_zone value. + with KeeperClient.from_cluster(cluster, "zoo3") as client3: + with pytest.raises(Exception): + client3.get("/keeper/availability_zone") diff --git a/tests/integration/test_manipulate_statistic/__init__.py b/tests/integration/test_manipulate_statistic/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_manipulate_statistic/config/config.xml b/tests/integration/test_manipulate_statistic/config/config.xml new file mode 100644 index 00000000000..b47f8123499 --- /dev/null +++ b/tests/integration/test_manipulate_statistic/config/config.xml @@ -0,0 +1,7 @@ + + + + 1 + + + diff --git a/tests/integration/test_manipulate_statistic/test.py b/tests/integration/test_manipulate_statistic/test.py new file mode 100644 index 00000000000..f1c00a61b07 --- /dev/null +++ b/tests/integration/test_manipulate_statistic/test.py @@ -0,0 +1,124 @@ +import pytest +import logging + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +node1 = cluster.add_instance( + "node1", user_configs=["config/config.xml"], with_zookeeper=True +) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + + yield cluster + + finally: + cluster.shutdown() + + +def check_stat_file_on_disk(node, table, part_name, column_name, exist): + part_path = node.query( + "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format( + table, part_name + ) + ).strip() + + assert len(part_path) != 0 + + output = node.exec_in_container( + [ + "bash", + "-c", + "find {p} -type f -name statistic_{col}.stat".format( + p=part_path, col=column_name + ), + ], + privileged=True, + ) + logging.debug( + f"Checking stat file in {part_path} for column {column_name}, got {output}" + ) + if exist: + assert len(output) != 0 + else: + assert len(output) == 0 + + +def run_test_single_node(started_cluster): + node1.query("INSERT INTO test_stat VALUES (1,2,3), (4,5,6)") + + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0", "a", True) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0", "b", True) + 
check_stat_file_on_disk(node1, "test_stat", "all_1_1_0", "c", True) + + node1.query("ALTER TABLE test_stat DROP STATISTIC a type tdigest") + + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_2", "a", False) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_2", "b", True) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_2", "c", True) + + node1.query("ALTER TABLE test_stat CLEAR STATISTIC b, c type tdigest") + + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_3", "a", False) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_3", "b", False) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_3", "c", False) + + node1.query("ALTER TABLE test_stat MATERIALIZE STATISTIC b, c type tdigest") + + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_4", "a", False) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_4", "b", True) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_4", "c", True) + + node1.query("ALTER TABLE test_stat ADD STATISTIC a type tdigest") + node1.query("ALTER TABLE test_stat MATERIALIZE STATISTIC a type tdigest") + + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_5", "a", True) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_5", "b", True) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_5", "c", True) + + node1.query("ALTER TABLE test_stat DROP COLUMN c") + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_6", "a", True) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_6", "b", True) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_6", "c", False) + + node1.query("ALTER TABLE test_stat RENAME COLUMN b TO c") + + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_7", "a", True) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_7", "b", False) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_7", "c", True) + + node1.query("ALTER TABLE test_stat RENAME COLUMN c TO b") + + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_8", "a", True) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_8", "b", True) + check_stat_file_on_disk(node1, "test_stat", "all_1_1_0_8", "c", False) + + +def test_single_node_wide(started_cluster): + node1.query("DROP TABLE IF EXISTS test_stat") + + node1.query( + """ + CREATE TABLE test_stat(a Int64 STATISTIC(tdigest), b Int64 STATISTIC(tdigest), c Int64 STATISTIC(tdigest)) + ENGINE = MergeTree() ORDER BY a + SETTINGS min_bytes_for_wide_part = 0; + """ + ) + run_test_single_node(started_cluster) + + +def test_single_node_normal(started_cluster): + node1.query("DROP TABLE IF EXISTS test_stat") + + node1.query( + """ + CREATE TABLE test_stat(a Int64 STATISTIC(tdigest), b Int64 STATISTIC(tdigest), c Int64 STATISTIC(tdigest)) + ENGINE = MergeTree() ORDER BY a; + """ + ) + run_test_single_node(started_cluster) diff --git a/tests/integration/test_merge_tree_s3/configs/config.d/blob_log.xml b/tests/integration/test_merge_tree_s3/configs/config.d/blob_log.xml new file mode 100644 index 00000000000..474c163b937 --- /dev/null +++ b/tests/integration/test_merge_tree_s3/configs/config.d/blob_log.xml @@ -0,0 +1,9 @@ + + + system + blob_storage_log
+ toYYYYMM(event_date) + 7500 + event_date + INTERVAL 30 DAY +
+
diff --git a/tests/integration/test_merge_tree_s3/configs/config.d/users.xml b/tests/integration/test_merge_tree_s3/configs/config.d/users.xml index 3daa6f06a78..79e5091b28a 100644 --- a/tests/integration/test_merge_tree_s3/configs/config.d/users.xml +++ b/tests/integration/test_merge_tree_s3/configs/config.d/users.xml @@ -3,6 +3,7 @@ 1 20 + 0 diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py index 3b2f1c0f6a6..9216b08f942 100644 --- a/tests/integration/test_merge_tree_s3/test.py +++ b/tests/integration/test_merge_tree_s3/test.py @@ -1,6 +1,7 @@ import logging import time import os +import uuid import pytest from helpers.cluster import ClickHouseCluster @@ -10,7 +11,6 @@ from helpers.wait_for_helpers import wait_for_delete_inactive_parts from helpers.wait_for_helpers import wait_for_delete_empty_parts from helpers.wait_for_helpers import wait_for_merges - SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) @@ -24,6 +24,7 @@ def cluster(): "configs/config.xml", "configs/config.d/storage_conf.xml", "configs/config.d/bg_processing_pool_conf.xml", + "configs/config.d/blob_log.xml", ], user_configs=[ "configs/config.d/users.xml", @@ -37,6 +38,7 @@ def cluster(): main_configs=[ "configs/config.d/storage_conf.xml", "configs/config.d/bg_processing_pool_conf.xml", + "configs/config.d/blob_log.xml", ], with_minio=True, tmpfs=[ @@ -126,17 +128,22 @@ def list_objects(cluster, path="data/", hint="list_objects"): def wait_for_delete_s3_objects(cluster, expected, timeout=30): while timeout > 0: - if len(list_objects(cluster, "data/")) == expected: - return + existing_objects = list_objects(cluster, "data/") + if len(existing_objects) == expected: + return existing_objects timeout -= 1 time.sleep(1) - assert len(list_objects(cluster, "data/")) == expected + existing_objects = list_objects(cluster, "data/") + assert len(existing_objects) == expected + return existing_objects def remove_all_s3_objects(cluster): minio = cluster.minio_client - for obj in list_objects(cluster, "data/"): + objects_to_delete = list_objects(cluster, "data/") + for obj in objects_to_delete: minio.remove_object(cluster.minio_bucket, obj.object_name) + return objects_to_delete @pytest.fixture(autouse=True, scope="function") @@ -155,7 +162,7 @@ def clear_minio(cluster): def check_no_objects_after_drop(cluster, table_name="s3_test", node_name="node"): node = cluster.instances[node_name] node.query(f"DROP TABLE IF EXISTS {table_name} SYNC") - wait_for_delete_s3_objects(cluster, 0, timeout=0) + return wait_for_delete_s3_objects(cluster, 0, timeout=0) @pytest.mark.parametrize( @@ -173,10 +180,32 @@ def test_simple_insert_select( minio = cluster.minio_client values1 = generate_values("2020-01-03", 4096) - node.query("INSERT INTO s3_test VALUES {}".format(values1)) + insert_query_id = uuid.uuid4().hex + + node.query( + "INSERT INTO s3_test VALUES {}".format(values1), query_id=insert_query_id + ) assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values") == values1 assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD + files_per_part + node.query("SYSTEM FLUSH LOGS") + blob_storage_log = node.query( + f"SELECT * FROM system.blob_storage_log WHERE query_id = '{insert_query_id}' FORMAT PrettyCompactMonoBlock" + ) + + result = node.query( + f"""SELECT + (countIf( (event_type == 'Upload' OR event_type == 'MultiPartUploadWrite') as event_match) as total_events) > 0, + countIf(event_match AND bucket == 'root') == total_events, + countIf(event_match AND remote_path 
!= '') == total_events, + countIf(event_match AND local_path != '') == total_events, + sumIf(data_size, event_match) > 0 + FROM system.blob_storage_log + WHERE query_id = '{insert_query_id}' AND error == '' + """ + ) + assert result == "1\t1\t1\t1\t1\n", blob_storage_log + values2 = generate_values("2020-01-04", 4096) node.query("INSERT INTO s3_test VALUES {}".format(values2)) assert ( @@ -269,6 +298,30 @@ def test_alter_table_columns(cluster, node_name): "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096, -1)) ) + def assert_deleted_in_log(old_objects, new_objects): + node.query("SYSTEM FLUSH LOGS") + + deleted_objects = set(obj.object_name for obj in old_objects) - set( + obj.object_name for obj in new_objects + ) + deleted_in_log = set( + node.query( + f"SELECT remote_path FROM system.blob_storage_log WHERE error == '' AND event_type == 'Delete'" + ) + .strip() + .split() + ) + + # all deleted objects should be in log + assert all(obj in deleted_in_log for obj in deleted_objects), ( + deleted_objects, + node.query( + f"SELECT * FROM system.blob_storage_log FORMAT PrettyCompactMonoBlock" + ), + ) + + objects_before = list_objects(cluster, "data/") + node.query("ALTER TABLE s3_test ADD COLUMN col1 UInt64 DEFAULT 1") # To ensure parts have merged node.query("OPTIMIZE TABLE s3_test") @@ -278,30 +331,42 @@ def test_alter_table_columns(cluster, node_name): node.query("SELECT sum(col1) FROM s3_test WHERE id > 0 FORMAT Values") == "(4096)" ) - wait_for_delete_s3_objects( + + existing_objects = wait_for_delete_s3_objects( cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN, ) + assert_deleted_in_log(objects_before, existing_objects) + objects_before = existing_objects + node.query( "ALTER TABLE s3_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2} ) assert node.query("SELECT distinct(col1) FROM s3_test FORMAT Values") == "('1')" # and file with mutation - wait_for_delete_s3_objects( + existing_objects = wait_for_delete_s3_objects( cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1, ) + assert_deleted_in_log(objects_before, existing_objects) + objects_before = existing_objects + node.query("ALTER TABLE s3_test DROP COLUMN col1", settings={"mutations_sync": 2}) # and 2 files with mutations - wait_for_delete_s3_objects( + existing_objects = wait_for_delete_s3_objects( cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2 ) + assert_deleted_in_log(objects_before, existing_objects) + objects_before = existing_objects - check_no_objects_after_drop(cluster) + existing_objects = check_no_objects_after_drop(cluster) + + assert_deleted_in_log(objects_before, existing_objects) + objects_before = existing_objects @pytest.mark.parametrize("node_name", ["node"]) @@ -796,6 +861,18 @@ def test_merge_canceled_by_s3_errors(cluster, broken_s3, node_name, storage_poli node.wait_for_log_line("ExpectedError Message: mock s3 injected error") + table_uuid = node.query( + "SELECT uuid FROM system.tables WHERE database = 'default' AND name = 'test_merge_canceled_by_s3_errors' LIMIT 1" + ).strip() + + node.query("SYSTEM FLUSH LOGS") + error_count_in_blob_log = node.query( + f"SELECT count() FROM system.blob_storage_log WHERE query_id like '{table_uuid}::%' AND error like '%mock s3 injected error%'" + ).strip() + assert int(error_count_in_blob_log) > 0, node.query( + f"SELECT * FROM system.blob_storage_log WHERE query_id like '{table_uuid}::%' FORMAT PrettyCompactMonoBlock" + ) + check_no_objects_after_drop( cluster, 
table_name="test_merge_canceled_by_s3_errors", node_name=node_name ) @@ -851,6 +928,10 @@ def test_merge_canceled_by_s3_errors_when_move(cluster, broken_s3, node_name): def test_s3_engine_heavy_write_check_mem( cluster, broken_s3, node_name, in_flight_memory ): + pytest.skip( + "Disabled, will be fixed after https://github.com/ClickHouse/ClickHouse/issues/51152" + ) + in_flight = in_flight_memory[0] memory = in_flight_memory[1] @@ -870,12 +951,18 @@ def test_s3_engine_heavy_write_check_mem( ) broken_s3.setup_fake_multpartuploads() - broken_s3.setup_slow_answers(10 * 1024 * 1024, timeout=15, count=10) + slow_responces = 10 + slow_timeout = 15 + broken_s3.setup_slow_answers( + 10 * 1024 * 1024, timeout=slow_timeout, count=slow_responces + ) query_id = f"INSERT_INTO_S3_ENGINE_QUERY_ID_{in_flight}" node.query( "INSERT INTO s3_test SELECT number, toString(number) FROM numbers(50000000)" - f" SETTINGS max_memory_usage={2*memory}" + f" SETTINGS " + f" max_memory_usage={2*memory}" + f", max_threads=1" # ParallelFormattingOutputFormat consumption depends on it f", s3_max_inflight_parts_for_one_file={in_flight}", query_id=query_id, ) @@ -892,7 +979,8 @@ def test_s3_engine_heavy_write_check_mem( assert int(memory_usage) < 1.2 * memory assert int(memory_usage) > 0.8 * memory - assert int(wait_inflight) > in_flight * 1000 * 1000 + # The more in_flight value is the less time CH waits. + assert int(wait_inflight) / 1000 / 1000 > slow_responces * slow_timeout / in_flight check_no_objects_after_drop(cluster, node_name=node_name) diff --git a/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml index 235b9a7b7a1..6303e9273fc 100644 --- a/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml +++ b/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml @@ -11,6 +11,7 @@ true 0 + 0 20000 @@ -33,6 +34,7 @@ true 1 + 0 1 20000 diff --git a/tests/integration/test_named_collections/configs/users.d/users_no_default_access.xml b/tests/integration/test_named_collections/configs/users.d/users_no_default_access.xml index b8f38f04ca9..dcac83188dc 100644 --- a/tests/integration/test_named_collections/configs/users.d/users_no_default_access.xml +++ b/tests/integration/test_named_collections/configs/users.d/users_no_default_access.xml @@ -4,6 +4,7 @@ default default + 0 diff --git a/tests/integration/test_old_versions/test.py b/tests/integration/test_old_versions/test.py index aff07c53114..b59bfcc4f6b 100644 --- a/tests/integration/test_old_versions/test.py +++ b/tests/integration/test_old_versions/test.py @@ -55,7 +55,7 @@ node19_13 = cluster.add_instance( node19_16 = cluster.add_instance( "node19_16", image="yandex/clickhouse-server", - tag="19.16.2.2", + tag="19.16.9.37", with_installed_binary=True, main_configs=["configs/config.d/test_cluster.xml"], allow_analyzer=False, diff --git a/tests/integration/test_polymorphic_parts/test.py b/tests/integration/test_polymorphic_parts/test.py index debb509de90..ba9b5ec6cac 100644 --- a/tests/integration/test_polymorphic_parts/test.py +++ b/tests/integration/test_polymorphic_parts/test.py @@ -360,7 +360,7 @@ node7 = cluster.add_instance( user_configs=["configs_old/users.d/not_optimize_count.xml"], with_zookeeper=True, image="yandex/clickhouse-server", - tag="19.17.8.54", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_replicated_database/test.py 
b/tests/integration/test_replicated_database/test.py index f45841124d9..a591c93d264 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -1351,3 +1351,48 @@ def test_replicated_table_structure_alter(started_cluster): assert "1\t2\t3\t0\n1\t2\t3\t4\n" == dummy_node.query( "SELECT * FROM table_structure.rmt ORDER BY k" ) + + +def test_modify_comment(started_cluster): + main_node.query( + "CREATE DATABASE modify_comment_db ENGINE = Replicated('/test/modify_comment', 'shard1', 'replica' || '1');" + ) + + dummy_node.query( + "CREATE DATABASE modify_comment_db ENGINE = Replicated('/test/modify_comment', 'shard1', 'replica' || '2');" + ) + + main_node.query( + "CREATE TABLE modify_comment_db.modify_comment_table (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree ORDER BY k PARTITION BY toYYYYMM(d);" + ) + + def restart_verify_not_readonly(): + main_node.restart_clickhouse() + assert ( + main_node.query( + "SELECT is_readonly FROM system.replicas WHERE table = 'modify_comment_table'" + ) + == "0\n" + ) + dummy_node.restart_clickhouse() + assert ( + dummy_node.query( + "SELECT is_readonly FROM system.replicas WHERE table = 'modify_comment_table'" + ) + == "0\n" + ) + + main_node.query( + "ALTER TABLE modify_comment_db.modify_comment_table COMMENT COLUMN d 'Some comment'" + ) + + restart_verify_not_readonly() + + main_node.query( + "ALTER TABLE modify_comment_db.modify_comment_table MODIFY COMMENT 'Some error comment'" + ) + + restart_verify_not_readonly() + + main_node.query("DROP DATABASE modify_comment_db SYNC") + dummy_node.query("DROP DATABASE modify_comment_db SYNC") diff --git a/tests/integration/test_replicated_merge_tree_compatibility/test.py b/tests/integration/test_replicated_merge_tree_compatibility/test.py index c30a0d86c98..32a44aa65b9 100644 --- a/tests/integration/test_replicated_merge_tree_compatibility/test.py +++ b/tests/integration/test_replicated_merge_tree_compatibility/test.py @@ -6,7 +6,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.12.4.5", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, @@ -15,7 +15,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.12.4.5", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_replicated_merge_tree_encryption_codec/test.py b/tests/integration/test_replicated_merge_tree_encryption_codec/test.py index d2dbc7c5466..c8b63f9502a 100644 --- a/tests/integration/test_replicated_merge_tree_encryption_codec/test.py +++ b/tests/integration/test_replicated_merge_tree_encryption_codec/test.py @@ -91,9 +91,14 @@ def test_different_keys(): copy_keys(node2, "key_b") create_table() - insert_data() + # Insert two blocks without duplicated blocks to force each replica to actually fetch parts from another replica. + node1.query("INSERT INTO tbl VALUES (1, 'str1')") + node2.query("INSERT INTO tbl VALUES (2, 'str2')") node1.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl") + # After "SYSTEM SYNC REPLICA" we expect node1 and node2 here both having a part for (1, 'str1') encrypted with "key_a", + # and a part for (2, 'str2') encrypted with "key_b". + # So the command "SELECT * from tbl" must fail on both nodes because each node has only one encryption key. 
assert "BAD_DECRYPT" in node1.query_and_get_error("SELECT * FROM tbl") assert "BAD_DECRYPT" in node2.query_and_get_error("SELECT * FROM tbl") diff --git a/tests/integration/test_replicating_constants/test.py b/tests/integration/test_replicating_constants/test.py index 00781e473c7..9669e890cd3 100644 --- a/tests/integration/test_replicating_constants/test.py +++ b/tests/integration/test_replicating_constants/test.py @@ -9,7 +9,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=True, image="yandex/clickhouse-server", - tag="19.1.14", + tag="19.16.9.37", with_installed_binary=True, allow_analyzer=False, ) diff --git a/tests/integration/test_s3_style_link/test.py b/tests/integration/test_s3_style_link/test.py index 7ecf4e633e3..f90b77c360c 100644 --- a/tests/integration/test_s3_style_link/test.py +++ b/tests/integration/test_s3_style_link/test.py @@ -55,3 +55,34 @@ def test_s3_table_functions(started_cluster): ) == "1000000\n" ) + + +def test_s3_table_functions_line_as_string(started_cluster): + node.query( + """ + INSERT INTO FUNCTION s3 + ( + 'minio://data/test_file_line_as_string.tsv.gz', 'minio', 'minio123' + ) + SELECT * FROM numbers(1000000); + """ + ) + + assert ( + node.query( + """ + SELECT _file FROM s3 + ( + 'minio://data/*as_string.tsv.gz', 'minio', 'minio123', 'LineAsString' + ) LIMIT 1; + """ + ) + == node.query( + """ + SELECT _file FROM s3 + ( + 'http://minio1:9001/root/data/*as_string.tsv.gz', 'minio', 'minio123', 'LineAsString' + ) LIMIT 1; + """ + ) + ) diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index e0365f70e7f..96fff6b891f 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -1156,3 +1156,37 @@ def test_filtering_by_file_or_path(cluster): ) assert int(result) == 1 + + +def test_size_virtual_column(cluster): + node = cluster.instances["node"] + storage_account_url = cluster.env_variables["AZURITE_STORAGE_ACCOUNT_URL"] + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_size_virtual_column1.tsv', 'devstoreaccount1', " + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1", + ) + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_size_virtual_column2.tsv', 'devstoreaccount1', " + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 11", + ) + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_size_virtual_column3.tsv', 'devstoreaccount1', " + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 111", + ) + + result = azure_query( + node, + f"select _file, _size from azureBlobStorage('{storage_account_url}', 'cont', 'test_size_virtual_column*.tsv', 'devstoreaccount1', " + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') " + f"order by _file", + ) + + assert ( + result + == "test_size_virtual_column1.tsv\t2\ntest_size_virtual_column2.tsv\t3\ntest_size_virtual_column3.tsv\t4\n" + ) diff --git a/tests/integration/test_storage_iceberg/test.py b/tests/integration/test_storage_iceberg/test.py index 11198a7175b..d5f8d04e258 100644 --- 
a/tests/integration/test_storage_iceberg/test.py +++ b/tests/integration/test_storage_iceberg/test.py @@ -9,6 +9,8 @@ import json import pytest import time import glob +import uuid +import os from pyspark.sql.types import ( StructType, @@ -515,3 +517,35 @@ def test_metadata_file_selection(started_cluster, format_version): create_iceberg_table(instance, TABLE_NAME) assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 500 + + +@pytest.mark.parametrize("format_version", ["1", "2"]) +def test_metadata_file_format_with_uuid(started_cluster, format_version): + instance = started_cluster.instances["node1"] + spark = started_cluster.spark_session + minio_client = started_cluster.minio_client + bucket = started_cluster.minio_bucket + TABLE_NAME = "test_metadata_selection_with_uuid_" + format_version + + spark.sql( + f"CREATE TABLE {TABLE_NAME} (id bigint, data string) USING iceberg TBLPROPERTIES ('format-version' = '2', 'write.update.mode'='merge-on-read', 'write.delete.mode'='merge-on-read', 'write.merge.mode'='merge-on-read')" + ) + + for i in range(50): + spark.sql( + f"INSERT INTO {TABLE_NAME} select id, char(id + ascii('a')) from range(10)" + ) + + for i in range(50): + os.rename( + f"/iceberg_data/default/{TABLE_NAME}/metadata/v{i + 1}.metadata.json", + f"/iceberg_data/default/{TABLE_NAME}/metadata/{str(i).zfill(5)}-{uuid.uuid4()}.metadata.json", + ) + + files = upload_directory( + minio_client, bucket, f"/iceberg_data/default/{TABLE_NAME}/", "" + ) + + create_iceberg_table(instance, TABLE_NAME) + + assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 500 diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py index 983e52ca294..f26a273fe5e 100644 --- a/tests/integration/test_storage_rabbitmq/test.py +++ b/tests/integration/test_storage_rabbitmq/test.py @@ -102,18 +102,35 @@ def rabbitmq_setup_teardown(): # Tests -def test_rabbitmq_select(rabbitmq_cluster): +@pytest.mark.parametrize( + "secure", + [ + pytest.param(0), + pytest.param(1), + ], +) +def test_rabbitmq_select(rabbitmq_cluster, secure): + if secure and instance.is_built_with_thread_sanitizer(): + pytest.skip( + "Data races: see https://github.com/ClickHouse/ClickHouse/issues/56866" + ) + + port = cluster.rabbitmq_port + if secure: + port = cluster.rabbitmq_secure_port + instance.query( """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ - SETTINGS rabbitmq_host_port = '{}:5672', + SETTINGS rabbitmq_host_port = '{}:{}', rabbitmq_exchange_name = 'select', rabbitmq_commit_on_select = 1, rabbitmq_format = 'JSONEachRow', - rabbitmq_row_delimiter = '\\n'; + rabbitmq_row_delimiter = '\\n', + rabbitmq_secure = {}; """.format( - rabbitmq_cluster.rabbitmq_host + rabbitmq_cluster.rabbitmq_host, port, secure ) ) @@ -3442,18 +3459,18 @@ def test_rabbitmq_handle_error_mode_stream(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n', rabbitmq_handle_error_mode = 'stream'; - + CREATE TABLE test.errors (error Nullable(String), broken_message Nullable(String)) ENGINE = MergeTree() ORDER BY tuple(); CREATE MATERIALIZED VIEW test.errors_view TO test.errors AS SELECT _error as error, _raw_message as broken_message FROM test.rabbit where not isNull(_error); - + CREATE TABLE test.data (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; - + CREATE MATERIALIZED VIEW test.view TO test.data AS SELECT key, value FROM test.rabbit; """.format( diff --git a/tests/integration/test_storage_s3/configs/blob_log.xml 
b/tests/integration/test_storage_s3/configs/blob_log.xml new file mode 100644 index 00000000000..474c163b937 --- /dev/null +++ b/tests/integration/test_storage_s3/configs/blob_log.xml @@ -0,0 +1,9 @@ + + + system + blob_storage_log
+ toYYYYMM(event_date) + 7500 + event_date + INTERVAL 30 DAY +
+
diff --git a/tests/integration/test_storage_s3/configs/defaultS3.xml b/tests/integration/test_storage_s3/configs/defaultS3.xml index 37454ef6781..7dac6d9fbb5 100644 --- a/tests/integration/test_storage_s3/configs/defaultS3.xml +++ b/tests/integration/test_storage_s3/configs/defaultS3.xml @@ -1,9 +1,4 @@ - - - 5 - - http://resolver:8080 diff --git a/tests/integration/test_storage_s3/configs/s3_retry.xml b/tests/integration/test_storage_s3/configs/s3_retry.xml index 727e23273cf..3171da051d0 100644 --- a/tests/integration/test_storage_s3/configs/s3_retry.xml +++ b/tests/integration/test_storage_s3/configs/s3_retry.xml @@ -1,7 +1,9 @@ - 5 + 1 + 10 + 5 diff --git a/tests/integration/test_storage_s3/s3_mocks/unstable_server.py b/tests/integration/test_storage_s3/s3_mocks/unstable_server.py index 103dd30340c..5ef781bdc9e 100644 --- a/tests/integration/test_storage_s3/s3_mocks/unstable_server.py +++ b/tests/integration/test_storage_s3/s3_mocks/unstable_server.py @@ -4,6 +4,7 @@ import re import socket import struct import sys +import time def gen_n_digit_number(n): @@ -39,14 +40,14 @@ random.seed("Unstable server/1.0") # Generating some "random" data and append a line which contains sum of numbers in column 4. lines = ( - b"".join((gen_line() for _ in range(500000))) + b"".join([gen_line() for _ in range(500000)]) + f"0,0,0,{-sum_in_4_column}\n".encode() ) class RequestHandler(http.server.BaseHTTPRequestHandler): def do_HEAD(self): - if self.path == "/root/test.csv": + if self.path == "/root/test.csv" or self.path == "/root/slow_send_test.csv": self.from_bytes = 0 self.end_bytes = len(lines) self.size = self.end_bytes @@ -101,6 +102,18 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): print("Dropping connection") break + if self.path == "/root/slow_send_test.csv": + self.send_block_size = 81920 + + for c, i in enumerate( + range(self.from_bytes, self.end_bytes, self.send_block_size) + ): + self.wfile.write( + lines[i : min(i + self.send_block_size, self.end_bytes)] + ) + self.wfile.flush() + time.sleep(1) + elif self.path == "/": self.wfile.write(b"OK") diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 01ade1acc4d..f5c6f54a1ea 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -1,5 +1,5 @@ import gzip -import json +import uuid import logging import os import io @@ -54,6 +54,7 @@ def started_cluster(): "configs/defaultS3.xml", "configs/named_collections.xml", "configs/schema_cache.xml", + "configs/blob_log.xml", ], user_configs=[ "configs/access.xml", @@ -104,11 +105,9 @@ def started_cluster(): cluster.shutdown() -def run_query(instance, query, stdin=None, settings=None): - # type: (ClickHouseInstance, str, object, dict) -> str - +def run_query(instance, query, *args, **kwargs): logging.info("Running query '{}'...".format(query)) - result = instance.query(query, stdin=stdin, settings=settings) + result = instance.query(query, *args, **kwargs) logging.info("Query finished") return result @@ -129,7 +128,7 @@ def run_query(instance, query, stdin=None, settings=None): ], ) def test_put(started_cluster, maybe_auth, positive, compression): - # type: (ClickHouseCluster) -> None + # type: (ClickHouseCluster, str, bool, str) -> None bucket = ( started_cluster.minio_bucket @@ -496,7 +495,7 @@ def test_put_get_with_globs(started_cluster): ], ) def test_multipart(started_cluster, maybe_auth, positive): - # type: (ClickHouseCluster) -> None + # type: (ClickHouseCluster, str, bool) -> None 
bucket = ( started_cluster.minio_bucket @@ -529,7 +528,7 @@ def test_multipart(started_cluster, maybe_auth, positive): maybe_auth, table_format, ) - + put_query_id = uuid.uuid4().hex try: run_query( instance, @@ -539,6 +538,7 @@ def test_multipart(started_cluster, maybe_auth, positive): "s3_min_upload_part_size": min_part_size_bytes, "s3_max_single_part_upload_size": 0, }, + query_id=put_query_id, ) except helpers.client.QueryRuntimeException: if positive: @@ -583,6 +583,24 @@ def test_multipart(started_cluster, maybe_auth, positive): == "\t".join(map(str, [total_rows, total_rows * 2, total_rows * 3])) + "\n" ) + if positive: + instance.query("SYSTEM FLUSH LOGS") + blob_storage_log = instance.query(f"SELECT * FROM system.blob_storage_log") + + result = instance.query( + f"""SELECT + countIf(event_type == 'MultiPartUploadCreate'), + countIf(event_type == 'MultiPartUploadWrite'), + countIf(event_type == 'MultiPartUploadComplete'), + count() + FROM system.blob_storage_log WHERE query_id = '{put_query_id}'""" + ) + r = result.strip().split("\t") + assert int(r[0]) == 1, blob_storage_log + assert int(r[1]) >= 1, blob_storage_log + assert int(r[2]) == 1, blob_storage_log + assert int(r[0]) + int(r[1]) + int(r[2]) == int(r[3]), blob_storage_log + def test_remote_host_filter(started_cluster): instance = started_cluster.instances["restricted_dummy"] @@ -818,6 +836,15 @@ def test_storage_s3_get_unstable(started_cluster): assert result.splitlines() == ["500001,500000,0"] +def test_storage_s3_get_slow(started_cluster): + bucket = started_cluster.minio_bucket + instance = started_cluster.instances["dummy"] + table_format = "column1 Int64, column2 Int64, column3 Int64, column4 Int64" + get_query = f"SELECT count(), sum(column3), sum(column4) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/slow_send_test.csv', 'CSV', '{table_format}') FORMAT CSV" + result = run_query(instance, get_query) + assert result.splitlines() == ["500001,500000,0"] + + def test_storage_s3_put_uncompressed(started_cluster): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] @@ -846,14 +873,34 @@ def test_storage_s3_put_uncompressed(started_cluster): name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename ), ) - - run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data))) + insert_query_id = uuid.uuid4().hex + data_sep = "),(" + run_query( + instance, + "INSERT INTO {} VALUES ({})".format(name, data_sep.join(data)), + query_id=insert_query_id, + ) run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"] uncompressed_content = get_s3_file_content(started_cluster, bucket, filename) assert sum([int(i.split(",")[1]) for i in uncompressed_content.splitlines()]) == 753 + instance.query("SYSTEM FLUSH LOGS") + blob_storage_log = instance.query(f"SELECT * FROM system.blob_storage_log") + + result = instance.query( + f"""SELECT + countIf(event_type == 'Upload'), + countIf(remote_path == '{filename}'), + countIf(bucket == '{bucket}'), + count() + FROM system.blob_storage_log WHERE query_id = '{insert_query_id}'""" + ) + r = result.strip().split("\t") + assert int(r[0]) >= 1, blob_storage_log + assert all(col == r[0] for col in r), blob_storage_log + @pytest.mark.parametrize( "extension,method", @@ -944,13 +991,6 @@ def test_predefined_connection_configuration(started_cluster): instance.query("GRANT SELECT ON *.* TO user") instance.query(f"drop table if exists {name}", user="user") - error = instance.query_and_get_error( - f"CREATE TABLE 
{name} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')" - ) - assert ( - "To execute this query, it's necessary to have the grant NAMED COLLECTION ON s3_conf1" - in error - ) error = instance.query_and_get_error( f"CREATE TABLE {name} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')", user="user", @@ -975,11 +1015,6 @@ def test_predefined_connection_configuration(started_cluster): ) assert result == instance.query("SELECT number FROM numbers(10)") - error = instance.query_and_get_error("SELECT * FROM s3(no_collection)") - assert ( - "To execute this query, it's necessary to have the grant NAMED COLLECTION ON no_collection" - in error - ) error = instance.query_and_get_error("SELECT * FROM s3(no_collection)", user="user") assert ( "To execute this query, it's necessary to have the grant NAMED COLLECTION ON no_collection" diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 9f41cfd176d..b1163a549b1 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -717,6 +717,8 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" files_to_generate = 300 + row_num = 50 + total_rows = row_num * files_to_generate for instance in [node, node_2]: create_table( @@ -734,7 +736,7 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): create_mv(instance, table_name, dst_table_name) total_values = generate_random_files( - started_cluster, files_path, files_to_generate, row_num=1 + started_cluster, files_path, files_to_generate, row_num=row_num ) def get_count(node, table_name): @@ -743,13 +745,13 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): for _ in range(150): if ( get_count(node, dst_table_name) + get_count(node_2, dst_table_name) - ) == files_to_generate: + ) == total_rows: break time.sleep(1) if ( get_count(node, dst_table_name) + get_count(node_2, dst_table_name) - ) != files_to_generate: + ) != total_rows: info = node.query( f"SELECT * FROM system.s3queue WHERE zookeeper_path like '%{table_name}' ORDER BY file_name FORMAT Vertical" ) @@ -762,7 +764,7 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): list(map(int, l.split())) for l in run_query(node_2, get_query).splitlines() ] - assert len(res1) + len(res2) == files_to_generate + assert len(res1) + len(res2) == total_rows # Checking that all engines have made progress assert len(res1) > 0 @@ -774,7 +776,7 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): time.sleep(10) assert ( get_count(node, dst_table_name) + get_count(node_2, dst_table_name) - ) == files_to_generate + ) == total_rows def test_max_set_age(started_cluster): diff --git a/tests/integration/test_system_metrics/test.py b/tests/integration/test_system_metrics/test.py index 338622b824e..e59ed919708 100644 --- a/tests/integration/test_system_metrics/test.py +++ b/tests/integration/test_system_metrics/test.py @@ -5,6 +5,8 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry from helpers.network import PartitionManager +from kazoo.client import KazooClient + def fill_nodes(nodes, shard): for node in nodes: @@ -24,7 +26,10 @@ def fill_nodes(nodes, shard): cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( - "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True + "node1", + 
main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, + stay_alive=True, ) node2 = cluster.add_instance( "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True @@ -214,3 +219,45 @@ def test_attach_without_zk_incr_readonly_metric(start_cluster): retry_count=300, sleep_time=1, ) + + +def get_zk(timeout=30.0): + _zk_instance = KazooClient( + hosts=cluster.get_instance_ip("zoo1") + ":2181", timeout=timeout + ) + _zk_instance.start() + return _zk_instance + + +def test_broken_tables_readonly_metric(start_cluster): + node1.query( + "CREATE TABLE test.broken_table_readonly(initial_name Int8) ENGINE = ReplicatedMergeTree('/clickhouse/broken_table_readonly', 'replica') ORDER BY tuple()" + ) + assert_eq_with_retry( + node1, + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", + "0\n", + retry_count=300, + sleep_time=1, + ) + + zk_path = node1.query( + "SELECT replica_path FROM system.replicas WHERE table = 'broken_table_readonly'" + ).strip() + + node1.stop_clickhouse() + + zk_client = get_zk() + + columns_path = zk_path + "/columns" + metadata = zk_client.get(columns_path)[0] + modified_metadata = metadata.replace(b"initial_name", b"new_name") + zk_client.set(columns_path, modified_metadata) + + node1.start_clickhouse() + + assert node1.contains_in_log("Initialization failed, table will remain readonly") + assert ( + node1.query("SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'") + == "1\n" + ) diff --git a/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml b/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml index b753123a5ef..5a087d03266 100644 --- a/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml @@ -12,6 +12,7 @@ /tiny_local_cache/ 10M 1M + 1M 1 diff --git a/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py index c1c076277bb..2f18a0a1afa 100644 --- a/tests/integration/test_ttl_move/test.py +++ b/tests/integration/test_ttl_move/test.py @@ -1,5 +1,5 @@ +import inspect import random -import string import threading import time from multiprocessing.dummy import Pool @@ -8,6 +8,8 @@ from helpers.test_tools import assert_logs_contain_with_retry import pytest from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +from helpers.test_tools import assert_eq_with_retry # FIXME: each sleep(1) is a time bomb, and not only this cause false positive # it also makes the test not reliable (i.e. 
assertions may be wrong, due timing issues) @@ -26,6 +28,7 @@ node1 = cluster.add_instance( with_zookeeper=True, tmpfs=["/jbod1:size=40M", "/jbod2:size=40M", "/external:size=200M"], macros={"shard": 0, "replica": 1}, + stay_alive=True, ) node2 = cluster.add_instance( @@ -1813,3 +1816,117 @@ def test_ttl_move_if_exists(started_cluster, name, dest_type): node2.query("DROP TABLE IF EXISTS {} SYNC".format(name)) except: pass + + +class TestCancelBackgroundMoving: + @pytest.fixture() + def prepare_table(self, request, started_cluster): + name = unique_table_name(request.node.name) + engine = f"ReplicatedMergeTree('/clickhouse/{name}', '1')" + + node1.query( + f""" + CREATE TABLE {name} ( + s1 String, + d1 DateTime + ) ENGINE = {engine} + ORDER BY tuple() + TTL d1 + interval 5 second TO DISK 'external' + SETTINGS storage_policy='small_jbod_with_external' + """ + ) + + node1.query("SYSTEM STOP MOVES") + + # Insert part which is about to move + node1.query( + "INSERT INTO {} (s1, d1) VALUES (randomPrintableASCII({}), toDateTime({}))".format( + name, 10 * 1024 * 1024, time.time() + ) + ) + + # Set low bandwidth to have enough time to cancel part moving + config = inspect.cleandoc( + f""" + + { 256 * 1024 } + + """ + ) + node1.replace_config( + "/etc/clickhouse-server/config.d/disk_throttling.xml", config + ) + node1.restart_clickhouse() + + try: + yield name + finally: + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") + + def test_cancel_background_moving_on_stop_moves_query(self, prepare_table): + name = prepare_table + + # Wait for background moving task to be started + node1.query("SYSTEM START MOVES") + assert_eq_with_retry( + node1, + f"SELECT count() FROM system.moves WHERE table = '{name}'".strip(), + "1", + ) + + # Wait for background moving task to be cancelled + node1.query("SYSTEM STOP MOVES") + assert_logs_contain_with_retry( + node1, "MergeTreeBackgroundExecutor.*Cancelled moving parts" + ) + assert_eq_with_retry( + node1, + f"SELECT count() FROM system.moves WHERE table = '{name}'".strip(), + "0", + ) + + # Ensure that part was not moved + assert set(get_used_disks_for_table(node1, name)) == {"jbod1"} + + def test_cancel_background_moving_on_table_detach(self, prepare_table): + name = prepare_table + + # Wait for background moving task to be started + node1.query("SYSTEM START MOVES") + assert_eq_with_retry( + node1, + f"SELECT count() FROM system.moves WHERE table = '{name}'".strip(), + "1", + ) + + # Wait for background moving task to be cancelled + node1.query(f"DETACH Table {name}") + assert_logs_contain_with_retry( + node1, "MergeTreeBackgroundExecutor.*Cancelled moving parts" + ) + assert_eq_with_retry( + node1, + f"SELECT count() FROM system.moves WHERE table = '{name}'".strip(), + "0", + ) + + def test_cancel_background_moving_on_zookeeper_disconnect(self, prepare_table): + name = prepare_table + + # Wait for background moving task to be started + node1.query("SYSTEM START MOVES") + assert_eq_with_retry( + node1, + f"SELECT count() FROM system.moves WHERE table = '{name}'".strip(), + "1", + ) + + with PartitionManager() as pm: + pm.drop_instance_zk_connections(node1) + # Wait for background moving task to be cancelled + assert_logs_contain_with_retry( + node1, + "MergeTreeBackgroundExecutor.*Cancelled moving parts", + retry_count=30, + sleep_time=1, + ) diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index 117ebe37dd2..119a211ae45 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ 
b/tests/integration/test_ttl_replicated/test.py @@ -17,7 +17,7 @@ node4 = cluster.add_instance( "node4", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.12.4.5", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, main_configs=[ @@ -30,7 +30,7 @@ node5 = cluster.add_instance( "node5", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.12.4.5", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, main_configs=[ @@ -42,7 +42,7 @@ node6 = cluster.add_instance( "node6", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.12.4.5", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, main_configs=[ @@ -66,47 +66,41 @@ def started_cluster(): cluster.shutdown() -def drop_table(nodes, table_name): - for node in nodes: - node.query("DROP TABLE IF EXISTS {} SYNC".format(table_name)) - - # Column TTL works only with wide parts, because it's very expensive to apply it for compact parts def test_ttl_columns(started_cluster): - drop_table([node1, node2], "test_ttl") + table_name = f"test_ttl_{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ - CREATE TABLE test_ttl(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) + CREATE TABLE {table_name}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_columns', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0, max_merge_selecting_sleep_ms=6000; """.format( - replica=node.name + table_name=table_name, replica=node.name ) ) node1.query( - "INSERT INTO test_ttl VALUES (toDateTime('2000-10-10 00:00:00'), 1, 1, 3)" + f"INSERT INTO {table_name} VALUES (toDateTime('2000-10-10 00:00:00'), 1, 1, 3)" ) node1.query( - "INSERT INTO test_ttl VALUES (toDateTime('2000-10-11 10:00:00'), 2, 2, 4)" + f"INSERT INTO {table_name} VALUES (toDateTime('2000-10-11 10:00:00'), 2, 2, 4)" ) time.sleep(1) # sleep to allow use ttl merge selector for second time - node1.query("OPTIMIZE TABLE test_ttl FINAL") + node1.query(f"OPTIMIZE TABLE {table_name} FINAL") expected = "1\t0\t0\n2\t0\t0\n" - assert TSV(node1.query("SELECT id, a, b FROM test_ttl ORDER BY id")) == TSV( + assert TSV(node1.query(f"SELECT id, a, b FROM {table_name} ORDER BY id")) == TSV( expected ) - assert TSV(node2.query("SELECT id, a, b FROM test_ttl ORDER BY id")) == TSV( + assert TSV(node2.query(f"SELECT id, a, b FROM {table_name} ORDER BY id")) == TSV( expected ) def test_merge_with_ttl_timeout(started_cluster): - table = "test_merge_with_ttl_timeout" - drop_table([node1, node2], table) + table = f"test_merge_with_ttl_timeout_{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ @@ -157,11 +151,11 @@ def test_merge_with_ttl_timeout(started_cluster): def test_ttl_many_columns(started_cluster): - drop_table([node1, node2], "test_ttl_2") + table = f"test_ttl_2{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ - CREATE TABLE test_ttl_2(date DateTime, id UInt32, + CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date, _idx Int32 TTL date, _offset Int32 TTL date, @@ -169,44 +163,40 @@ def test_ttl_many_columns(started_cluster): ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_2', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000; """.format( - replica=node.name + table=table, 
replica=node.name ) ) - node1.query("SYSTEM STOP TTL MERGES test_ttl_2") - node2.query("SYSTEM STOP TTL MERGES test_ttl_2") + node1.query(f"SYSTEM STOP TTL MERGES {table}") + node2.query(f"SYSTEM STOP TTL MERGES {table}") node1.query( - "INSERT INTO test_ttl_2 VALUES (toDateTime('2000-10-10 00:00:00'), 1, 2, 3, 4, 5)" + f"INSERT INTO {table} VALUES (toDateTime('2000-10-10 00:00:00'), 1, 2, 3, 4, 5)" ) node1.query( - "INSERT INTO test_ttl_2 VALUES (toDateTime('2100-10-10 10:00:00'), 6, 7, 8, 9, 10)" + f"INSERT INTO {table} VALUES (toDateTime('2100-10-10 10:00:00'), 6, 7, 8, 9, 10)" ) - node2.query("SYSTEM SYNC REPLICA test_ttl_2", timeout=5) + node2.query(f"SYSTEM SYNC REPLICA {table}", timeout=5) # Check that part will appear in result of merge - node1.query("SYSTEM STOP FETCHES test_ttl_2") - node2.query("SYSTEM STOP FETCHES test_ttl_2") + node1.query(f"SYSTEM STOP FETCHES {table}") + node2.query(f"SYSTEM STOP FETCHES {table}") - node1.query("SYSTEM START TTL MERGES test_ttl_2") - node2.query("SYSTEM START TTL MERGES test_ttl_2") + node1.query(f"SYSTEM START TTL MERGES {table}") + node2.query(f"SYSTEM START TTL MERGES {table}") time.sleep(1) # sleep to allow use ttl merge selector for second time - node1.query("OPTIMIZE TABLE test_ttl_2 FINAL", timeout=5) + node1.query(f"OPTIMIZE TABLE {table} FINAL", timeout=5) - node2.query("SYSTEM SYNC REPLICA test_ttl_2", timeout=5) + node2.query(f"SYSTEM SYNC REPLICA {table}", timeout=5) expected = "1\t0\t0\t0\t0\n6\t7\t8\t9\t10\n" assert TSV( - node1.query( - "SELECT id, a, _idx, _offset, _partition FROM test_ttl_2 ORDER BY id" - ) + node1.query(f"SELECT id, a, _idx, _offset, _partition FROM {table} ORDER BY id") ) == TSV(expected) assert TSV( - node2.query( - "SELECT id, a, _idx, _offset, _partition FROM test_ttl_2 ORDER BY id" - ) + node2.query(f"SELECT id, a, _idx, _offset, _partition FROM {table} ORDER BY id") ) == TSV(expected) @@ -218,107 +208,107 @@ def test_ttl_many_columns(started_cluster): ], ) def test_ttl_table(started_cluster, delete_suffix): - drop_table([node1, node2], "test_ttl") + table = f"test_ttl_table_{delete_suffix}_{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ - CREATE TABLE test_ttl(date DateTime, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}') + CREATE TABLE {table}(date DateTime, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 1 DAY {delete_suffix} SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000; """.format( - replica=node.name, delete_suffix=delete_suffix + table=table, replica=node.name, delete_suffix=delete_suffix ) ) - node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-10 00:00:00'), 1)") - node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-11 10:00:00'), 2)") + node1.query(f"INSERT INTO {table} VALUES (toDateTime('2000-10-10 00:00:00'), 1)") + node1.query(f"INSERT INTO {table} VALUES (toDateTime('2000-10-11 10:00:00'), 2)") time.sleep(1) # sleep to allow use ttl merge selector for second time - node1.query("OPTIMIZE TABLE test_ttl FINAL") + node1.query(f"OPTIMIZE TABLE {table} FINAL") - assert TSV(node1.query("SELECT * FROM test_ttl")) == TSV("") - assert TSV(node2.query("SELECT * FROM test_ttl")) == TSV("") + assert TSV(node1.query(f"SELECT * FROM {table}")) == TSV("") + assert TSV(node2.query(f"SELECT * FROM {table}")) == TSV("") def test_modify_ttl(started_cluster): - drop_table([node1, node2], 
"test_ttl") + table = f"test_modify_ttl_{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ - CREATE TABLE test_ttl(d DateTime, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_modify', '{replica}') + CREATE TABLE {table}(d DateTime, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY id """.format( - replica=node.name + table=table, replica=node.name ) ) node1.query( - "INSERT INTO test_ttl VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)" + f"INSERT INTO {table} VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)" ) - node2.query("SYSTEM SYNC REPLICA test_ttl", timeout=20) + node2.query(f"SYSTEM SYNC REPLICA {table}", timeout=20) node1.query( - "ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 4 HOUR SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY TTL d + INTERVAL 4 HOUR SETTINGS replication_alter_partitions_sync = 2" ) - assert node2.query("SELECT id FROM test_ttl") == "2\n3\n" + assert node2.query(f"SELECT id FROM {table}") == "2\n3\n" node2.query( - "ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 2 HOUR SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY TTL d + INTERVAL 2 HOUR SETTINGS replication_alter_partitions_sync = 2" ) - assert node1.query("SELECT id FROM test_ttl") == "3\n" + assert node1.query(f"SELECT id FROM {table}") == "3\n" node1.query( - "ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 30 MINUTE SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY TTL d + INTERVAL 30 MINUTE SETTINGS replication_alter_partitions_sync = 2" ) - assert node2.query("SELECT id FROM test_ttl") == "" + assert node2.query(f"SELECT id FROM {table}") == "" def test_modify_column_ttl(started_cluster): - drop_table([node1, node2], "test_ttl") + table = f"test_modify_column_ttl_{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ - CREATE TABLE test_ttl(d DateTime, id UInt32 DEFAULT 42) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_column', '{replica}') + CREATE TABLE {table}(d DateTime, id UInt32 DEFAULT 42) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY d """.format( - replica=node.name + table=table, replica=node.name ) ) node1.query( - "INSERT INTO test_ttl VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)" + f"INSERT INTO {table} VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)" ) - node2.query("SYSTEM SYNC REPLICA test_ttl", timeout=20) + node2.query(f"SYSTEM SYNC REPLICA {table}", timeout=20) node1.query( - "ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 4 HOUR SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY COLUMN id UInt32 TTL d + INTERVAL 4 HOUR SETTINGS replication_alter_partitions_sync = 2" ) - assert node2.query("SELECT id FROM test_ttl") == "42\n2\n3\n" + assert node2.query(f"SELECT id FROM {table}") == "42\n2\n3\n" node1.query( - "ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 2 HOUR SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY COLUMN id UInt32 TTL d + INTERVAL 2 HOUR SETTINGS replication_alter_partitions_sync = 2" ) - assert node1.query("SELECT id FROM test_ttl") == "42\n42\n3\n" + assert node1.query(f"SELECT id FROM {table}") == "42\n42\n3\n" node1.query( - "ALTER 
TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 30 MINUTE SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY COLUMN id UInt32 TTL d + INTERVAL 30 MINUTE SETTINGS replication_alter_partitions_sync = 2" ) - assert node2.query("SELECT id FROM test_ttl") == "42\n42\n42\n" + assert node2.query(f"SELECT id FROM {table}") == "42\n42\n42\n" def test_ttl_double_delete_rule_returns_error(started_cluster): - drop_table([node1, node2], "test_ttl") + table = "test_ttl_double_delete_rule_returns_error" try: node1.query( """ - CREATE TABLE test_ttl(date DateTime, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_double_delete', '{replica}') + CREATE TABLE {table}(date DateTime, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000 """.format( - replica=node1.name + table=table, replica=node1.name ) ) assert False @@ -364,7 +354,6 @@ def test_ttl_alter_delete(started_cluster, name, engine): for a table that has TTL delete expression defined but no explicit storage policy assigned. """ - drop_table([node1], name) node1.query( """ @@ -426,7 +415,6 @@ def test_ttl_alter_delete(started_cluster, name, engine): def test_ttl_empty_parts(started_cluster): - drop_table([node1, node2], "test_ttl_empty_parts") for node in [node1, node2]: node.query( """ @@ -519,65 +507,59 @@ def test_ttl_empty_parts(started_cluster): [(node1, node2, 0), (node3, node4, 1), (node5, node6, 2)], ) def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): - drop_table([node_left, node_right], "test_ttl_delete") - drop_table([node_left, node_right], "test_ttl_group_by") - drop_table([node_left, node_right], "test_ttl_where") - + table = f"test_ttl_compatibility_{node_left.name}_{node_right.name}_{num_run}" for node in [node_left, node_right]: node.query( """ - CREATE TABLE test_ttl_delete(date DateTime, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_delete_{suff}', '{replica}') + CREATE TABLE {table}_delete(date DateTime, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}_delete', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND - SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 """.format( - suff=num_run, replica=node.name + table=table, replica=node.name ) ) node.query( """ - CREATE TABLE test_ttl_group_by(date DateTime, id UInt32, val UInt64) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_group_by_{suff}', '{replica}') + CREATE TABLE {table}_group_by(date DateTime, id UInt32, val UInt64) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}_group_by', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND GROUP BY id SET val = sum(val) - SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 """.format( - suff=num_run, replica=node.name + table=table, replica=node.name ) ) node.query( """ - CREATE TABLE test_ttl_where(date DateTime, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_where_{suff}', '{replica}') + CREATE TABLE {table}_where(date DateTime, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}_where', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + 
INTERVAL 3 SECOND DELETE WHERE id % 2 = 1 - SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 """.format( - suff=num_run, replica=node.name + table=table, replica=node.name ) ) - node_left.query("INSERT INTO test_ttl_delete VALUES (now(), 1)") + node_left.query(f"INSERT INTO {table}_delete VALUES (now(), 1)") node_left.query( - "INSERT INTO test_ttl_delete VALUES (toDateTime('2100-10-11 10:00:00'), 2)" + f"INSERT INTO {table}_delete VALUES (toDateTime('2100-10-11 10:00:00'), 2)" ) - node_right.query("INSERT INTO test_ttl_delete VALUES (now(), 3)") + node_right.query(f"INSERT INTO {table}_delete VALUES (now(), 3)") node_right.query( - "INSERT INTO test_ttl_delete VALUES (toDateTime('2100-10-11 10:00:00'), 4)" + f"INSERT INTO {table}_delete VALUES (toDateTime('2100-10-11 10:00:00'), 4)" ) - node_left.query("INSERT INTO test_ttl_group_by VALUES (now(), 0, 1)") - node_left.query("INSERT INTO test_ttl_group_by VALUES (now(), 0, 2)") - node_right.query("INSERT INTO test_ttl_group_by VALUES (now(), 0, 3)") - node_right.query("INSERT INTO test_ttl_group_by VALUES (now(), 0, 4)") + node_left.query(f"INSERT INTO {table}_group_by VALUES (now(), 0, 1)") + node_left.query(f"INSERT INTO {table}_group_by VALUES (now(), 0, 2)") + node_right.query(f"INSERT INTO {table}_group_by VALUES (now(), 0, 3)") + node_right.query(f"INSERT INTO {table}_group_by VALUES (now(), 0, 4)") - node_left.query("INSERT INTO test_ttl_where VALUES (now(), 1)") - node_left.query("INSERT INTO test_ttl_where VALUES (now(), 2)") - node_right.query("INSERT INTO test_ttl_where VALUES (now(), 3)") - node_right.query("INSERT INTO test_ttl_where VALUES (now(), 4)") + node_left.query(f"INSERT INTO {table}_where VALUES (now(), 1)") + node_left.query(f"INSERT INTO {table}_where VALUES (now(), 2)") + node_right.query(f"INSERT INTO {table}_where VALUES (now(), 3)") + node_right.query(f"INSERT INTO {table}_where VALUES (now(), 4)") if node_left.with_installed_binary: node_left.restart_with_latest_version() @@ -588,13 +570,13 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): time.sleep(5) # Wait for TTL # after restart table can be in readonly mode - exec_query_with_retry(node_right, "OPTIMIZE TABLE test_ttl_delete FINAL") - node_right.query("OPTIMIZE TABLE test_ttl_group_by FINAL") - node_right.query("OPTIMIZE TABLE test_ttl_where FINAL") + exec_query_with_retry(node_right, f"OPTIMIZE TABLE {table}_delete FINAL") + node_right.query(f"OPTIMIZE TABLE {table}_group_by FINAL") + node_right.query(f"OPTIMIZE TABLE {table}_where FINAL") - exec_query_with_retry(node_left, "OPTIMIZE TABLE test_ttl_delete FINAL") - node_left.query("OPTIMIZE TABLE test_ttl_group_by FINAL", timeout=20) - node_left.query("OPTIMIZE TABLE test_ttl_where FINAL", timeout=20) + exec_query_with_retry(node_left, f"OPTIMIZE TABLE {table}_delete FINAL") + node_left.query(f"OPTIMIZE TABLE {table}_group_by FINAL", timeout=20) + node_left.query(f"OPTIMIZE TABLE {table}_where FINAL", timeout=20) # After OPTIMIZE TABLE, it is not guaranteed that everything is merged. # Possible scenario (for test_ttl_group_by): @@ -605,19 +587,19 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): # 4. OPTIMIZE FINAL does nothing, cause there is an entry for 0_3 # # So, let's also sync replicas for node_right (for now). 
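# The lines below apply an optimize-then-sync pattern; a minimal sketch of it as a
# helper (illustrative only — the helper name is not in this diff; it assumes the
# exec_query_with_retry import already used by this test file):
def _optimize_and_sync(node, table, timeout=20):
    # OPTIMIZE FINAL can be a no-op while a TTL merge is still queued,
    # so retry it and then wait for the replication queue to drain.
    exec_query_with_retry(node, f"OPTIMIZE TABLE {table} FINAL")
    node.query(f"SYSTEM SYNC REPLICA {table}", timeout=timeout)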
- exec_query_with_retry(node_right, "SYSTEM SYNC REPLICA test_ttl_delete") - node_right.query("SYSTEM SYNC REPLICA test_ttl_group_by", timeout=20) - node_right.query("SYSTEM SYNC REPLICA test_ttl_where", timeout=20) + exec_query_with_retry(node_right, f"SYSTEM SYNC REPLICA {table}_delete") + node_right.query(f"SYSTEM SYNC REPLICA {table}_group_by", timeout=20) + node_right.query(f"SYSTEM SYNC REPLICA {table}_where", timeout=20) - exec_query_with_retry(node_left, "SYSTEM SYNC REPLICA test_ttl_delete") - node_left.query("SYSTEM SYNC REPLICA test_ttl_group_by", timeout=20) - node_left.query("SYSTEM SYNC REPLICA test_ttl_where", timeout=20) + exec_query_with_retry(node_left, f"SYSTEM SYNC REPLICA {table}_delete") + node_left.query(f"SYSTEM SYNC REPLICA {table}_group_by", timeout=20) + node_left.query(f"SYSTEM SYNC REPLICA {table}_where", timeout=20) - assert node_left.query("SELECT id FROM test_ttl_delete ORDER BY id") == "2\n4\n" - assert node_right.query("SELECT id FROM test_ttl_delete ORDER BY id") == "2\n4\n" + assert node_left.query(f"SELECT id FROM {table}_delete ORDER BY id") == "2\n4\n" + assert node_right.query(f"SELECT id FROM {table}_delete ORDER BY id") == "2\n4\n" - assert node_left.query("SELECT val FROM test_ttl_group_by ORDER BY id") == "10\n" - assert node_right.query("SELECT val FROM test_ttl_group_by ORDER BY id") == "10\n" + assert node_left.query(f"SELECT val FROM {table}_group_by ORDER BY id") == "10\n" + assert node_right.query(f"SELECT val FROM {table}_group_by ORDER BY id") == "10\n" - assert node_left.query("SELECT id FROM test_ttl_where ORDER BY id") == "2\n4\n" - assert node_right.query("SELECT id FROM test_ttl_where ORDER BY id") == "2\n4\n" + assert node_left.query(f"SELECT id FROM {table}_where ORDER BY id") == "2\n4\n" + assert node_right.query(f"SELECT id FROM {table}_where ORDER BY id") == "2\n4\n" diff --git a/tests/integration/test_undrop_query/test.py b/tests/integration/test_undrop_query/test.py index 63d92d84541..590a5690e55 100644 --- a/tests/integration/test_undrop_query/test.py +++ b/tests/integration/test_undrop_query/test.py @@ -23,7 +23,6 @@ def started_cluster(): def test_undrop_drop_and_undrop_loop(started_cluster): count = 0 - node.query("set allow_experimental_undrop_table_query = 1;") while count < 10: random_sec = random.randint(0, 10) table_uuid = uuid.uuid1().__str__() @@ -45,7 +44,7 @@ def test_undrop_drop_and_undrop_loop(started_cluster): + count.__str__() + " uuid '" + table_uuid - + "' settings allow_experimental_undrop_table_query = 1;" + + "';" ) assert "UNKNOWN_TABLE" in error else: @@ -54,6 +53,6 @@ def test_undrop_drop_and_undrop_loop(started_cluster): + count.__str__() + " uuid '" + table_uuid - + "' settings allow_experimental_undrop_table_query = 1;" + + "';" ) count = count + 1 diff --git a/tests/integration/test_version_update/test.py b/tests/integration/test_version_update/test.py index b8fa3e7ebb4..a752960bc76 100644 --- a/tests/integration/test_version_update/test.py +++ b/tests/integration/test_version_update/test.py @@ -12,18 +12,18 @@ node2 = cluster.add_instance( "node2", with_zookeeper=True, image="yandex/clickhouse-server", - tag="21.2", + tag="20.8.11.17", with_installed_binary=True, stay_alive=True, allow_analyzer=False, ) -# Use differents nodes because if there is node.restart_from_latest_version(), then in later tests +# Use different nodes because if there is node.restart_from_latest_version(), then in later tests # it will be with latest version, but shouldn't, order of tests in CI is shuffled. 
node3 = cluster.add_instance( "node3", image="yandex/clickhouse-server", - tag="21.5", + tag="21.6", with_installed_binary=True, stay_alive=True, allow_analyzer=False, @@ -31,7 +31,7 @@ node3 = cluster.add_instance( node4 = cluster.add_instance( "node4", image="yandex/clickhouse-server", - tag="21.5", + tag="21.6", with_installed_binary=True, stay_alive=True, allow_analyzer=False, @@ -39,7 +39,7 @@ node4 = cluster.add_instance( node5 = cluster.add_instance( "node5", image="yandex/clickhouse-server", - tag="21.5", + tag="21.6", with_installed_binary=True, stay_alive=True, allow_analyzer=False, @@ -47,7 +47,7 @@ node5 = cluster.add_instance( node6 = cluster.add_instance( "node6", image="yandex/clickhouse-server", - tag="21.5", + tag="21.6", with_installed_binary=True, stay_alive=True, allow_analyzer=False, diff --git a/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py index f3ae190ee46..4e84b4c10ca 100644 --- a/tests/integration/test_version_update_after_mutation/test.py +++ b/tests/integration/test_version_update_after_mutation/test.py @@ -10,7 +10,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.4.9.110", + tag="20.8.11.17", with_installed_binary=True, stay_alive=True, main_configs=[ @@ -22,7 +22,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.4.9.110", + tag="20.8.11.17", with_installed_binary=True, stay_alive=True, main_configs=[ @@ -34,7 +34,7 @@ node3 = cluster.add_instance( "node3", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.4.9.110", + tag="20.8.11.17", with_installed_binary=True, stay_alive=True, main_configs=[ @@ -72,8 +72,8 @@ def test_mutate_and_upgrade(start_cluster): node1.query("DETACH TABLE mt") # stop being leader node1.query("SYSTEM FLUSH LOGS") node2.query("SYSTEM FLUSH LOGS") - node1.restart_with_latest_version(signal=9, fix_metadata=True) - node2.restart_with_latest_version(signal=9, fix_metadata=True) + node1.restart_with_latest_version(signal=9, fix_metadata=False) + node2.restart_with_latest_version(signal=9, fix_metadata=False) # After hard restart table can be in readonly mode exec_query_with_retry( @@ -129,7 +129,7 @@ def test_upgrade_while_mutation(start_cluster): # (We could be in process of creating some system table, which will leave empty directory on restart, # so when we start moving system tables from ordinary to atomic db, it will complain about some undeleted files) node3.query("SYSTEM FLUSH LOGS") - node3.restart_with_latest_version(signal=9, fix_metadata=True) + node3.restart_with_latest_version(signal=9, fix_metadata=False) # checks for readonly exec_query_with_retry(node3, "OPTIMIZE TABLE mt1", sleep_time=5, retry_count=60) diff --git a/tests/performance/group_array_sorted.xml b/tests/performance/group_array_sorted.xml new file mode 100644 index 00000000000..d5887998341 --- /dev/null +++ b/tests/performance/group_array_sorted.xml @@ -0,0 +1,31 @@ + + + 30000000000 + + + + + millions + + 50 + 100 + + + + window + + 10 + 1000 + 10000 + + + + + create table sorted_{millions}m engine MergeTree order by k as select number % 100 k, rand() v from numbers_mt(1000000 * {millions}) + optimize table sorted_{millions}m final + + select k, groupArraySorted({window})(v) from sorted_{millions}m group by k format Null + select k % 10 kk, groupArraySorted({window})(v) from sorted_{millions}m group by kk format Null + + drop table if exists 
sorted_{millions}m + diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference index 00a2cd14700..cd9f0142d45 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference @@ -3,7 +3,7 @@ runtime exceptions 0.05 unknown runtime exceptions 0.01 messages shorter than 10 1 messages shorter than 16 3 -exceptions shorter than 30 3 +exceptions shorter than 30 3 [] noisy messages 0.3 noisy Trace messages 0.16 noisy Debug messages 0.09 diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index 63432f127aa..062806baae9 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -13,11 +13,11 @@ select 'runtime messages', greatest(coalesce(sum(length(message_format_string) = where message not like '% Received from %clickhouse-staging.com:9440%'; -- Check the same for exceptions. The value was 0.03 -select 'runtime exceptions', max2(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.05) from logs +select 'runtime exceptions', greatest(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.05) from logs where (message like '%DB::Exception%' or message like '%Coordination::Exception%') and message not like '% Received from %clickhouse-staging.com:9440%'; -select 'unknown runtime exceptions', max2(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.01) from logs where +select 'unknown runtime exceptions', greatest(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.01) from logs where (message like '%DB::Exception%' or message like '%Coordination::Exception%') and message not like '% Received from %' and message not like '%(SYNTAX_ERROR)%'; @@ -46,14 +46,20 @@ create temporary table known_short_messages (s String) as select * from (select 'Attempt to read after eof', 'String size is too big ({}), maximum: {}', 'Processed: {}%', 'Creating {}: {}', 'Table {}.{} doesn''t exist', 'Invalid cache key hex: {}', 'User has been dropped', 'Illegal type {} of argument of function {}. Should be DateTime or DateTime64', +'Unknown statistic column: {}', 'Bad SSH public key provided', 'Database {} does not exist', 'Substitution {} is not set', 'Invalid cache key hex: {}' ] as arr) array join arr; -- Check that we don't have too many short meaningless message patterns. -select 'messages shorter than 10', max2(countDistinctOrDefault(message_format_string), 1) from logs where length(message_format_string) < 10 and message_format_string not in known_short_messages; +select 'messages shorter than 10', + greatest(uniqExact(message_format_string), 1) + from logs + where length(message_format_string) < 10 and message_format_string not in known_short_messages; -- Same as above. 
Feel free to update the threshold or remove this query if really necessary -select 'messages shorter than 16', max2(countDistinctOrDefault(message_format_string), 3) from logs where length(message_format_string) < 16 and message_format_string not in known_short_messages; +select 'messages shorter than 16', + greatest(uniqExact(message_format_string), 3) + from logs where length(message_format_string) < 16 and message_format_string not in known_short_messages; -- Unlike above, here we look at length of the formatted message, not format string. Most short format strings are fine because they end up decorated with context from outer or inner exceptions, e.g.: -- "Expected end of line" -> "Code: 117. DB::Exception: Expected end of line: (in file/uri /var/lib/clickhouse/user_files/data_02118): (at row 1)" @@ -62,42 +68,53 @@ select 'messages shorter than 16', max2(countDistinctOrDefault(message_format_st -- This table currently doesn't have enough information to do this reliably, so we just regex search for " (ERROR_NAME_IN_CAPS)" and hope that's good enough. -- For the "Code: 123. DB::Exception: " part, we just subtract 26 instead of searching for it. Because sometimes it's not at the start, e.g.: -- "Unexpected error, will try to restart main thread: Code: 341. DB::Exception: Unexpected error: Code: 57. DB::Exception:[...]" -select 'exceptions shorter than 30', max2(countDistinctOrDefault(message_format_string), 3) from logs - where message ilike '%DB::Exception%' and if(length(regexpExtract(message, '(.*)\\([A-Z0-9_]+\\)')) as pref > 0, pref, length(message)) < 30 + 26 and message_format_string not in known_short_messages; +select 'exceptions shorter than 30', + greatest(uniqExact(message_format_string), 3) AS c, + c = 3 ? [] : groupUniqArray(message_format_string) + from logs + where message ilike '%DB::Exception%' and if(length(extract(message, '(.*)\\([A-Z0-9_]+\\)')) as pref > 0, pref, length(message)) < 30 + 26 and message_format_string not in known_short_messages; -- Avoid too noisy messages: top 1 message frequency must be less than 30%. 
We should reduce the threshold -select 'noisy messages', max2((select count() from logs group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.30); +select 'noisy messages', + greatest((select count() from logs group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.30); -- Same as above, but excluding Test level (actually finds top 1 Trace message) with ('Access granted: {}{}', '{} -> {}') as frequent_in_tests -select 'noisy Trace messages', max2((select count() from logs where level!='Test' and message_format_string not in frequent_in_tests - group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.16); +select 'noisy Trace messages', + greatest((select count() from logs where level!='Test' and message_format_string not in frequent_in_tests + group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.16); -- Same as above for Debug -select 'noisy Debug messages', max2((select count() from logs where level <= 'Debug' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.09); +select 'noisy Debug messages', + greatest((select count() from logs where level <= 'Debug' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.09); -- Same as above for Info -select 'noisy Info messages', max2((select count() from logs where level <= 'Information' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.05); +select 'noisy Info messages', + greatest((select count() from logs where level <= 'Information' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.05); -- Same as above for Warning with ('Not enabled four letter command {}') as frequent_in_tests -select 'noisy Warning messages', max2(coalesce((select countOrDefault() from logs where level = 'Warning' and message_format_string not in frequent_in_tests +select 'noisy Warning messages', + greatest(coalesce((select count() from logs where level = 'Warning' and message_format_string not in frequent_in_tests group by message_format_string order by count() desc limit 1), 0) / (select count() from logs), 0.01); -- Same as above for Error -select 'noisy Error messages', max2(coalesce((select countOrDefault() from logs where level = 'Error' group by message_format_string order by count() desc limit 1), 0) / (select count() from logs), 0.02); +select 'noisy Error messages', + greatest(coalesce((select count() from logs where level = 'Error' group by message_format_string order by count() desc limit 1), 0) / (select count() from logs), 0.02); select 'no Fatal messages', count() from logs where level = 'Fatal'; -- Avoid too noisy messages: limit the number of messages with high frequency -select 'number of too noisy messages', max2(count(), 3) from (select count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.10); -select 'number of noisy messages', max2(count(), 10) from (select count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.05); +select 'number of too noisy messages', + greatest(count(), 3) from (select count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.10); +select 'number of noisy messages', + greatest(count(), 10) from (select 
count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.05); -- Each message matches its pattern (returns 0 rows) --- FIXME maybe we should make it stricter ('Code:%Exception: '||s||'%'), but it's not easy because of addMessage -select 'incorrect patterns', max2(countDistinct(message_format_string), 15) from ( +-- Note: maybe we should make it stricter ('Code:%Exception: '||s||'%'), but it's not easy because of addMessage +select 'incorrect patterns', greatest(uniqExact(message_format_string), 15) from ( select message_format_string, any(message) as any_message from logs where ((rand() % 8) = 0) and message not like (replaceRegexpAll(message_format_string, '{[:.0-9dfx]*}', '%') as s) diff --git a/tests/queries/0_stateless/00059_shard_global_in_mergetree.reference b/tests/queries/0_stateless/00059_shard_global_in_mergetree.reference new file mode 100644 index 00000000000..208e649c056 --- /dev/null +++ b/tests/queries/0_stateless/00059_shard_global_in_mergetree.reference @@ -0,0 +1,7 @@ +20 +20 +20 +20 +20 +20 +20 diff --git a/tests/queries/0_stateless/00059_shard_global_in_mergetree.sql b/tests/queries/0_stateless/00059_shard_global_in_mergetree.sql new file mode 100644 index 00000000000..62eec6f324b --- /dev/null +++ b/tests/queries/0_stateless/00059_shard_global_in_mergetree.sql @@ -0,0 +1,25 @@ +-- Tags: shard + +-- test for #56790 + +DROP TABLE IF EXISTS test_local; + +CREATE TABLE test_local (x Int64) ENGINE = MergeTree order by x as select * from numbers(10); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where 'XXX' global in (select 'XXX'); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * global in (select * from numbers(10)); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * in (select * from numbers(10)); + +set prefer_localhost_replica=0; + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where 'XXX' global in (select 'XXX'); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * global in (select * from numbers(10)); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * in (select * from numbers(10)); + +DROP TABLE test_local; diff --git a/tests/queries/0_stateless/00116_storage_set.reference b/tests/queries/0_stateless/00116_storage_set.reference index 01bd24ebe17..b68e740f72b 100644 --- a/tests/queries/0_stateless/00116_storage_set.reference +++ b/tests/queries/0_stateless/00116_storage_set.reference @@ -19,3 +19,5 @@ abc Hello World abc +Hello +Hello diff --git a/tests/queries/0_stateless/00116_storage_set.sql b/tests/queries/0_stateless/00116_storage_set.sql index 0eeed7e859a..c156b387c8f 100644 --- a/tests/queries/0_stateless/00116_storage_set.sql +++ b/tests/queries/0_stateless/00116_storage_set.sql @@ -1,5 +1,6 @@ DROP TABLE IF EXISTS set; DROP TABLE IF EXISTS set2; +DROP TABLE IF EXISTS tab; CREATE TABLE set (x String) ENGINE = Set; @@ -26,4 +27,9 @@ SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s RENAME TABLE set2 TO set; SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set; +create table tab (x String) engine = MergeTree order by x as select 'Hello'; +SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings 
allow_experimental_analyzer=0; +SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings allow_experimental_analyzer=1; +DROP TABLE tab; + DROP TABLE set; diff --git a/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql index 965ce45fb90..422f4a010f1 100644 --- a/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql +++ b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql @@ -8,8 +8,6 @@ SELECT count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one) LIMIT 1 SET SELECT 'distributed_group_by_no_merge=2'; SET max_distributed_connections=1; SET max_threads=1; --- breaks any(_shard_num) -SET optimize_move_functions_out_of_any=0; SELECT 'LIMIT'; SELECT * FROM (SELECT any(_shard_num) shard_num, count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one)) ORDER BY shard_num LIMIT 1 SETTINGS distributed_group_by_no_merge=2; diff --git a/tests/queries/0_stateless/00718_format_datetime.reference b/tests/queries/0_stateless/00718_format_datetime.reference index 50874ac9b2e..f22c953e739 100644 --- a/tests/queries/0_stateless/00718_format_datetime.reference +++ b/tests/queries/0_stateless/00718_format_datetime.reference @@ -64,3 +64,13 @@ no formatting pattern no formatting pattern 2022-12-08 18:11:29.000000 2022-12-08 00:00:00.000000 2022-12-08 00:00:00.000000 +01 +01 +02 +02 +02 +1 +01 +2 +2 +02 diff --git a/tests/queries/0_stateless/00718_format_datetime.sql b/tests/queries/0_stateless/00718_format_datetime.sql index c0db6a4f64e..4f2ce70965b 100644 --- a/tests/queries/0_stateless/00718_format_datetime.sql +++ b/tests/queries/0_stateless/00718_format_datetime.sql @@ -90,3 +90,15 @@ select formatDateTime(toDateTime64('2022-12-08 18:11:29.1234', 0, 'UTC'), '%F %T select formatDateTime(toDateTime('2022-12-08 18:11:29', 'UTC'), '%F %T.%f'); select formatDateTime(toDate32('2022-12-08 18:11:29', 'UTC'), '%F %T.%f'); select formatDateTime(toDate('2022-12-08 18:11:29', 'UTC'), '%F %T.%f'); + +-- %c %k %l with different formatdatetime_format_without_leading_zeros +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%c') settings formatdatetime_format_without_leading_zeros = 0; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%m') settings formatdatetime_format_without_leading_zeros = 0; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%k') settings formatdatetime_format_without_leading_zeros = 0; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%l') settings formatdatetime_format_without_leading_zeros = 0; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%h') settings formatdatetime_format_without_leading_zeros = 0; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%c') settings formatdatetime_format_without_leading_zeros = 1; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%m') settings formatdatetime_format_without_leading_zeros = 1; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%k') settings formatdatetime_format_without_leading_zeros = 1; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%l') settings formatdatetime_format_without_leading_zeros = 1; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%h') settings formatdatetime_format_without_leading_zeros = 1; diff --git a/tests/queries/0_stateless/00727_concat.reference b/tests/queries/0_stateless/00727_concat.reference index 
af5626b4a11..6fb23c072d3 100644 --- a/tests/queries/0_stateless/00727_concat.reference +++ b/tests/queries/0_stateless/00727_concat.reference @@ -1 +1,74 @@ -Hello, world! +-- Const string + non-const arbitrary type +With 42 +With 43 +With 44 +With 45 +With 46 +With 47 +With 48 +With 49 +With 50 +With 51 +With 52 +With 53 +With 42.42 +With 43.43 +With 44 +With true +With false +With foo +With bar +With foo +With bar +With foo +With bar +With foo +With bar +With 42 +With 42 +With fae310ca-d52a-4923-9e9b-02bf67f4b009 +With 2023-11-14 +With 2123-11-14 +With 2023-11-14 05:50:12 +With 2023-11-14 05:50:12.123 +With hallo +With [\'foo\',\'bar\'] +With {"foo":"bar"} +With (42,\'foo\') +With {42:\'foo\'} +With 122.233.64.201 +With 2001:1:130f:2:3:9c0:876a:130b +With (42,43) +With [(0,0),(10,0),(10,10),(0,10)] +With [[(20,20),(50,20),(50,50),(20,50)],[(30,30),(50,50),(50,30)]] +With [[[(0,0),(10,0),(10,10),(0,10)]],[[(20,20),(50,20),(50,50),(20,50)],[(30,30),(50,50),(50,30)]]] +-- SimpleAggregateFunction +With 42 +With 4 +-- Nested +With [\'foo\',\'bar\'][\'qaz\',\'qux\'] +-- NULL arguments +\N +\N +\N +\N +\N +\N +\N +-- Various arguments tests +Non-const strings +Two arguments test +Three arguments test +3 arguments test with int type +42144 +42144255 +42144 +42144255 +-- Single argument tests +42 +42 +foo +foo +\N +\N +Testing the alias diff --git a/tests/queries/0_stateless/00727_concat.sql b/tests/queries/0_stateless/00727_concat.sql index 800ebd5ec53..f5048dcaaae 100644 --- a/tests/queries/0_stateless/00727_concat.sql +++ b/tests/queries/0_stateless/00727_concat.sql @@ -1 +1,96 @@ -SELECT CONCAT('Hello', ', ', 'world!'); +-- Tags: no-fasttest +-- no-fasttest: json type needs rapidjson library, geo types need s2 geometry + +SET allow_experimental_object_type = 1; +SET allow_suspicious_low_cardinality_types=1; + +SELECT '-- Const string + non-const arbitrary type'; +SELECT concat('With ', materialize(42 :: Int8)); +SELECT concat('With ', materialize(43 :: Int16)); +SELECT concat('With ', materialize(44 :: Int32)); +SELECT concat('With ', materialize(45 :: Int64)); +SELECT concat('With ', materialize(46 :: Int128)); +SELECT concat('With ', materialize(47 :: Int256)); +SELECT concat('With ', materialize(48 :: UInt8)); +SELECT concat('With ', materialize(49 :: UInt16)); +SELECT concat('With ', materialize(50 :: UInt32)); +SELECT concat('With ', materialize(51 :: UInt64)); +SELECT concat('With ', materialize(52 :: UInt128)); +SELECT concat('With ', materialize(53 :: UInt256)); +SELECT concat('With ', materialize(42.42 :: Float32)); +SELECT concat('With ', materialize(43.43 :: Float64)); +SELECT concat('With ', materialize(44.44 :: Decimal(2))); +SELECT concat('With ', materialize(true :: Bool)); +SELECT concat('With ', materialize(false :: Bool)); +SELECT concat('With ', materialize('foo' :: String)); +SELECT concat('With ', materialize('bar' :: FixedString(3))); +SELECT concat('With ', materialize('foo' :: Nullable(String))); +SELECT concat('With ', materialize('bar' :: Nullable(FixedString(3)))); +SELECT concat('With ', materialize('foo' :: LowCardinality(String))); +SELECT concat('With ', materialize('bar' :: LowCardinality(FixedString(3)))); +SELECT concat('With ', materialize('foo' :: LowCardinality(Nullable(String)))); +SELECT concat('With ', materialize('bar' :: LowCardinality(Nullable(FixedString(3))))); +SELECT concat('With ', materialize(42 :: LowCardinality(Nullable(UInt32)))); +SELECT concat('With ', materialize(42 :: LowCardinality(UInt32))); +SELECT concat('With ', 
materialize('fae310ca-d52a-4923-9e9b-02bf67f4b009' :: UUID)); +SELECT concat('With ', materialize('2023-11-14' :: Date)); +SELECT concat('With ', materialize('2123-11-14' :: Date32)); +SELECT concat('With ', materialize('2023-11-14 05:50:12' :: DateTime('Europe/Amsterdam'))); +SELECT concat('With ', materialize('2023-11-14 05:50:12.123' :: DateTime64(3, 'Europe/Amsterdam'))); +SELECT concat('With ', materialize('hallo' :: Enum('hallo' = 1))); +SELECT concat('With ', materialize(['foo', 'bar'] :: Array(String))); +SELECT concat('With ', materialize('{"foo": "bar"}' :: JSON)); +SELECT concat('With ', materialize((42, 'foo') :: Tuple(Int32, String))); +SELECT concat('With ', materialize(map(42, 'foo') :: Map(Int32, String))); +SELECT concat('With ', materialize('122.233.64.201' :: IPv4)); +SELECT concat('With ', materialize('2001:0001:130F:0002:0003:09C0:876A:130B' :: IPv6)); +SELECT concat('With ', materialize((42, 43) :: Point)); +SELECT concat('With ', materialize([(0,0),(10,0),(10,10),(0,10)] :: Ring)); +SELECT concat('With ', materialize([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]] :: Polygon)); +SELECT concat('With ', materialize([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]] :: MultiPolygon)); + +SELECT '-- SimpleAggregateFunction'; +DROP TABLE IF EXISTS concat_saf_test; +CREATE TABLE concat_saf_test(x SimpleAggregateFunction(max, Int32)) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO concat_saf_test VALUES (42); +INSERT INTO concat_saf_test SELECT max(number) FROM numbers(5); +SELECT concat('With ', x) FROM concat_saf_test ORDER BY x DESC; +DROP TABLE concat_saf_test; + +SELECT '-- Nested'; +DROP TABLE IF EXISTS concat_nested_test; +CREATE TABLE concat_nested_test(attrs Nested(k String, v String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO concat_nested_test VALUES (['foo', 'bar'], ['qaz', 'qux']); +SELECT concat('With ', attrs.k, attrs.v) FROM concat_nested_test; +DROP TABLE concat_nested_test; + +SELECT '-- NULL arguments'; +SELECT concat(NULL, NULL); +SELECT concat(NULL, materialize(NULL :: Nullable(UInt64))); +SELECT concat(materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); +SELECT concat(42, materialize(NULL :: Nullable(UInt64))); +SELECT concat('42', materialize(NULL :: Nullable(UInt64))); +SELECT concat(42, materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); +SELECT concat('42', materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); + +SELECT '-- Various arguments tests'; +SELECT concat(materialize('Non-const'), materialize(' strings')); +SELECT concat('Two arguments ', 'test'); +SELECT concat('Three ', 'arguments', ' test'); +SELECT concat(materialize(3 :: Int64), ' arguments test', ' with int type'); +SELECT concat(materialize(42 :: Int32), materialize(144 :: UInt64)); +SELECT concat(materialize(42 :: Int32), materialize(144 :: UInt64), materialize(255 :: UInt32)); +SELECT concat(42, 144); +SELECT concat(42, 144, 255); + +SELECT '-- Single argument tests'; +SELECT concat(42); +SELECT concat(materialize(42)); +SELECT concat('foo'); +SELECT concat(materialize('foo')); +SELECT concat(NULL); +SELECT concat(materialize(NULL :: Nullable(UInt64))); + +SELECT CONCAT('Testing the ', 'alias'); + +SELECT concat(); -- { serverError 42 } diff --git a/tests/queries/0_stateless/00732_base64_functions.reference b/tests/queries/0_stateless/00732_base64_functions.reference index f97c19427e7..8f91ffa74ab 100644 
--- a/tests/queries/0_stateless/00732_base64_functions.reference +++ b/tests/queries/0_stateless/00732_base64_functions.reference @@ -21,9 +21,9 @@ fooba foobar 1 1 1 1 -fooba -~ + + + Zm9v foo foo -TEcgT3B0aW11cw== diff --git a/tests/queries/0_stateless/00732_base64_functions.sql b/tests/queries/0_stateless/00732_base64_functions.sql index 99268004003..3c60bf939fe 100644 --- a/tests/queries/0_stateless/00732_base64_functions.sql +++ b/tests/queries/0_stateless/00732_base64_functions.sql @@ -2,17 +2,23 @@ SET send_logs_level = 'fatal'; -SELECT base64Encode(val) FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar']) val); +SELECT base64Encode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64Decode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryBase64Decode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64Encode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64Decode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryBase64Decode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- test with valid inputs + +SELECT base64Encode(val) FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar']) val); SELECT base64Decode(val) FROM (select arrayJoin(['', 'Zg==', 'Zm8=', 'Zm9v', 'Zm9vYg==', 'Zm9vYmE=', 'Zm9vYmFy']) val); SELECT tryBase64Decode(val) FROM (select arrayJoin(['', 'Zg==', 'Zm8=', 'Zm9v', 'Zm9vYg==', 'Zm9vYmE=', 'Zm9vYmFy']) val); SELECT base64Decode(base64Encode('foo')) = 'foo', base64Encode(base64Decode('Zm9v')) == 'Zm9v'; SELECT tryBase64Decode(base64Encode('foo')) = 'foo', base64Encode(tryBase64Decode('Zm9v')) == 'Zm9v'; -SELECT base64Encode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -SELECT base64Decode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -SELECT tryBase64Decode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- test with invalid inputs SELECT base64Decode('Zm9vYmF=Zm9v'); -- { serverError INCORRECT_DATA } SELECT tryBase64Decode('Zm9vYmF=Zm9v'); @@ -20,9 +26,11 @@ SELECT tryBase64Decode('Zm9vYmF=Zm9v'); SELECT base64Decode('foo'); -- { serverError INCORRECT_DATA } SELECT tryBase64Decode('foo'); +SELECT base64Decode('aoeo054640eu='); -- { serverError INCORRECT_DATA } +SELECT tryBase64Decode('aoeo054640eu='); + +-- test FixedString arguments + select base64Encode(toFixedString('foo', 3)); select base64Decode(toFixedString('Zm9v', 4)); select tryBase64Decode(toFixedString('Zm9v', 4)); - --- This query reproduces a bug in TurboBase64 library (which we no longer use) -select distinct base64Encode(materialize('LG Optimus')) from numbers(100); diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference index 58f8b7abfb3..3de05d66188 100644 --- a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference +++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference @@ -49,5 +49,5 @@ Check total_bytes/total_rows for Set 2048 50 2048 100 Check total_bytes/total_rows for Join -10240 50 -10240 100 +1 50 +1 100 diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql index 16085c8a995..ae9db656f00 100644 --- 
a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql +++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql @@ -134,7 +134,7 @@ DROP TABLE check_system_tables; SELECT 'Check total_bytes/total_rows for Join'; CREATE TABLE check_system_tables Engine=Join(ANY, LEFT, number) AS SELECT * FROM numbers(50); -SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +SELECT total_bytes BETWEEN 5000 AND 15000, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); INSERT INTO check_system_tables SELECT number+50 FROM numbers(50); -SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +SELECT total_bytes BETWEEN 5000 AND 15000, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); DROP TABLE check_system_tables; diff --git a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh index bab2304cec2..12d889a7137 100755 --- a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh +++ b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh @@ -7,6 +7,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") +REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')") + $CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS part_header_r1; @@ -54,8 +57,8 @@ elapsed=1 until [ $elapsed -eq 5 ]; do sleep $(( elapsed++ )) - count1=$($CLICKHOUSE_CLIENT --query="SELECT count(name) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/s1/replicas/1r1/parts'") - count2=$($CLICKHOUSE_CLIENT --query="SELECT count(name) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/s1/replicas/2r1/parts'") + count1=$($CLICKHOUSE_CLIENT --query="SELECT count(name) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/$SHARD/replicas/1$REPLICA/parts'") + count2=$($CLICKHOUSE_CLIENT --query="SELECT count(name) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/$SHARD/replicas/2$REPLICA/parts'") [[ $count1 == 1 && $count2 == 1 ]] && break done @@ -64,10 +67,10 @@ $CLICKHOUSE_CLIENT -nm -q " SELECT '*** Test part removal ***'; SELECT '*** replica 1 ***'; SELECT name FROM system.parts WHERE active AND database = currentDatabase() AND table = 'part_header_r1'; -SELECT name FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/s1/replicas/1r1/parts'; +SELECT name FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/$SHARD/replicas/1$REPLICA/parts'; SELECT '*** replica 2 ***'; SELECT name FROM system.parts WHERE active AND database = currentDatabase() AND table = 'part_header_r2'; -SELECT name FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/s1/replicas/2r1/parts'; +SELECT name FROM system.zookeeper WHERE path = 
'/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/$SHARD/replicas/2$REPLICA/parts'; SELECT '*** Test ALTER ***'; ALTER TABLE part_header_r1 MODIFY COLUMN y String; diff --git a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh index ad0146b9d99..57a41526900 100755 --- a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh +++ b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh CLICKHOUSE_TEST_ZOOKEEPER_PREFIX="${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}" - +SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS elog;" @@ -30,33 +30,33 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 3, 'h $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 3 rows -count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") +count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") while [[ $count != 2 ]] do sleep 1 - count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") + count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 1, 'hello')" $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 4 rows -count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") +count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") while [[ $count != 2 ]] do sleep 1 - count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") + count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 2, 'hello')" $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 5 rows -count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") +count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") while [[ $count != 2 ]] do sleep 1 - count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") + count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 2, 'hello')" diff --git 
a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh index f61a60a0bda..8ebe1807a1b 100755 --- a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh +++ b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh @@ -69,7 +69,7 @@ function alter_table() if [ -z "$table" ]; then continue; fi $CLICKHOUSE_CLIENT --distributed_ddl_task_timeout=0 -q \ "alter table $table update n = n + (select max(n) from merge(REGEXP('${CLICKHOUSE_DATABASE}.*'), '.*')) where 1 settings allow_nondeterministic_mutations=1" \ - 2>&1| grep -Fa "Exception: " | grep -Fv "Cannot enqueue query" | grep -Fv "ZooKeeper session expired" | grep -Fv UNKNOWN_DATABASE | grep -Fv UNKNOWN_TABLE | grep -Fv TABLE_IS_READ_ONLY | grep -Fv TABLE_IS_DROPPED | grep -Fv "Error while executing table function merge" + 2>&1| grep -Fa "Exception: " | grep -Fv "Cannot enqueue query" | grep -Fv "ZooKeeper session expired" | grep -Fv UNKNOWN_DATABASE | grep -Fv UNKNOWN_TABLE | grep -Fv TABLE_IS_READ_ONLY | grep -Fv TABLE_IS_DROPPED | grep -Fv ABORTED | grep -Fv "Error while executing table function merge" sleep 0.$RANDOM done } diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index 9289030331f..6c639926aac 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -24,6 +24,10 @@ ALTER DROP INDEX ['DROP INDEX'] TABLE ALTER INDEX ALTER MATERIALIZE INDEX ['MATERIALIZE INDEX'] TABLE ALTER INDEX ALTER CLEAR INDEX ['CLEAR INDEX'] TABLE ALTER INDEX ALTER INDEX ['INDEX'] \N ALTER TABLE +ALTER ADD STATISTIC ['ALTER ADD STATISTIC'] TABLE ALTER STATISTIC +ALTER DROP STATISTIC ['ALTER DROP STATISTIC'] TABLE ALTER STATISTIC +ALTER MATERIALIZE STATISTIC ['ALTER MATERIALIZE STATISTIC'] TABLE ALTER STATISTIC +ALTER STATISTIC ['STATISTIC'] \N ALTER TABLE ALTER ADD PROJECTION ['ADD PROJECTION'] TABLE ALTER PROJECTION ALTER DROP PROJECTION ['DROP PROJECTION'] TABLE ALTER PROJECTION ALTER MATERIALIZE PROJECTION ['MATERIALIZE PROJECTION'] TABLE ALTER PROJECTION diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by.reference b/tests/queries/0_stateless/01280_ttl_where_group_by.reference deleted file mode 100644 index 65e7e5b158f..00000000000 --- a/tests/queries/0_stateless/01280_ttl_where_group_by.reference +++ /dev/null @@ -1,26 +0,0 @@ -ttl_01280_1 -1 1 0 4 -1 2 3 7 -1 3 0 5 -2 1 0 1 -2 1 20 1 -ttl_01280_2 -1 1 [0,2,3] 4 -1 1 [5,4,1] 13 -1 3 [1,0,1,0] 17 -2 1 [3,1,0,3] 8 -3 1 [2,4,5] 8 -ttl_01280_3 -1 1 0 4 -1 1 10 6 -2 1 0 3 -3 1 8 2 -ttl_01280_4 -0 4 -13 9 -ttl_01280_5 -1 2 7 5 -2 3 6 5 -ttl_01280_6 -1 3 5 -2 3 5 diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by.sh b/tests/queries/0_stateless/01280_ttl_where_group_by.sh deleted file mode 100755 index e6f83d6edd1..00000000000 --- a/tests/queries/0_stateless/01280_ttl_where_group_by.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-parallel, no-fasttest - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. 
"$CURDIR"/../shell_config.sh - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_1" - -function optimize() -{ - for _ in {0..20}; do - $CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE $1 FINAL SETTINGS optimize_throw_if_noop=1" 2>/dev/null && break - sleep 0.3 - done -} - -# "SETTINGS max_parts_to_merge_at_once = 1" prevents merges to start before our own OPTIMIZE FINAL - -echo "ttl_01280_1" -$CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_1 (a Int, b Int, x Int, y Int, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second delete where x % 10 == 0 and y > 5 SETTINGS max_parts_to_merge_at_once = 1; -insert into ttl_01280_1 values (1, 1, 0, 4, now() + 10); -insert into ttl_01280_1 values (1, 1, 10, 6, now()); -insert into ttl_01280_1 values (1, 2, 3, 7, now()); -insert into ttl_01280_1 values (1, 3, 0, 5, now()); -insert into ttl_01280_1 values (2, 1, 20, 1, now()); -insert into ttl_01280_1 values (2, 1, 0, 1, now()); -insert into ttl_01280_1 values (3, 1, 0, 8, now());" - -sleep 2 -optimize "ttl_01280_1" -$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_1 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_2" - -echo "ttl_01280_2" -$CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_2 (a Int, b Int, x Array(Int32), y Double, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b set x = minForEach(x), y = sum(y), d = max(d) SETTINGS max_parts_to_merge_at_once = 1; -insert into ttl_01280_2 values (1, 1, array(0, 2, 3), 4, now() + 10); -insert into ttl_01280_2 values (1, 1, array(5, 4, 3), 6, now()); -insert into ttl_01280_2 values (1, 1, array(5, 5, 1), 7, now()); -insert into ttl_01280_2 values (1, 3, array(3, 0, 4), 5, now()); -insert into ttl_01280_2 values (1, 3, array(1, 1, 2, 1), 9, now()); -insert into ttl_01280_2 values (1, 3, array(3, 2, 1, 0), 3, now()); -insert into ttl_01280_2 values (2, 1, array(3, 3, 3), 7, now()); -insert into ttl_01280_2 values (2, 1, array(11, 1, 0, 3), 1, now()); -insert into ttl_01280_2 values (3, 1, array(2, 4, 5), 8, now());" - -sleep 2 -optimize "ttl_01280_2" -$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_2 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_3" - -echo "ttl_01280_3" -$CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_3 (a Int, b Int, x Int64, y Int, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a set b = min(b), x = argMax(x, d), y = argMax(y, d), d = max(d) SETTINGS max_parts_to_merge_at_once = 1; -insert into ttl_01280_3 values (1, 1, 0, 4, now() + 10); -insert into ttl_01280_3 values (1, 1, 10, 6, now() + 1); -insert into ttl_01280_3 values (1, 2, 3, 7, now()); -insert into ttl_01280_3 values (1, 3, 0, 5, now()); -insert into ttl_01280_3 values (2, 1, 20, 1, now()); -insert into ttl_01280_3 values (2, 1, 0, 3, now() + 1); -insert into ttl_01280_3 values (3, 1, 0, 3, now()); -insert into ttl_01280_3 values (3, 2, 8, 2, now() + 1); -insert into ttl_01280_3 values (3, 5, 5, 8, now());" - -sleep 2 -optimize "ttl_01280_3" -$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_3 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_4" - -echo "ttl_01280_4" -$CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_4 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), -(a + b)) ttl d + interval 1 second group by toDate(d) set x = sum(x), y = max(y) SETTINGS 
max_parts_to_merge_at_once = 1; -insert into ttl_01280_4 values (1, 1, 0, 4, now() + 10); -insert into ttl_01280_4 values (10, 2, 3, 3, now()); -insert into ttl_01280_4 values (2, 10, 1, 7, now()); -insert into ttl_01280_4 values (3, 3, 5, 2, now()); -insert into ttl_01280_4 values (1, 5, 4, 9, now())" - -sleep 2 -optimize "ttl_01280_4" -$CLICKHOUSE_CLIENT --query "select x, y from ttl_01280_4 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_5" - -echo "ttl_01280_5" -$CLICKHOUSE_CLIENT -n --query "create table ttl_01280_5 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a set x = sum(x), b = argMax(b, -b) SETTINGS max_parts_to_merge_at_once = 1; -insert into ttl_01280_5 values (1, 2, 3, 5, now()); -insert into ttl_01280_5 values (2, 10, 1, 5, now()); -insert into ttl_01280_5 values (2, 3, 5, 5, now()); -insert into ttl_01280_5 values (1, 5, 4, 5, now());" - -sleep 2 -optimize "ttl_01280_5" -$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_5 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_6" - -echo "ttl_01280_6" -$CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_6 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a SETTINGS max_parts_to_merge_at_once = 1; -insert into ttl_01280_6 values (1, 2, 3, 5, now()); -insert into ttl_01280_6 values (2, 10, 3, 5, now()); -insert into ttl_01280_6 values (2, 3, 3, 5, now()); -insert into ttl_01280_6 values (1, 5, 3, 5, now())" - -sleep 2 -optimize "ttl_01280_6" -$CLICKHOUSE_CLIENT --query "select a, x, y from ttl_01280_6 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_1" -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_2" -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_3" -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_4" -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_5" -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_6" diff --git a/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql b/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql index 2937e856bf5..3f08936e636 100644 --- a/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql +++ b/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql @@ -1,5 +1,4 @@ set optimize_aggregators_of_group_by_keys = 1; -set optimize_move_functions_out_of_any = 0; SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; diff --git a/tests/queries/0_stateless/01322_any_input_optimize.reference b/tests/queries/0_stateless/01322_any_input_optimize.reference deleted file mode 100644 index f88f2f5937c..00000000000 --- a/tests/queries/0_stateless/01322_any_input_optimize.reference +++ /dev/null @@ -1,32 +0,0 @@ -SELECT any(number) + (any(number) * 2) -FROM numbers(1, 2) -3 -SELECT anyLast(number) + (anyLast(number) * 2) -FROM numbers(1, 2) -6 -WITH any(number) * 3 AS x -SELECT x -FROM numbers(1, 2) -3 -SELECT - anyLast(number) * 3 AS x, - x -FROM numbers(1, 2) -6 6 -SELECT any(number + (number * 2)) -FROM numbers(1, 2) -3 -SELECT anyLast(number + (number * 2)) -FROM numbers(1, 2) -6 -WITH any(number * 3) AS x -SELECT x -FROM numbers(1, 2) -3 -SELECT - anyLast(number * 3) AS x, - x -FROM numbers(1, 
2) -6 6 -arrayJoin -0 [] diff --git a/tests/queries/0_stateless/01322_any_input_optimize.sql b/tests/queries/0_stateless/01322_any_input_optimize.sql deleted file mode 100644 index 4c3345f4be4..00000000000 --- a/tests/queries/0_stateless/01322_any_input_optimize.sql +++ /dev/null @@ -1,34 +0,0 @@ -SET optimize_move_functions_out_of_any = 1; - -EXPLAIN SYNTAX SELECT any(number + number * 2) FROM numbers(1, 2); -SELECT any(number + number * 2) FROM numbers(1, 2); - -EXPLAIN SYNTAX SELECT anyLast(number + number * 2) FROM numbers(1, 2); -SELECT anyLast(number + number * 2) FROM numbers(1, 2); - -EXPLAIN SYNTAX WITH any(number * 3) AS x SELECT x FROM numbers(1, 2); -WITH any(number * 3) AS x SELECT x FROM numbers(1, 2); - -EXPLAIN SYNTAX SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2); -SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2); - -SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 } - -SET optimize_move_functions_out_of_any = 0; - -EXPLAIN SYNTAX SELECT any(number + number * 2) FROM numbers(1, 2); -SELECT any(number + number * 2) FROM numbers(1, 2); - -EXPLAIN SYNTAX SELECT anyLast(number + number * 2) FROM numbers(1, 2); -SELECT anyLast(number + number * 2) FROM numbers(1, 2); - -EXPLAIN SYNTAX WITH any(number * 3) AS x SELECT x FROM numbers(1, 2); -WITH any(number * 3) AS x SELECT x FROM numbers(1, 2); - -EXPLAIN SYNTAX SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2); -SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2); - -SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 } - -SELECT 'arrayJoin'; -SELECT *, any(arrayJoin([[], []])) FROM numbers(1) GROUP BY number; diff --git a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh index 2d761df998e..67a2a70b509 100755 --- a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh +++ b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh @@ -5,6 +5,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") +REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')") + # Check that if we have one inactive replica and a huge number of INSERTs to active replicas, # the number of nodes in ZooKeeper does not grow unbounded. 
@@ -32,16 +35,16 @@ for _ in {1..60}; do done -$CLICKHOUSE_CLIENT --query "SELECT numChildren < $((SCALE / 4)) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1' AND name = 'log'"; +$CLICKHOUSE_CLIENT --query "SELECT numChildren < $((SCALE / 4)) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD' AND name = 'log'"; echo -e '\n---\n'; -$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/1r1' AND name = 'is_lost'"; -$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/2r1' AND name = 'is_lost'"; +$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/1$REPLICA' AND name = 'is_lost'"; +$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/2$REPLICA' AND name = 'is_lost'"; echo -e '\n---\n'; $CLICKHOUSE_CLIENT --query "ATTACH TABLE r2" $CLICKHOUSE_CLIENT --receive_timeout 600 --query "SYSTEM SYNC REPLICA r2" # Need to increase timeout, otherwise it timed out in debug build -$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/2r1' AND name = 'is_lost'"; +$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/2$REPLICA' AND name = 'is_lost'"; $CLICKHOUSE_CLIENT -n --query " DROP TABLE IF EXISTS r1; diff --git a/tests/queries/0_stateless/01398_any_with_alias.reference b/tests/queries/0_stateless/01398_any_with_alias.reference deleted file mode 100644 index 4f8e72ef29c..00000000000 --- a/tests/queries/0_stateless/01398_any_with_alias.reference +++ /dev/null @@ -1,8 +0,0 @@ -"n" -0 -SELECT any(number) * any(number) AS n -FROM numbers(100) -"n" -0,0 -SELECT (any(number), any(number) * 2) AS n -FROM numbers(100) diff --git a/tests/queries/0_stateless/01398_any_with_alias.sql b/tests/queries/0_stateless/01398_any_with_alias.sql deleted file mode 100644 index a65b8132c67..00000000000 --- a/tests/queries/0_stateless/01398_any_with_alias.sql +++ /dev/null @@ -1,7 +0,0 @@ -SET optimize_move_functions_out_of_any = 1; - -SELECT any(number * number) AS n FROM numbers(100) FORMAT CSVWithNames; -EXPLAIN SYNTAX SELECT any(number * number) AS n FROM numbers(100); - -SELECT any((number, number * 2)) as n FROM numbers(100) FORMAT CSVWithNames; -EXPLAIN SYNTAX SELECT any((number, number * 2)) as n FROM numbers(100); diff --git a/tests/queries/0_stateless/01414_optimize_any_bug.sql b/tests/queries/0_stateless/01414_optimize_any_bug.sql deleted file mode 100644 index ec24a09fc11..00000000000 --- a/tests/queries/0_stateless/01414_optimize_any_bug.sql +++ /dev/null @@ -1,19 +0,0 @@ -DROP TABLE IF EXISTS test; - -CREATE TABLE test -( - `Source.C1` Array(UInt64), - `Source.C2` Array(UInt64) -) -ENGINE = MergeTree() -ORDER BY tuple(); - -SET enable_positional_arguments=0; -SET optimize_move_functions_out_of_any = 1; - -SELECT any(arrayFilter((c, d) -> (4 = d), `Source.C1`, `Source.C2`)[1]) AS x -FROM test -WHERE 0 -GROUP BY 42; - -DROP TABLE test; diff --git a/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.reference 
b/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.reference index 8c76b239991..1fb8df14afc 100644 --- a/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.reference +++ b/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.reference @@ -1,10 +1,8 @@ 1 1 -1 other google 1 -1 2 other other diff --git a/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql b/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql index 1e1d87a5ad5..91044859c1c 100644 --- a/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql +++ b/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql @@ -1,11 +1,9 @@ -- Tags: distributed -SET optimize_move_functions_out_of_any = 1; SET optimize_injective_functions_inside_uniq = 1; SET optimize_arithmetic_operations_in_aggregate_functions = 1; SET optimize_if_transform_strings_to_enum = 1; -SELECT any(number + 1) FROM numbers(1); SELECT uniq(bitNot(number)) FROM numbers(1); SELECT sum(number + 1) FROM numbers(1); SELECT transform(number, [1, 2], ['google', 'censor.net'], 'other') FROM numbers(1); @@ -20,7 +18,6 @@ CREATE TABLE dist AS local_table ENGINE = Distributed(test_cluster_two_shards_lo INSERT INTO local_table SELECT number FROM numbers(1); -SELECT any(number + 1) FROM dist; SELECT uniq(bitNot(number)) FROM dist; SELECT sum(number + 1) FROM dist; SELECT transform(number, [1, 2], ['google', 'censor.net'], 'other') FROM dist; diff --git a/tests/queries/0_stateless/01586_replicated_mutations_empty_partition.sql b/tests/queries/0_stateless/01586_replicated_mutations_empty_partition.sql index b5ad6c06e96..c4a3c939c26 100644 --- a/tests/queries/0_stateless/01586_replicated_mutations_empty_partition.sql +++ b/tests/queries/0_stateless/01586_replicated_mutations_empty_partition.sql @@ -16,7 +16,7 @@ INSERT INTO replicated_mutations_empty_partitions SETTINGS insert_keeper_fault_i SELECT count(distinct value) FROM replicated_mutations_empty_partitions; -SELECT count() FROM system.zookeeper WHERE path = '/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/s1/block_numbers'; +SELECT count() FROM system.zookeeper WHERE path = '/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/'||getMacro('shard')||'/block_numbers'; ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '3'; ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '4'; @@ -24,7 +24,7 @@ ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '5'; ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '9'; -- still ten records -SELECT count() FROM system.zookeeper WHERE path = '/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/s1/block_numbers'; +SELECT count() FROM system.zookeeper WHERE path = '/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/'||getMacro('shard')||'/block_numbers'; ALTER TABLE replicated_mutations_empty_partitions MODIFY COLUMN value UInt64 SETTINGS replication_alter_partitions_sync=2; diff --git a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh index f8f3ccd6dd6..2762f918d72 100755 --- a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh +++ b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh @@ -7,6 +7,9 @@ 
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=./replication.lib . "$CURDIR"/replication.lib +SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") +REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')") + REPLICAS=5 for i in $(seq $REPLICAS); do @@ -79,9 +82,9 @@ while true; do done -metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/r11/' and name = 'metadata_version'") +metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/${REPLICA}1/' and name = 'metadata_version'") for i in $(seq $REPLICAS); do - replica_metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/r1$i/' and name = 'metadata_version'") + replica_metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/${REPLICA}$i/' and name = 'metadata_version'") if [ "$metadata_version" != "$replica_metadata_version" ]; then echo "Metadata version on replica $i differs from the first replica, FAIL" diff --git a/tests/queries/0_stateless/01650_any_null_if.reference b/tests/queries/0_stateless/01650_any_null_if.reference deleted file mode 100644 index e965047ad7c..00000000000 --- a/tests/queries/0_stateless/01650_any_null_if.reference +++ /dev/null @@ -1 +0,0 @@ -Hello diff --git a/tests/queries/0_stateless/01650_any_null_if.sql b/tests/queries/0_stateless/01650_any_null_if.sql deleted file mode 100644 index 17f57e92032..00000000000 --- a/tests/queries/0_stateless/01650_any_null_if.sql +++ /dev/null @@ -1,6 +0,0 @@ -SELECT any(nullIf(s, '')) FROM (SELECT arrayJoin(['', 'Hello']) AS s); - -SET optimize_move_functions_out_of_any = 0; -EXPLAIN SYNTAX select any(nullIf('', ''), 'some text'); -- { serverError 42 } -SET optimize_move_functions_out_of_any = 1; -EXPLAIN SYNTAX select any(nullIf('', ''), 'some text'); -- { serverError 42 } diff --git a/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference b/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference index 664d8e84f27..b4eaf226106 100644 --- a/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference +++ b/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference @@ -14,5 +14,3 @@ abandonable_lock-other failed_parts last_part parallel -shared -shared diff --git a/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql b/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql index cf4bc7650e7..3b321d3cea5 100644 --- a/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql +++ b/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql @@ -8,17 +8,17 @@ CREATE TABLE sample_table ( ENGINE ReplicatedMergeTree('/clickhouse/{database}/01700_system_zookeeper_path_in/{shard}', '{replica}') ORDER BY tuple(); -SELECT name FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1' AND name like 'block%' ORDER BY name; -SELECT name FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/replicas' AND name LIKE '%r1%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || 
getMacro('shard') AND name like 'block%' ORDER BY name; +SELECT 'r1' FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/replicas' AND name LIKE '%'|| getMacro('replica') ||'%' ORDER BY name; SELECT '========'; -SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1') AND name LIKE 'block%' ORDER BY name; -SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/replicas') AND name LIKE '%r1%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard')) AND name LIKE 'block%' ORDER BY name; +SELECT 'r1' FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/replicas') AND name LIKE '%' || getMacro('replica') || '%' ORDER BY name; SELECT '========'; -SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1', - '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/replicas') AND name LIKE 'block%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard'), + '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/replicas') AND name LIKE 'block%' ORDER BY name; SELECT '========'; -SELECT name FROM system.zookeeper WHERE path IN (SELECT concat('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/', name) - FROM system.zookeeper WHERE (name != 'replicas' AND name NOT LIKE 'leader_election%' AND path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1')) ORDER BY name; +SELECT name FROM system.zookeeper WHERE path IN (SELECT concat('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/', name) + FROM system.zookeeper WHERE (name != 'replicas' AND name NOT LIKE 'leader_election%' AND name NOT LIKE 'zero_copy_%' AND path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard'))) ORDER BY name; DROP TABLE IF EXISTS sample_table; diff --git a/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.reference b/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.sql b/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.sql new file mode 100644 index 00000000000..40847a301c2 --- /dev/null +++ b/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (id UInt32, s String) Engine = MergeTree ORDER BY id; +CREATE TABLE t2 (id1 UInt32, id2 UInt32) Engine = MergeTree ORDER BY id1 SETTINGS index_granularity = 1; +INSERT INTO t2 SELECT number, number from numbers(100); +ALTER TABLE t2 ADD PROJECTION proj (SELECT id2 ORDER BY id2); +INSERT INTO t2 SELECT number, number from numbers(100); + +SELECT s FROM t1 as lhs LEFT JOIN (SELECT * FROM t2 WHERE id2 = 2) as rhs ON lhs.id = rhs.id2; + +DROP TABLE t1; +DROP TABLE t2; diff --git 
a/tests/queries/0_stateless/01710_projection_aggregation_in_order.sql b/tests/queries/0_stateless/01710_projection_aggregation_in_order.sql index e4fb1816c89..06f192adb57 100644 --- a/tests/queries/0_stateless/01710_projection_aggregation_in_order.sql +++ b/tests/queries/0_stateless/01710_projection_aggregation_in_order.sql @@ -1,5 +1,3 @@ --- Tags: disabled --- FIXME https://github.com/ClickHouse/ClickHouse/issues/49552 -- Test that check the correctness of the result for optimize_aggregation_in_order and projections, -- not that this optimization will take place. @@ -20,7 +18,7 @@ CREATE TABLE normal ) ) ENGINE = MergeTree -ORDER BY (key, ts); +ORDER BY tuple(); INSERT INTO normal SELECT number, @@ -52,7 +50,7 @@ CREATE TABLE agg ) ) ENGINE = MergeTree -ORDER BY (key, ts); +ORDER BY tuple(); INSERT INTO agg SELECT 1, diff --git a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference index 9016e731106..6adb2382a6f 100644 --- a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference +++ b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference @@ -19,10 +19,8 @@ explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 Expression (Projection) LimitBy Union - Expression (Before LIMIT BY) - LimitBy - Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))) - ReadFromStorage (SystemNumbers) + Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))) + ReadFromStorage (SystemNumbers) Expression ReadFromRemote (Read from remote replica) explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized @@ -58,11 +56,10 @@ Expression (Projection) Expression (Before LIMIT BY) Sorting (Merge sorted streams for ORDER BY, without aggregation) Union - LimitBy - Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) [lifted up part])) - Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))) - ReadFromStorage (SystemNumbers) + Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) [lifted up part]) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))) + ReadFromStorage (SystemNumbers) ReadFromRemote (Read from remote replica) explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized Expression (Projection) diff --git a/tests/queries/0_stateless/02003_memory_limit_in_client.reference b/tests/queries/0_stateless/02003_memory_limit_in_client.reference index 541b3a18e90..fae767357c6 100644 --- a/tests/queries/0_stateless/02003_memory_limit_in_client.reference +++ 
b/tests/queries/0_stateless/02003_memory_limit_in_client.reference @@ -1 +1,13 @@ 60000 +60000 +60000 +60000 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/02003_memory_limit_in_client.sh b/tests/queries/0_stateless/02003_memory_limit_in_client.sh index 2d2493828c8..96028f4847a 100755 --- a/tests/queries/0_stateless/02003_memory_limit_in_client.sh +++ b/tests/queries/0_stateless/02003_memory_limit_in_client.sh @@ -1,4 +1,4 @@ -#!/usr/bin/bash -f +#!/usr/bin/env bash CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -6,3 +6,19 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --max_memory_usage_in_client=1 -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" $CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" + +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5K' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5k' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" + +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2P' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2.1p' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10E' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10.2e' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1.1T' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1m' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='14g' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11t' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F 
"CANNOT_PARSE_INPUT_ASSERTION_FAILED" diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference b/tests/queries/0_stateless/02052_last_granula_adjust_logical_error.reference similarity index 100% rename from tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference rename to tests/queries/0_stateless/02052_last_granula_adjust_logical_error.reference diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 b/tests/queries/0_stateless/02052_last_granula_adjust_logical_error.sql.j2 similarity index 100% rename from tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 rename to tests/queries/0_stateless/02052_last_granula_adjust_logical_error.sql.j2 diff --git a/tests/queries/0_stateless/02226_filesystem_cache_profile_events.reference b/tests/queries/0_stateless/02226_filesystem_cache_profile_events.reference index 2ee0f256949..c538301cbd9 100644 --- a/tests/queries/0_stateless/02226_filesystem_cache_profile_events.reference +++ b/tests/queries/0_stateless/02226_filesystem_cache_profile_events.reference @@ -1,15 +1,15 @@ Using storage policy: s3_cache -1 0 1 -0 1 0 -0 1 0 +0 1 1 0 1 +1 0 0 1 0 +1 0 0 1 0 0 Using storage policy: local_cache -1 0 1 -0 1 0 -0 1 0 +0 1 1 0 1 +1 0 0 1 0 +1 0 0 1 0 0 Using storage policy: azure_cache -1 0 1 -0 1 0 -0 1 0 +0 1 1 0 1 +1 0 0 1 0 +1 0 0 1 0 0 diff --git a/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh b/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh index f071a570243..02e98bbb1b0 100755 --- a/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh +++ b/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh @@ -7,11 +7,10 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh - for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do echo "Using storage policy: $STORAGE_POLICY" - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ SET max_memory_usage='20G'; SET enable_filesystem_cache_on_write_operations = 0; @@ -24,11 +23,13 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do query="SELECT * FROM test_02226 LIMIT 10" - query_id=$(clickhouse client --query "select queryID() from ($query) limit 1" 2>&1) + query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1" 2>&1) - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ SYSTEM FLUSH LOGS; - SELECT ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, + SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit, + ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss, + ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read, ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download FROM system.query_log @@ -39,16 +40,18 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do LIMIT 1; """ - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ set remote_filesystem_read_method = 'read'; set local_filesystem_read_method = 'pread'; """ - query_id=$(clickhouse client --query "select queryID() from ($query) limit 1" 2>&1) + query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1" 2>&1) - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ SYSTEM FLUSH LOGS; - SELECT ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, + SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit, + ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss, + ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read, ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download FROM system.query_log @@ -60,15 +63,17 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do """ - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ set remote_filesystem_read_method='threadpool'; """ - query_id=$(clickhouse client --query "select queryID() from ($query) limit 1") + query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1") - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ SYSTEM FLUSH LOGS; - SELECT ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, + SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit, + ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss, + ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read, ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download FROM system.query_log @@ -79,7 +84,7 @@ for STORAGE_POLICY in 's3_cache' 
'local_cache' 'azure_cache'; do LIMIT 1; """ - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ SELECT * FROM test_02226 WHERE value LIKE '%abc%' ORDER BY value LIMIT 10 FORMAT Null; SET enable_filesystem_cache_on_write_operations = 1; @@ -92,5 +97,5 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do INSERT INTO test_02226 SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000; """ - clickhouse client --query "DROP TABLE test_02226" + $CLICKHOUSE_CLIENT --query "DROP TABLE test_02226" done diff --git a/tests/queries/0_stateless/02233_interpolate_1.sql b/tests/queries/0_stateless/02233_interpolate_1.sql index 3d416b27f45..d589a18421b 100644 --- a/tests/queries/0_stateless/02233_interpolate_1.sql +++ b/tests/queries/0_stateless/02233_interpolate_1.sql @@ -26,7 +26,7 @@ SELECT n, source, inter FROM ( # Test INTERPOLATE with incompatible expression - should produce error SELECT n, source, inter FROM ( SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 -) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS inter||'inter'); -- { serverError 44 } +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS reverse(inter)); -- { serverError 44 } # Test INTERPOLATE with column from WITH FILL expression - should produce error SELECT n, source, inter FROM ( diff --git a/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh b/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh index 9654d3146e2..ae2a2351c6b 100755 --- a/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh +++ b/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh @@ -90,7 +90,7 @@ hexdump -C $BINARY_FILE_PATH echo echo "Decoded with protoc:" -(cd $SCHEMADIR && $PROTOC_BINARY --decode Message "$PROTOBUF_FILE_NAME".proto) < $BINARY_FILE_PATH +(cd $SCHEMADIR && $PROTOC_BINARY --proto_path=. 
--proto_path=/usr/share/clickhouse/protos --decode Message "$PROTOBUF_FILE_NAME".proto) < $BINARY_FILE_PATH echo echo "Proto message with wrapper for (NULL, 1), ('', 2), ('str', 3):" diff --git a/tests/queries/0_stateless/02302_s3_file_pruning.reference b/tests/queries/0_stateless/02302_s3_file_pruning.reference index f8d2bdd0612..7e69bdd55db 100644 --- a/tests/queries/0_stateless/02302_s3_file_pruning.reference +++ b/tests/queries/0_stateless/02302_s3_file_pruning.reference @@ -24,4 +24,14 @@ insert into test_02302 select 1 settings s3_create_new_file_on_insert = true; insert into test_02302 select 2 settings s3_create_new_file_on_insert = true; select * from test_02302 where _file like '%1'; 1 +select _file, * from test_02302 where _file like '%1'; +test_02302.1 1 +set max_rows_to_read = 2; +select * from test_02302 where (_file like '%.1' OR _file like '%.2') AND a > 1; +2 +set max_rows_to_read = 999; +select 'a1' as _file, * from test_02302 where _file like '%1' ORDER BY a; +a1 0 +a1 1 +a1 2 drop table test_02302; diff --git a/tests/queries/0_stateless/02302_s3_file_pruning.sql b/tests/queries/0_stateless/02302_s3_file_pruning.sql index 624a87506d1..93fc8a1bc25 100644 --- a/tests/queries/0_stateless/02302_s3_file_pruning.sql +++ b/tests/queries/0_stateless/02302_s3_file_pruning.sql @@ -1,5 +1,5 @@ -- Tags: no-parallel, no-fasttest --- Tag no-fasttest: Depends on AWS +-- Tag no-fasttest: Depends on S3 -- { echo } drop table if exists test_02302; @@ -32,4 +32,14 @@ insert into test_02302 select 1 settings s3_create_new_file_on_insert = true; insert into test_02302 select 2 settings s3_create_new_file_on_insert = true; select * from test_02302 where _file like '%1'; + +select _file, * from test_02302 where _file like '%1'; + +set max_rows_to_read = 2; +select * from test_02302 where (_file like '%.1' OR _file like '%.2') AND a > 1; + +set max_rows_to_read = 999; + +select 'a1' as _file, * from test_02302 where _file like '%1' ORDER BY a; + drop table test_02302; diff --git a/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference b/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference index 935c53358c0..68eb282a6a1 100644 --- a/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference +++ b/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference @@ -117,5 +117,5 @@ SELECT arrayMap(x -> concat(concat(concat(concat(concat(toString(id), '___\0____ FROM test_table WHERE concat(concat(concat(toString(id), '___\0_______\0____'), toString(id)), concat(toString(id), NULL), toString(id)); SELECT '--'; -- -SELECT arrayMap(x -> concat(toString(id), arrayMap(x -> toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError 44 }; +SELECT arrayMap(x -> splitByChar(toString(id), arrayMap(x -> toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError 44 }; DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql b/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql index 8f8b5537da9..48e84246d1c 100644 --- a/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql +++ b/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql @@ -122,7 +122,7 @@ FROM test_table WHERE concat(concat(concat(toString(id), '___\0_______\0____'), SELECT '--'; -SELECT arrayMap(x -> concat(toString(id), arrayMap(x -> toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError 44 }; +SELECT arrayMap(x -> splitByChar(toString(id), arrayMap(x -> toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError 44 }; DROP TABLE 
test_table; diff --git a/tests/queries/0_stateless/02402_external_disk_mertrics.reference b/tests/queries/0_stateless/02402_external_disk_mertrics.reference index e8183f05f5d..7614df8ec46 100644 --- a/tests/queries/0_stateless/02402_external_disk_mertrics.reference +++ b/tests/queries/0_stateless/02402_external_disk_mertrics.reference @@ -1,3 +1,3 @@ -1 -1 -1 +ok +ok +ok diff --git a/tests/queries/0_stateless/02402_external_disk_mertrics.sql b/tests/queries/0_stateless/02402_external_disk_mertrics.sql index e9696eb7122..7237ea19775 100644 --- a/tests/queries/0_stateless/02402_external_disk_mertrics.sql +++ b/tests/queries/0_stateless/02402_external_disk_mertrics.sql @@ -31,40 +31,52 @@ FORMAT Null; SYSTEM FLUSH LOGS; SELECT - any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND - any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND - any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND - any(ProfileEvents['ExternalSortWritePart']) >= 1 AND - any(ProfileEvents['ExternalSortMerge']) >= 1 AND - any(ProfileEvents['ExternalSortCompressedBytes']) >= 100000 AND - any(ProfileEvents['ExternalSortUncompressedBytes']) >= 100000 AND - count() == 1 + if( + any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND + any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalSortWritePart']) >= 1 AND + any(ProfileEvents['ExternalSortMerge']) >= 1 AND + any(ProfileEvents['ExternalSortCompressedBytes']) >= 100000 AND + any(ProfileEvents['ExternalSortUncompressedBytes']) >= 100000 AND + count() == 1, + 'ok', + 'fail: ' || toString(count()) || ' ' || toString(any(ProfileEvents)) + ) FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = '02402_external_disk_mertrics/sort' AND query ILIKE 'SELECT%2097152%' AND type = 'QueryFinish'; SELECT - any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND - any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND - any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND - any(ProfileEvents['ExternalAggregationWritePart']) >= 1 AND - any(ProfileEvents['ExternalAggregationMerge']) >= 1 AND - any(ProfileEvents['ExternalAggregationCompressedBytes']) >= 100000 AND - any(ProfileEvents['ExternalAggregationUncompressedBytes']) >= 100000 AND - count() == 1 + if( + any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND + any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalAggregationWritePart']) >= 1 AND + any(ProfileEvents['ExternalAggregationMerge']) >= 1 AND + any(ProfileEvents['ExternalAggregationCompressedBytes']) >= 100000 AND + any(ProfileEvents['ExternalAggregationUncompressedBytes']) >= 100000 AND + count() == 1, + 'ok', + 'fail: ' || toString(count()) || ' ' || toString(any(ProfileEvents)) + ) FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = '02402_external_disk_mertrics/aggregation' AND query ILIKE 'SELECT%2097152%' AND type = 'QueryFinish'; SELECT - any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND - any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND - any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND - any(ProfileEvents['ExternalJoinWritePart']) >= 1 AND - 
any(ProfileEvents['ExternalJoinMerge']) >= 0 AND - any(ProfileEvents['ExternalJoinCompressedBytes']) >= 100000 AND - any(ProfileEvents['ExternalJoinUncompressedBytes']) >= 100000 AND - count() == 1 + if( + any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND + any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalJoinWritePart']) >= 1 AND + any(ProfileEvents['ExternalJoinMerge']) >= 0 AND + any(ProfileEvents['ExternalJoinCompressedBytes']) >= 100000 AND + any(ProfileEvents['ExternalJoinUncompressedBytes']) >= 100000 AND + count() == 1, + 'ok', + 'fail: ' || toString(count()) || ' ' || toString(any(ProfileEvents)) + ) FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = '02402_external_disk_mertrics/join' AND query ILIKE 'SELECT%2097152%' AND type = 'QueryFinish'; diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference index 379eea4dbbb..7bb0b965fbc 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -320,6 +320,7 @@ geoDistance geohashDecode geohashEncode geohashesInBox +getClientHTTPHeader getMacro getOSKernelVersion getServerPort diff --git a/tests/queries/0_stateless/02439_merge_selecting_partitions.sql b/tests/queries/0_stateless/02439_merge_selecting_partitions.sql index 1d01fde56d6..0142afba7f2 100644 --- a/tests/queries/0_stateless/02439_merge_selecting_partitions.sql +++ b/tests/queries/0_stateless/02439_merge_selecting_partitions.sql @@ -21,7 +21,7 @@ select sleepEachRow(3) as higher_probability_of_reproducing_the_issue format Nul system flush logs; -- it should not list unneeded partitions where we cannot merge anything -select * from system.zookeeper_log where path like '/test/02439/s1/' || currentDatabase() || '/block_numbers/%' +select * from system.zookeeper_log where path like '/test/02439/' || getMacro('shard') || '/' || currentDatabase() || '/block_numbers/%' and op_num in ('List', 'SimpleList', 'FilteredList') and path not like '%/block_numbers/1' and path not like '%/block_numbers/123' and event_time >= now() - interval 1 minute diff --git a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference index 1f991703c7b..2ece1147d78 100644 --- a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference +++ b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference @@ -1,4 +1,15 @@ 1 rmt -1 rmt1 2 rmt +1 rmt1 2 rmt1 +0 +1 rmt +2 rmt +1 rmt1 +2 rmt1 +1 rmt2 +1 rmt2 +3 rmt2 +5 rmt2 +7 rmt2 +9 rmt2 diff --git a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql index fbd90d8ab0f..52e8be236c8 100644 --- a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql +++ b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql @@ -24,4 +24,29 @@ insert into rmt1 values (2); system sync replica rmt; system sync replica rmt1; -select *, _table from merge(currentDatabase(), '') order by (*,), _table; +select *, _table from merge(currentDatabase(), '') order by _table, (*,); +select 0; + +create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02468/{database}2', 
'1') order by tuple() partition by n % 2 settings replicated_max_ratio_of_wrong_parts=0, max_suspicious_broken_parts=0, max_suspicious_broken_parts_bytes=0; + +system stop cleanup rmt; +system stop merges rmt1; +insert into rmt select * from numbers(10) settings max_block_size=1; +system sync replica rmt1 lightweight; + +alter table rmt replace partition id '0' from rmt2; +alter table rmt1 move partition id '1' to table rmt2; + +detach table rmt sync; +detach table rmt1 sync; + +attach table rmt; +attach table rmt1; + +insert into rmt values (1); +insert into rmt1 values (2); +system sync replica rmt; +system sync replica rmt1; +system sync replica rmt2; + +select *, _table from merge(currentDatabase(), '') order by _table, (*,); diff --git a/tests/queries/0_stateless/02494_query_cache_events.reference b/tests/queries/0_stateless/02494_query_cache_events.reference index 9bcd2820f27..00510f3a0c6 100644 --- a/tests/queries/0_stateless/02494_query_cache_events.reference +++ b/tests/queries/0_stateless/02494_query_cache_events.reference @@ -1,7 +1,4 @@ ---- 1 -0 1 ---- 1 0 1 1 0 diff --git a/tests/queries/0_stateless/02494_query_cache_events.sql b/tests/queries/0_stateless/02494_query_cache_events.sql index 05c0acad4b8..f92e71cb50f 100644 --- a/tests/queries/0_stateless/02494_query_cache_events.sql +++ b/tests/queries/0_stateless/02494_query_cache_events.sql @@ -4,20 +4,7 @@ -- Start with empty query cache QC SYSTEM DROP QUERY CACHE; --- Run a query with QC on. The first execution is a QC miss. -SELECT '---'; SELECT 1 SETTINGS use_query_cache = true; - -SYSTEM FLUSH LOGS; -SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] -FROM system.query_log -WHERE type = 'QueryFinish' - AND current_database = currentDatabase() - AND query = 'SELECT 1 SETTINGS use_query_cache = true;'; - - --- Run previous query again with query cache on -SELECT '---'; SELECT 1 SETTINGS use_query_cache = true; SYSTEM FLUSH LOGS; @@ -28,4 +15,6 @@ WHERE type = 'QueryFinish' AND query = 'SELECT 1 SETTINGS use_query_cache = true;' ORDER BY event_time_microseconds; +-- (The 1st execution was a cache miss, the 2nd execution was a cache hit) + SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.reference b/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.reference index d0cf9ff680b..f646583bbd3 100644 --- a/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.reference +++ b/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.reference @@ -345,6 +345,18 @@ WITH SELECT x; {'argument1':'1','argument2':'2','char':'=','char2':'=','formula':'1+2=3','result':'3','string':'foo=bar'} +-- https://github.com/ClickHouse/ClickHouse/issues/56357 +WITH + extractKeyValuePairs('{"a":"1", "b":"2"}') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; +{'a':'1','b':'2'} -- check str_to_map alias (it is case-insensitive) WITH sTr_tO_mAp('name:neymar, age:31 team:psg,nationality:brazil') AS s_map, diff --git a/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.sql b/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.sql index 804ff4ce880..9277ba6d7ec 100644 --- a/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.sql +++ b/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.sql @@ -481,6 +481,18 @@ WITH SELECT x; +-- 
https://github.com/ClickHouse/ClickHouse/issues/56357 +WITH + extractKeyValuePairs('{"a":"1", "b":"2"}') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + -- check str_to_map alias (it is case-insensitive) WITH sTr_tO_mAp('name:neymar, age:31 team:psg,nationality:brazil') AS s_map, diff --git a/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh b/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh index 63f912c6bff..4f3fd0e54f6 100755 --- a/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh +++ b/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh @@ -22,6 +22,7 @@ SETTINGS min_bytes_for_wide_part = 0, type = cache, max_size = '128Mi', max_file_segment_size = '10Ki', + boundary_alignment = '5Ki', path = '${CLICKHOUSE_TEST_UNIQUE_NAME}', cache_on_write_operations = 1, enable_filesystem_query_cache_limit = 1, diff --git a/tests/queries/0_stateless/02516_projections_with_rollup.sql b/tests/queries/0_stateless/02516_projections_with_rollup.sql index 038caf59264..a87621073af 100644 --- a/tests/queries/0_stateless/02516_projections_with_rollup.sql +++ b/tests/queries/0_stateless/02516_projections_with_rollup.sql @@ -1,6 +1,3 @@ --- Tags: disabled --- FIXME https://github.com/ClickHouse/ClickHouse/issues/49552 - DROP TABLE IF EXISTS video_log; DROP TABLE IF EXISTS video_log_result__fuzz_0; DROP TABLE IF EXISTS rng; @@ -16,7 +13,8 @@ CREATE TABLE video_log ) ENGINE = MergeTree PARTITION BY toDate(datetime) -ORDER BY (user_id, device_id); +ORDER BY (user_id, device_id) +SETTINGS index_granularity_bytes=10485760, index_granularity=8192; CREATE TABLE video_log_result__fuzz_0 ( @@ -62,7 +60,7 @@ LIMIT 10; ALTER TABLE video_log ADD PROJECTION p_norm ( - SELECT + SELECT datetime, device_id, bytes, @@ -77,12 +75,12 @@ SETTINGS mutations_sync = 1; ALTER TABLE video_log ADD PROJECTION p_agg ( - SELECT + SELECT toStartOfHour(datetime) AS hour, domain, sum(bytes), avg(duration) - GROUP BY + GROUP BY hour, domain ); diff --git a/tests/queries/0_stateless/02521_analyzer_array_join_crash.reference b/tests/queries/0_stateless/02521_analyzer_array_join_crash.reference index 59da8ccad1a..5e7728e0590 100644 --- a/tests/queries/0_stateless/02521_analyzer_array_join_crash.reference +++ b/tests/queries/0_stateless/02521_analyzer_array_join_crash.reference @@ -8,4 +8,4 @@ SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_el 0 [1,2,3] 3 SELECT value_element, value FROM test_table ARRAY JOIN [1048577] AS value_element, arrayMap(x -> value_element, ['']) AS value; 1048577 [1048577] -SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem, arrayMap(x -> concat(x, elem, ''), ['']) AS unused; -- { serverError 44 } +SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem, arrayMap(x -> splitByChar(x, elem), ['']) AS unused; -- { serverError 44 } diff --git a/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql b/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql index c7641a3bee0..53606e01ab7 100644 --- a/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql +++ b/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql @@ -17,7 +17,7 @@ SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_el SELECT value_element, value FROM test_table ARRAY JOIN [1048577] AS value_element, 
arrayMap(x -> value_element, ['']) AS value; -SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem, arrayMap(x -> concat(x, elem, ''), ['']) AS unused; -- { serverError 44 } +SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem, arrayMap(x -> splitByChar(x, elem), ['']) AS unused; -- { serverError 44 } -- { echoOff } diff --git a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference index d083e178586..60ff2d76995 100644 --- a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference +++ b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference @@ -87,3 +87,4 @@ QUERY id: 0 LIST id: 6, nodes: 2 COLUMN id: 7, column_name: a, result_type: Int32, source_id: 3 CONSTANT id: 8, constant_value: UInt64_2, constant_value_type: UInt8 +1 diff --git a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql index f20ef412215..eebea322dbf 100644 --- a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql +++ b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql @@ -24,3 +24,5 @@ EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 3 AND b = 'an SELECT * FROM 02668_logical_optimizer WHERE a = 2 AND 2 = a; EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 2 AND 2 = a; + +SELECT (k = 3) OR ( (k = 1) OR (k = 2) OR ( (NULL OR 1) = k ) ) FROM ( SELECT materialize(1) AS k ); diff --git a/tests/queries/0_stateless/02668_parse_datetime.reference b/tests/queries/0_stateless/02668_parse_datetime.reference index f6c53ce1887..d21a51ce70c 100644 --- a/tests/queries/0_stateless/02668_parse_datetime.reference +++ b/tests/queries/0_stateless/02668_parse_datetime.reference @@ -243,3 +243,30 @@ select parseDateTime('12 AM'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH select parseDateTime('12 AM', '%h %p', 'UTC', 'a fourth argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -- Fuzzer crash bug #53715 select parseDateTime('', '', toString(number)) from numbers(13); -- { serverError ILLEGAL_COLUMN } +-- %h +select parseDateTime('Aug 13, 2022, 7:58:32 PM', '%b %e, %G, %h:%i:%s %p', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('Aug 13, 2022, 07:58:32 PM', '%b %e, %G, %h:%i:%s %p', 'UTC'); +2022-08-13 19:58:32 +-- %l accepts single or double digits inputs +select parseDateTime('Aug 13, 2022, 7:58:32 PM', '%b %e, %G, %l:%i:%s %p', 'UTC'); +2022-08-13 19:58:32 +select parseDateTime('Aug 13, 2022, 07:58:32 PM', '%b %e, %G, %l:%i:%s %p', 'UTC'); +2022-08-13 19:58:32 +-- %H +select parseDateTime('Aug 13, 2022, 7:58:32', '%b %e, %G, %H:%i:%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('Aug 13, 2022, 07:58:32', '%b %e, %G, %H:%i:%s', 'UTC'); +2022-08-13 07:58:32 +-- %k accepts single or double digits inputs +select parseDateTime('Aug 13, 2022, 7:58:32', '%b %e, %G, %k:%i:%s', 'UTC'); +2022-08-13 07:58:32 +select parseDateTime('Aug 13, 2022, 07:58:32', '%b %e, %G, %k:%i:%s', 'UTC'); +2022-08-13 07:58:32 +-- %m +select parseDateTime('8 13, 2022, 7:58:32', '%m %e, %G, %k:%i:%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('08 13, 2022, 07:58:32', '%m %e, %G, %k:%i:%s', 'UTC'); 
+2022-08-13 07:58:32 +-- %c accepts single or double digits inputs +select parseDateTime('8 13, 2022, 7:58:32', '%c %e, %G, %k:%i:%s', 'UTC'); +2022-08-13 07:58:32 +select parseDateTime('08 13, 2022, 07:58:32', '%c %e, %G, %k:%i:%s', 'UTC'); +2022-08-13 07:58:32 diff --git a/tests/queries/0_stateless/02668_parse_datetime.sql b/tests/queries/0_stateless/02668_parse_datetime.sql index d8f2a94e188..02ac0c5f35c 100644 --- a/tests/queries/0_stateless/02668_parse_datetime.sql +++ b/tests/queries/0_stateless/02668_parse_datetime.sql @@ -168,4 +168,23 @@ select parseDateTime('12 AM', '%h %p', 'UTC', 'a fourth argument'); -- { serverE -- Fuzzer crash bug #53715 select parseDateTime('', '', toString(number)) from numbers(13); -- { serverError ILLEGAL_COLUMN } +-- %h +select parseDateTime('Aug 13, 2022, 7:58:32 PM', '%b %e, %G, %h:%i:%s %p', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('Aug 13, 2022, 07:58:32 PM', '%b %e, %G, %h:%i:%s %p', 'UTC'); +-- %l accepts single or double digits inputs +select parseDateTime('Aug 13, 2022, 7:58:32 PM', '%b %e, %G, %l:%i:%s %p', 'UTC'); +select parseDateTime('Aug 13, 2022, 07:58:32 PM', '%b %e, %G, %l:%i:%s %p', 'UTC'); +-- %H +select parseDateTime('Aug 13, 2022, 7:58:32', '%b %e, %G, %H:%i:%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('Aug 13, 2022, 07:58:32', '%b %e, %G, %H:%i:%s', 'UTC'); +-- %k accepts single or double digits inputs +select parseDateTime('Aug 13, 2022, 7:58:32', '%b %e, %G, %k:%i:%s', 'UTC'); +select parseDateTime('Aug 13, 2022, 07:58:32', '%b %e, %G, %k:%i:%s', 'UTC'); +-- %m +select parseDateTime('8 13, 2022, 7:58:32', '%m %e, %G, %k:%i:%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('08 13, 2022, 07:58:32', '%m %e, %G, %k:%i:%s', 'UTC'); +-- %c accepts single or double digits inputs +select parseDateTime('8 13, 2022, 7:58:32', '%c %e, %G, %k:%i:%s', 'UTC'); +select parseDateTime('08 13, 2022, 07:58:32', '%c %e, %G, %k:%i:%s', 'UTC'); + -- { echoOff } diff --git a/tests/queries/0_stateless/02668_ulid_decoding.sql b/tests/queries/0_stateless/02668_ulid_decoding.sql index ecab5004df6..85344bdf49e 100644 --- a/tests/queries/0_stateless/02668_ulid_decoding.sql +++ b/tests/queries/0_stateless/02668_ulid_decoding.sql @@ -1,6 +1,6 @@ -- Tags: no-fasttest -SELECT dateDiff('minute', ULIDStringToDateTime(generateULID()), now()) = 0; +SELECT dateDiff('minute', ULIDStringToDateTime(generateULID()), now()) <= 1; SELECT toTimezone(ULIDStringToDateTime('01GWJWKW30MFPQJRYEAF4XFZ9E'), 'America/Costa_Rica'); SELECT ULIDStringToDateTime('01GWJWKW30MFPQJRYEAF4XFZ9E', 'America/Costa_Rica'); SELECT ULIDStringToDateTime('01GWJWKW30MFPQJRYEAF4XFZ9', 'America/Costa_Rica'); -- { serverError ILLEGAL_COLUMN } diff --git a/tests/queries/0_stateless/02675_predicate_push_down_filled_join_fix.reference b/tests/queries/0_stateless/02675_predicate_push_down_filled_join_fix.reference index 986ecffcdf8..2630c5b95b6 100644 --- a/tests/queries/0_stateless/02675_predicate_push_down_filled_join_fix.reference +++ b/tests/queries/0_stateless/02675_predicate_push_down_filled_join_fix.reference @@ -13,6 +13,10 @@ Positions: 3 0 1 Header: id_0 UInt64 value_1 String value_2 String + Type: INNER + Strictness: ALL + Algorithm: HashJoin + Clauses: [(id_0) = (id)] Filter (( + (JOIN actions + Change column names to column identifiers))) Header: id_0 UInt64 value_1 String diff --git a/tests/queries/0_stateless/02681_undrop_query.sql b/tests/queries/0_stateless/02681_undrop_query.sql index 
af5da704ca9..66447fc6c44 100644 --- a/tests/queries/0_stateless/02681_undrop_query.sql +++ b/tests/queries/0_stateless/02681_undrop_query.sql @@ -1,7 +1,6 @@ -- Tags: no-ordinary-database, no-replicated-database, distributed, zookeeper set database_atomic_wait_for_drop_and_detach_synchronously = 0; -set allow_experimental_undrop_table_query = 1; select 'test MergeTree undrop'; drop table if exists 02681_undrop_mergetree sync; diff --git a/tests/queries/0_stateless/02681_undrop_query_uuid.sh b/tests/queries/0_stateless/02681_undrop_query_uuid.sh index a93f30ef459..283cbeb20ad 100755 --- a/tests/queries/0_stateless/02681_undrop_query_uuid.sh +++ b/tests/queries/0_stateless/02681_undrop_query_uuid.sh @@ -13,7 +13,7 @@ ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -q "create table 02681_u ${CLICKHOUSE_CLIENT} -q "insert into 02681_undrop_uuid values (1),(2),(3);" ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -q "drop table 02681_undrop_uuid on cluster test_shard_localhost settings database_atomic_wait_for_drop_and_detach_synchronously = 0;" ${CLICKHOUSE_CLIENT} -q "select table from system.dropped_tables where table = '02681_undrop_uuid' limit 1;" -${CLICKHOUSE_CLIENT} -q "undrop table 02681_undrop_uuid UUID '$uuid2' settings allow_experimental_undrop_table_query = 1;" 2>&1| grep -Faq "UNKNOWN_TABLE" && echo OK -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -q "undrop table 02681_undrop_uuid UUID '$uuid' on cluster test_shard_localhost settings allow_experimental_undrop_table_query = 1;" +${CLICKHOUSE_CLIENT} -q "undrop table 02681_undrop_uuid UUID '$uuid2';" 2>&1| grep -Faq "UNKNOWN_TABLE" && echo OK +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -q "undrop table 02681_undrop_uuid UUID '$uuid' on cluster test_shard_localhost;" ${CLICKHOUSE_CLIENT} -q "select * from 02681_undrop_uuid order by id;" ${CLICKHOUSE_CLIENT} -q "drop table 02681_undrop_uuid sync;" diff --git a/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.reference b/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.reference index abe891cbb9b..81f1bdda20c 100644 --- a/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.reference +++ b/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.reference @@ -1,4 +1,4 @@ -test_dictionary_hashed 1000000 0.4768 33558760 -test_dictionary_hashed_load_factor 1000000 0.9537 16781544 -test_dictionary_sparse_hashed 1000000 0.4768 20975848 -test_dictionary_sparse_hashed_load_factor 1000000 0.9537 10490088 +test_dictionary_hashed 1000000 0.4768 34000000 +test_dictionary_hashed_load_factor 1000000 0.9537 17000000 +test_dictionary_sparse_hashed 1000000 0.4768 21000000 +test_dictionary_sparse_hashed_load_factor 1000000 0.9537 10000000 diff --git a/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.sql.j2 b/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.sql.j2 index 870acd54514..41d68216412 100644 --- a/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.sql.j2 +++ b/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.sql.j2 @@ -31,7 +31,7 @@ LIFETIME(0); SYSTEM RELOAD DICTIONARY test_dictionary_{{layout}}; SYSTEM RELOAD DICTIONARY test_dictionary_{{layout}}_load_factor; -SELECT name, element_count, round(load_factor, 4), bytes_allocated FROM system.dictionaries WHERE database = currentDatabase() ORDER BY name; +SELECT name, element_count, round(load_factor, 4), round(bytes_allocated, -6) FROM system.dictionaries WHERE database = currentDatabase() ORDER BY name; DROP 
DICTIONARY IF EXISTS test_dictionary_{{layout}}; DROP DICTIONARY IF EXISTS test_dictionary_{{layout}}_load_factor; diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge.reference b/tests/queries/0_stateless/02763_row_policy_storage_merge.reference new file mode 100644 index 00000000000..9fa5612e7cd --- /dev/null +++ b/tests/queries/0_stateless/02763_row_policy_storage_merge.reference @@ -0,0 +1,314 @@ +SELECT * FROM 02763_merge_log_1 ORDER BY x +1 11 +2 12 +3 13 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge) ORDER BY x +1 11 +1 11 +1 11 +1 11 +2 12 +2 12 +2 12 +2 12 +3 13 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +4 14 +SETTINGS optimize_move_to_prewhere= 0 +SELECT * FROM 02763_merge_log_1 +3 13 +SELECT * FROM merge(currentDatabase(), 02763_merge_log_1) +3 13 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) +1 11 +2 12 +3 13 +3 13 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>2 +3 13 +3 13 +4 14 +SELECT * FROM 02763_merge_merge_1 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge_1) +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) +1 11 +2 12 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>2 +3 13 +4 14 +4 14 +SELECT * FROM engine_merge_12 WHERE x>2 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge) +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge) WHERE x>2 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +aaa 6 39 +aaa 6 39 +aaa 6 39 +aaa 8 42 +aaa 8 42 +aaa 8 42 +3 +3 +3 +4 +4 +4 +SELECT * FROM merge(...) LEFT JOIN merge(...) +3 13 13 +3 13 13 +4 14 14 +4 14 14 +SELECT * FROM merge(...) UNION ALL SELECT * FROM merge(...) +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +SELECT x, SUM(x) FROM (SELECT * FROM merge(...) UNION ALL ...) 
GROUP BY x +1 22 +2 24 +3 39 +4 42 +1 11 0 +2 12 0 +3 13 0 +4 14 1 +4 14 1 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>1 -- with y>12 +2 12 +3 13 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>1 -- with y>12 +2 12 +3 13 +3 13 +4 14 +4 14 +2 12 0 +3 13 1 +3 13 1 +4 14 1 +4 14 1 +SELECT y from merge(currentDatabase(), 02763_merge) +11 +11 +12 +12 +13 +13 +13 +13 +14 +14 +14 +14 +02763_merge_fancycols +SELECT * +SELECT x, lc +SELECT * +1 11 111 111 42 +1 11 111 111 42 +SELECT x, lc +1 111 +1 111 +SELECT x, lc, cnst +1 111 42 +1 111 42 +SELECT x, y from merge(currentDatabase(), 02763_merge +1 11 +1 11 +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +4 14 +SETTINGS optimize_move_to_prewhere= 1 +SELECT * FROM 02763_merge_log_1 +3 13 +SELECT * FROM merge(currentDatabase(), 02763_merge_log_1) +3 13 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) +1 11 +2 12 +3 13 +3 13 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>2 +3 13 +3 13 +4 14 +SELECT * FROM 02763_merge_merge_1 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge_1) +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) +1 11 +2 12 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>2 +3 13 +4 14 +4 14 +SELECT * FROM engine_merge_12 WHERE x>2 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge) +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge) WHERE x>2 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +aaa 6 39 +aaa 6 39 +aaa 6 39 +aaa 8 42 +aaa 8 42 +aaa 8 42 +3 +3 +3 +4 +4 +4 +SELECT * FROM merge(...) LEFT JOIN merge(...) +3 13 13 +3 13 13 +4 14 14 +4 14 14 +SELECT * FROM merge(...) UNION ALL SELECT * FROM merge(...) +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +SELECT x, SUM(x) FROM (SELECT * FROM merge(...) UNION ALL ...) 
GROUP BY x +1 22 +2 24 +3 39 +4 42 +1 11 0 +2 12 0 +3 13 0 +4 14 1 +4 14 1 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>1 -- with y>12 +2 12 +3 13 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>1 -- with y>12 +2 12 +3 13 +3 13 +4 14 +4 14 +2 12 0 +3 13 1 +3 13 1 +4 14 1 +4 14 1 +SELECT y from merge(currentDatabase(), 02763_merge) +11 +11 +12 +12 +13 +13 +13 +13 +14 +14 +14 +14 +02763_merge_fancycols +SELECT * +SELECT x, lc +SELECT * +1 11 111 111 42 +1 11 111 111 42 +SELECT x, lc +1 111 +1 111 +SELECT x, lc, cnst +1 111 42 +1 111 42 +SELECT x, y from merge(currentDatabase(), 02763_merge +1 11 +1 11 +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +4 14 diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge.sql.j2 b/tests/queries/0_stateless/02763_row_policy_storage_merge.sql.j2 new file mode 100644 index 00000000000..0263e1a974f --- /dev/null +++ b/tests/queries/0_stateless/02763_row_policy_storage_merge.sql.j2 @@ -0,0 +1,143 @@ +DROP TABLE IF EXISTS 02763_merge_log_1; +DROP TABLE IF EXISTS 02763_merge_log_2; +DROP TABLE IF EXISTS 02763_merge_merge_1; +DROP TABLE IF EXISTS 02763_merge_merge_2; +DROP TABLE IF EXISTS 02763_merge_fancycols; +DROP ROW POLICY IF EXISTS 02763_filter_1 ON 02763_merge_log_1; +DROP ROW POLICY IF EXISTS 02763_filter_2 ON 02763_merge_merge_1; +DROP ROW POLICY IF EXISTS 02763_filter_3 ON 02763_merge_log_1; +DROP ROW POLICY IF EXISTS 02763_filter_4 ON 02763_merge_merge_1; +DROP ROW POLICY IF EXISTS 02763_filter_5 ON 02763_merge_fancycols; +DROP ROW POLICY IF EXISTS 02763_filter_6 ON 02763_merge_fancycols; + + +CREATE TABLE 02763_merge_log_1 (x UInt8, y UInt64) ENGINE = Log; +CREATE TABLE 02763_merge_log_2 (x UInt8, y UInt64) ENGINE = Log; + +CREATE TABLE 02763_merge_merge_1 (x UInt8, y UInt64) ENGINE = MergeTree ORDER BY x; +CREATE TABLE 02763_merge_merge_2 (x UInt8, y UInt64) ENGINE = MergeTree ORDER BY x; + +CREATE TABLE 02763_engine_merge_12 (x UInt8, y UInt64) ENGINE = Merge(currentDatabase(), '02763_merge_merge'); + +INSERT INTO 02763_merge_log_1 VALUES (1, 11), (2, 12), (3, 13), (4, 14); +INSERT INTO 02763_merge_log_2 VALUES (1, 11), (2, 12), (3, 13), (4, 14); +INSERT INTO 02763_merge_merge_1 VALUES (1, 11), (2, 12), (3, 13), (4, 14); +INSERT INTO 02763_merge_merge_2 VALUES (1, 11), (2, 12), (3, 13), (4, 14); + +SELECT 'SELECT * FROM 02763_merge_log_1 ORDER BY x'; +SELECT * FROM 02763_merge_log_1 ORDER BY x; + +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge) ORDER BY x'; +SELECT * FROM merge(currentDatabase(), '02763_merge') ORDER BY x; + + +{% for prew in [0 , 1] -%} + +SELECT 'SETTINGS optimize_move_to_prewhere= {{prew}}'; + +CREATE ROW POLICY 02763_filter_1 ON 02763_merge_log_1 USING x=3 AS permissive TO ALL; + +SELECT 'SELECT * FROM 02763_merge_log_1'; +SELECT * FROM 02763_merge_log_1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log_1)'; +SELECT * FROM merge(currentDatabase(), '02763_merge_log_1') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log)'; +SELECT * FROM merge(currentDatabase(), '02763_merge_log') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>2'; +SELECT * FROM merge(currentDatabase(), '02763_merge_log') WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +CREATE ROW POLICY 02763_filter_2 ON 
02763_merge_merge_1 USING x=4 AS permissive TO ALL; + +SELECT 'SELECT * FROM 02763_merge_merge_1'; +SELECT * FROM 02763_merge_merge_1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge_1)'; +SELECT * FROM merge(currentDatabase(), '02763_merge_merge_1') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge)'; +SELECT * FROM merge(currentDatabase(), '02763_merge_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>2'; +SELECT * FROM merge(currentDatabase(), '02763_merge_merge') WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + + +SELECT 'SELECT * FROM engine_merge_12 WHERE x>2'; +SELECT * FROM 02763_engine_merge_12 WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + + +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge)'; +SELECT * FROM merge(currentDatabase(), '02763_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge) WHERE x>2'; +SELECT * FROM merge(currentDatabase(), '02763_merge') WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT 'aaa', x*2 as x_2, y*3 as y_3 FROM merge(currentDatabase(), '02763_merge') WHERE x>2 ORDER BY x_2 SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT x FROM (SELECT * FROM merge(currentDatabase(), '02763_merge') WHERE x IN (3,4)) ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT 'SELECT * FROM merge(...) LEFT JOIN merge(...)'; +SELECT * FROM merge(currentDatabase(), '02763_merge.*1') as a +LEFT JOIN +merge(currentDatabase(), '02763_merge.*2') as b +USING (x) +ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT 'SELECT * FROM merge(...) UNION ALL SELECT * FROM merge(...)'; +SELECT * FROM +( +SELECT * FROM merge(currentDatabase(), '02763_merge.*1') +UNION ALL +SELECT * FROM merge(currentDatabase(), '02763_merge.*2') +) +ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT 'SELECT x, SUM(x) FROM (SELECT * FROM merge(...) UNION ALL ...) 
GROUP BY x'; +SELECT x, SUM(y) FROM +(SELECT * FROM merge(currentDatabase(), '02763_merge.*1') +UNION ALL +SELECT * FROM merge(currentDatabase(), '02763_merge.*2')) +GROUP BY x +ORDER BY x; + +SELECT *, x=4 FROM merge(currentDatabase(), '02763_merge_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +CREATE ROW POLICY 02763_filter_3 ON 02763_merge_log_1 USING y>12 AS permissive TO ALL; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>1 -- with y>12'; +SELECT * FROM merge(currentDatabase(), '02763_merge_log') WHERE x>1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +CREATE ROW POLICY 02763_filter_4 ON 02763_merge_merge_1 USING y>12 AS permissive TO ALL; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>1 -- with y>12'; +SELECT * FROM merge(currentDatabase(), '02763_merge_merge') WHERE x>1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT *, (x=4 OR y>12) FROM merge(currentDatabase(), '02763_merge_merge') WHERE x>1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT 'SELECT y from merge(currentDatabase(), 02763_merge)'; +SELECT y from merge(currentDatabase(), '02763_merge') ORDER BY y SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT '02763_merge_fancycols'; +CREATE TABLE 02763_merge_fancycols (x UInt8, y Nullable(UInt64), z String DEFAULT CONCAT(toString(x), toString(y)), lc LowCardinality(String) DEFAULT z, cnst UInt32 MATERIALIZED 42) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO 02763_merge_fancycols (x, y) SELECT x, y from merge(currentDatabase(), '02763_merge'); + +CREATE ROW POLICY 02763_filter_5 ON 02763_merge_fancycols USING cnst<>42 AS permissive TO ALL; +SELECT 'SELECT *'; +SELECT * from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x, lc'; +SELECT x, lc from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +CREATE ROW POLICY 02763_filter_6 ON 02763_merge_fancycols USING lc='111' AS permissive TO ALL; +SELECT 'SELECT *'; +SELECT * from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x, lc'; +SELECT x, lc from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x, lc, cnst'; +SELECT x, lc, cnst from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x, y from merge(currentDatabase(), 02763_merge'; +SELECT x, y from merge(currentDatabase(), '02763_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +DROP TABLE 02763_merge_fancycols; + +DROP ROW POLICY 02763_filter_1 ON 02763_merge_log_1; +DROP ROW POLICY 02763_filter_2 ON 02763_merge_merge_1; + +DROP ROW POLICY 02763_filter_3 ON 02763_merge_log_1; +DROP ROW POLICY 02763_filter_4 ON 02763_merge_merge_1; + +DROP ROW POLICY 02763_filter_5 ON 02763_merge_fancycols; +DROP ROW POLICY 02763_filter_6 ON 02763_merge_fancycols; + +{% endfor %} diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.reference b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.reference new file mode 100644 index 00000000000..56bfdbe0b18 --- /dev/null +++ b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.reference @@ -0,0 +1,49 @@ +02763_merge_aliases +x, y, z FROM 02763_a_merge +3 13 16 +4 14 18 +* FROM 02763_a_merge +3 13 16 +4 
14 18 +x, y FROM 02763_a_merge +3 13 +4 14 +SELECT x, y FROM merge(currentDatabase(), 02763_alias) +3 13 +4 14 +SELECT x, y FROM merge(currentDatabase(), 02763_alias) +2 12 +3 13 +4 14 +SELECT x FROM merge(currentDatabase(), 02763_alias) +12 +13 +14 +SELECT y FROM merge(currentDatabase(), 02763_alias) +2 +3 +4 +x, y, z FROM 02763_a_merge +3 13 16 +4 14 18 +* FROM 02763_a_merge +3 13 16 +4 14 18 +x, y FROM 02763_a_merge +3 13 +4 14 +SELECT x, y FROM merge(currentDatabase(), 02763_alias) +3 13 +4 14 +SELECT x, y FROM merge(currentDatabase(), 02763_alias) +2 12 +3 13 +4 14 +SELECT x FROM merge(currentDatabase(), 02763_alias) +12 +13 +14 +SELECT y FROM merge(currentDatabase(), 02763_alias) +2 +3 +4 diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.sql.j2 b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.sql.j2 new file mode 100644 index 00000000000..bdd456951dd --- /dev/null +++ b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.sql.j2 @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS 02763_alias; +DROP TABLE IF EXISTS 02763_a_merge; + + +SELECT '02763_merge_aliases'; +CREATE TABLE 02763_alias (x UInt8, y UInt64, z UInt64 ALIAS plus(x,y)) ENGINE = MergeTree ORDER BY x; +INSERT INTO 02763_alias VALUES (1, 11), (2, 12), (3, 13), (4, 14); + +CREATE ROW POLICY 02763_filter_7 ON 02763_alias USING z>15 AS permissive TO ALL; + +CREATE TABLE 02763_a_merge (x UInt8, y UInt64, z UInt64) ENGINE = Merge(currentDatabase(), '02763_alias'); + +{% for prew in [0 , 1] -%} + + + +SELECT 'x, y, z FROM 02763_a_merge'; +SELECT x, y, z FROM 02763_a_merge ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT '* FROM 02763_a_merge'; +SELECT * FROM 02763_a_merge ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'x, y FROM 02763_a_merge'; +SELECT x, y FROM 02763_a_merge ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x, y FROM merge(currentDatabase(), 02763_alias)'; +SELECT x, y FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +CREATE ROW POLICY 02763_filter_8 ON 02763_alias USING y>11 AS permissive TO ALL; + +SELECT 'SELECT x, y FROM merge(currentDatabase(), 02763_alias)'; +SELECT x, y FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x FROM merge(currentDatabase(), 02763_alias)'; +SELECT y FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT y FROM merge(currentDatabase(), 02763_alias)'; +SELECT x FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +DROP ROW POLICY 02763_filter_8 ON 02763_alias; +{% endfor %} + +DROP TABLE 02763_alias; +DROP TABLE 02763_a_merge; + +DROP ROW POLICY 02763_filter_7 ON 02763_alias; diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql index 89073bd2943..3bbcbb1a535 100644 --- a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql +++ b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql @@ -5,7 +5,7 @@ -- Tests the output of SHOW COLUMNS when called through the ClickHouse protocol. 
-- ----------------------------------------------------------------------------------- --- Please keep this test in-sync with 02775_show_columns_called_through_mysql.sql +-- Please keep this test in-sync with 02775_show_columns_called_from_mysql.expect -- ----------------------------------------------------------------------------------- DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect b/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect index bef5bd10ff3..8ba5774820e 100755 --- a/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect +++ b/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect @@ -6,7 +6,7 @@ # Tests the output of SHOW COLUMNS when called through the MySQL protocol. # ----------------------------------------------------------------------------------- -# Please keep this test in-sync with 02775_show_columns_called_through_clickhouse.sql +# Please keep this test in-sync with 02775_show_columns_called_from_clickhouse.sql # ----------------------------------------------------------------------------------- set basedir [file dirname $argv0] diff --git a/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.reference b/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.reference deleted file mode 100644 index 025c04af1da..00000000000 --- a/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.reference +++ /dev/null @@ -1,124 +0,0 @@ --- { echoOn } -SET optimize_move_functions_out_of_any = 1; -EXPLAIN QUERY TREE SELECT any(number + number * 2) FROM numbers(1, 2); -QUERY id: 0 - PROJECTION COLUMNS - any(plus(number, multiply(number, 2))) UInt64 - PROJECTION - LIST id: 1, nodes: 1 - FUNCTION id: 2, function_name: plus, function_type: ordinary, result_type: UInt64 - ARGUMENTS - LIST id: 3, nodes: 2 - FUNCTION id: 4, function_name: any, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 5, nodes: 1 - COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 - FUNCTION id: 8, function_name: multiply, function_type: ordinary, result_type: UInt64 - ARGUMENTS - LIST id: 9, nodes: 2 - FUNCTION id: 10, function_name: any, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 11, nodes: 1 - COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 - CONSTANT id: 12, constant_value: UInt64_2, constant_value_type: UInt8 - JOIN TREE - TABLE_FUNCTION id: 7, table_function_name: numbers - ARGUMENTS - LIST id: 13, nodes: 2 - CONSTANT id: 14, constant_value: UInt64_1, constant_value_type: UInt8 - CONSTANT id: 15, constant_value: UInt64_2, constant_value_type: UInt8 -SELECT any(number + number * 2) FROM numbers(1, 2); -3 -EXPLAIN QUERY TREE SELECT anyLast(number + number * 2) FROM numbers(1, 2); -QUERY id: 0 - PROJECTION COLUMNS - anyLast(plus(number, multiply(number, 2))) UInt64 - PROJECTION - LIST id: 1, nodes: 1 - FUNCTION id: 2, function_name: plus, function_type: ordinary, result_type: UInt64 - ARGUMENTS - LIST id: 3, nodes: 2 - FUNCTION id: 4, function_name: anyLast, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 5, nodes: 1 - COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 - FUNCTION id: 8, function_name: multiply, function_type: ordinary, result_type: UInt64 - ARGUMENTS - LIST id: 9, nodes: 2 - FUNCTION id: 10, function_name: anyLast, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 11, nodes: 1 - COLUMN id: 6, column_name: number,
result_type: UInt64, source_id: 7 - CONSTANT id: 12, constant_value: UInt64_2, constant_value_type: UInt8 - JOIN TREE - TABLE_FUNCTION id: 7, table_function_name: numbers - ARGUMENTS - LIST id: 13, nodes: 2 - CONSTANT id: 14, constant_value: UInt64_1, constant_value_type: UInt8 - CONSTANT id: 15, constant_value: UInt64_2, constant_value_type: UInt8 -SELECT anyLast(number + number * 2) FROM numbers(1, 2); -6 -EXPLAIN QUERY TREE WITH any(number * 3) AS x SELECT x FROM numbers(1, 2); -QUERY id: 0 - PROJECTION COLUMNS - x UInt64 - PROJECTION - LIST id: 1, nodes: 1 - FUNCTION id: 2, function_name: multiply, function_type: ordinary, result_type: UInt64 - ARGUMENTS - LIST id: 3, nodes: 2 - FUNCTION id: 4, function_name: any, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 5, nodes: 1 - COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 - CONSTANT id: 8, constant_value: UInt64_3, constant_value_type: UInt8 - JOIN TREE - TABLE_FUNCTION id: 7, table_function_name: numbers - ARGUMENTS - LIST id: 9, nodes: 2 - CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8 - CONSTANT id: 11, constant_value: UInt64_2, constant_value_type: UInt8 -WITH any(number * 3) AS x SELECT x FROM numbers(1, 2); -3 -EXPLAIN QUERY TREE SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2); -QUERY id: 0 - PROJECTION COLUMNS - x UInt64 - x UInt64 - PROJECTION - LIST id: 1, nodes: 2 - FUNCTION id: 2, function_name: multiply, function_type: ordinary, result_type: UInt64 - ARGUMENTS - LIST id: 3, nodes: 2 - FUNCTION id: 4, function_name: anyLast, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 5, nodes: 1 - COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 - CONSTANT id: 8, constant_value: UInt64_3, constant_value_type: UInt8 - FUNCTION id: 2, function_name: multiply, function_type: ordinary, result_type: UInt64 - ARGUMENTS - LIST id: 3, nodes: 2 - FUNCTION id: 4, function_name: anyLast, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 5, nodes: 1 - COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 - CONSTANT id: 8, constant_value: UInt64_3, constant_value_type: UInt8 - JOIN TREE - TABLE_FUNCTION id: 7, table_function_name: numbers - ARGUMENTS - LIST id: 9, nodes: 2 - CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8 - CONSTANT id: 11, constant_value: UInt64_2, constant_value_type: UInt8 -SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2); -6 6 -SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 } -SET optimize_move_functions_out_of_any = 0; -SELECT any(number + number * 2) FROM numbers(1, 2); -3 -SELECT anyLast(number + number * 2) FROM numbers(1, 2); -6 -WITH any(number * 3) AS x SELECT x FROM numbers(1, 2); -3 -SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2); -6 6 -SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 } diff --git a/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.sql b/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.sql deleted file mode 100644 index c9707d10fde..00000000000 --- a/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.sql +++ /dev/null @@ -1,33 +0,0 @@ -SET allow_experimental_analyzer = 1; - --- { echoOn } -SET optimize_move_functions_out_of_any = 1; - -EXPLAIN QUERY TREE SELECT any(number + number * 2) FROM numbers(1, 2); -SELECT any(number + number * 2) FROM numbers(1, 2); - -EXPLAIN QUERY TREE SELECT anyLast(number + number * 2) FROM numbers(1, 2); -SELECT 
anyLast(number + number * 2) FROM numbers(1, 2); - -EXPLAIN QUERY TREE WITH any(number * 3) AS x SELECT x FROM numbers(1, 2); -WITH any(number * 3) AS x SELECT x FROM numbers(1, 2); - -EXPLAIN QUERY TREE SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2); -SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2); - -SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 } - - - -SET optimize_move_functions_out_of_any = 0; - -SELECT any(number + number * 2) FROM numbers(1, 2); - -SELECT anyLast(number + number * 2) FROM numbers(1, 2); - -WITH any(number * 3) AS x SELECT x FROM numbers(1, 2); - -SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2); - -SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 } --- { echoOff } diff --git a/tests/queries/0_stateless/02813_series_period_detect.reference b/tests/queries/0_stateless/02813_series_period_detect.reference new file mode 100644 index 00000000000..f72e8498f31 --- /dev/null +++ b/tests/queries/0_stateless/02813_series_period_detect.reference @@ -0,0 +1,5 @@ +14 +3 +3 +3 +0 diff --git a/tests/queries/0_stateless/02813_series_period_detect.sql b/tests/queries/0_stateless/02813_series_period_detect.sql new file mode 100644 index 00000000000..e860fd75923 --- /dev/null +++ b/tests/queries/0_stateless/02813_series_period_detect.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest + +SELECT seriesPeriodDetectFFT([139, 87, 110, 68, 54, 50, 51, 53, 133, 86, 141, 97, 156, 94, 149, 95, 140, 77, 61, 50, 54, 47, 133, 72, 152, 94, 148, 105, 162, 101, 160, 87, 63, 53, 55, 54, 151, 103, 189, 108, 183, 113, 175, 113, 178, 90, 71, 62, 62, 65, 165, 109, 181, 115, 182, 121, 178, 114, 170]); +SELECT seriesPeriodDetectFFT([10,20,30,10,20,30,10,20,30, 10,20,30,10,20,30,10,20,30,10,20,30]); +SELECT seriesPeriodDetectFFT([10.1, 20.45, 40.34, 10.1, 20.45, 40.34,10.1, 20.45, 40.34,10.1, 20.45, 40.34,10.1, 20.45, 40.34,10.1, 20.45, 40.34,10.1, 20.45, 40.34, 10.1, 20.45, 40.34]); +SELECT seriesPeriodDetectFFT([10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400,10.1, 10, 400,10.1, 10, 400,10.1, 10, 400,10.1, 10, 400,10.1, 10, 400]); +SELECT seriesPeriodDetectFFT([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]); +SELECT seriesPeriodDetectFFT([1,2,3]); -- { serverError BAD_ARGUMENTS} +SELECT seriesPeriodDetectFFT(); --{ serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesPeriodDetectFFT([]); -- { serverError ILLEGAL_COLUMN} +SELECT seriesPeriodDetectFFT([NULL, NULL, NULL]); -- { serverError ILLEGAL_COLUMN} +SELECT seriesPeriodDetectFFT([10,20,30,10,202,30,NULL]); -- { serverError ILLEGAL_COLUMN } \ No newline at end of file diff --git a/tests/queries/0_stateless/02815_join_algorithm_setting.reference b/tests/queries/0_stateless/02815_join_algorithm_setting.reference new file mode 100644 index 00000000000..94999dab6b5 --- /dev/null +++ b/tests/queries/0_stateless/02815_join_algorithm_setting.reference @@ -0,0 +1,13 @@ +1 +1 0 +1 +1 0 +1 +0 1 +1 +0 1 +1 0 +1 +1 0 +1 0 +0 1 diff --git a/tests/queries/0_stateless/02815_join_algorithm_setting.sql b/tests/queries/0_stateless/02815_join_algorithm_setting.sql new file mode 100644 index 00000000000..a4c24bb60f9 --- /dev/null +++ b/tests/queries/0_stateless/02815_join_algorithm_setting.sql @@ -0,0 +1,96 @@ +-- Tags: use-rocksdb + +DROP TABLE IF EXISTS rdb; +DROP TABLE IF EXISTS t2; + +CREATE TABLE rdb ( `key` UInt32, `value` String ) +ENGINE = EmbeddedRocksDB PRIMARY KEY key; +INSERT INTO rdb VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, 'e'); + +CREATE TABLE t2 ( `k` UInt16 ) ENGINE = TinyLog; +INSERT INTO t2 VALUES (4), 
(5), (6); + +SELECT value == 'default' FROM system.settings WHERE name = 'join_algorithm'; + +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN PLAN actions = 1 + SELECT * FROM ( SELECT k AS key FROM t2 ) AS t2 + INNER JOIN rdb ON rdb.key = t2.key + ORDER BY key ASC +); + +SET join_algorithm = 'direct, hash'; + +SELECT value == 'direct,hash' FROM system.settings WHERE name = 'join_algorithm'; + +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN PLAN actions = 1 + SELECT * FROM ( SELECT k AS key FROM t2 ) AS t2 + INNER JOIN rdb ON rdb.key = t2.key + ORDER BY key ASC +); + +SET join_algorithm = 'hash, direct'; + +SELECT value == 'hash,direct' FROM system.settings WHERE name = 'join_algorithm'; + +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN PLAN actions = 1 + SELECT * FROM ( SELECT k AS key FROM t2 ) AS t2 + INNER JOIN rdb ON rdb.key = t2.key + ORDER BY key ASC +); + +SET join_algorithm = 'grace_hash,hash'; + +SELECT value == 'grace_hash,hash' FROM system.settings WHERE name = 'join_algorithm'; + +SELECT countIf(explain like '%Algorithm: GraceHashJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN PLAN actions = 1 + SELECT * FROM ( SELECT number AS key, number * 10 AS key2 FROM numbers_mt(10) ) AS t1 + JOIN ( SELECT k AS key, k + 100 AS key2 FROM t2 ) AS t2 ON t1.key = t2.key OR t1.key2 = t2.key2 +); + +SELECT countIf(explain like '%Algorithm: GraceHashJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN PLAN actions = 1 + SELECT * FROM ( SELECT number AS key, number * 10 AS key2 FROM numbers_mt(10) ) AS t1 + JOIN ( SELECT k AS key, k + 100 AS key2 FROM t2 ) AS t2 ON t1.key = t2.key +); + +SET join_algorithm = 'grace_hash, hash, auto'; + +SELECT value = 'grace_hash,hash,auto' FROM system.settings WHERE name = 'join_algorithm'; + + +DROP DICTIONARY IF EXISTS dict; +DROP TABLE IF EXISTS src; + +CREATE TABLE src (id UInt64, s String) ENGINE = MergeTree ORDER BY id +AS SELECT number, toString(number) FROM numbers(1000000); + +CREATE DICTIONARY dict( + id UInt64, + s String +) PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'src' DB currentDatabase())) +LIFETIME (MIN 0 MAX 0) +LAYOUT(HASHED()); + +SET join_algorithm = 'default'; + +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN actions = 1 + SELECT s FROM (SELECT toUInt64(9911) id) t1 INNER JOIN dict t2 USING (id) +); + +SET join_algorithm = 'direct,hash'; +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN actions = 1 + SELECT s FROM (SELECT toUInt64(9911) id) t1 INNER JOIN dict t2 USING (id) +); + +SET join_algorithm = 'hash,direct'; +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN actions = 1 + SELECT s FROM (SELECT toUInt64(9911) id) t1 INNER JOIN dict t2 USING (id) +); diff --git a/tests/queries/0_stateless/02815_range_dict_no_direct_join.sql b/tests/queries/0_stateless/02815_range_dict_no_direct_join.sql index e3af53fa335..6ed195cf22c 100644 --- a/tests/queries/0_stateless/02815_range_dict_no_direct_join.sql +++ b/tests/queries/0_stateless/02815_range_dict_no_direct_join.sql @@ -30,5 +30,6 @@ RANGE(MIN discount_start_date MAX discount_end_date); 
CREATE TABLE ids (id UInt64) ENGINE = Memory; INSERT INTO ids SELECT * FROM numbers(10); -SELECT id, amount FROM ids INNER JOIN discounts_dict ON id = advertiser_id ORDER BY id, amount SETTINGS join_algorithm = 'direct'; -SELECT id, amount FROM ids INNER JOIN discounts_dict ON id = advertiser_id ORDER BY id, amount SETTINGS allow_experimental_analyzer = 1; +SELECT id, amount FROM ids INNER JOIN discounts_dict ON id = advertiser_id ORDER BY id, amount SETTINGS join_algorithm = 'direct,hash'; +SELECT id, amount FROM ids INNER JOIN discounts_dict ON id = advertiser_id ORDER BY id, amount SETTINGS join_algorithm = 'default'; +SELECT id, amount FROM ids INNER JOIN discounts_dict ON id = advertiser_id ORDER BY id, amount SETTINGS join_algorithm = 'direct'; -- { serverError NOT_IMPLEMENTED } diff --git a/tests/queries/0_stateless/02841_group_array_sorted.reference b/tests/queries/0_stateless/02841_group_array_sorted.reference new file mode 100644 index 00000000000..1043f949590 --- /dev/null +++ b/tests/queries/0_stateless/02841_group_array_sorted.reference @@ -0,0 +1,12 @@ +[0,1,2,3,4] +[0,1,2,3,4] +[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99] +['0','1','10','11','12','13','14','15','16','17','18','19','2','20','21','22','23','24','25','26','27','28','29','3','4','5','6','7','8','9'] +[0,0,1,1,2,2,3,3,4,4] +[[1,2,3,4],[2,3,4,5],[3,4,5,6]] +[(2,1),(15,25),(30,60),(100,200)] +[0.2,2.2,6.6,12.5] +['AAA','Aaa','aaa','abc','bbc'] +1000000 +1000000 +[0,1] diff --git a/tests/queries/0_stateless/02841_group_array_sorted.sql b/tests/queries/0_stateless/02841_group_array_sorted.sql new file mode 100644 index 00000000000..a8cd6791ff3 --- /dev/null +++ b/tests/queries/0_stateless/02841_group_array_sorted.sql @@ -0,0 +1,41 @@ +SELECT groupArraySorted(5)(number) FROM numbers(100); + +SELECT groupArraySorted(10)(number) FROM numbers(5); + +SELECT groupArraySorted(100)(number) FROM numbers(1000); + +SELECT groupArraySorted(30)(str) FROM (SELECT toString(number) as str FROM numbers(30)); + +SELECT groupArraySorted(10)(toInt64(number/2)) FROM numbers(100); + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a Array(UInt64)) engine=MergeTree ORDER BY a; +INSERT INTO test VALUES ([3,4,5,6]), ([1,2,3,4]), ([2,3,4,5]); +SELECT groupArraySorted(3)(a) FROM test; +DROP TABLE test; + +CREATE TABLE IF NOT EXISTS test (id Int32, data Tuple(Int32, Int32)) ENGINE = MergeTree() ORDER BY id; +INSERT INTO test (id, data) VALUES (1, (100, 200)), (2, (15, 25)), (3, (2, 1)), (4, (30, 60)); +SELECT groupArraySorted(4)(data) FROM test; +DROP TABLE test; + +CREATE TABLE IF NOT EXISTS test (id Int32, data Decimal32(2)) ENGINE = MergeTree() ORDER BY id; +INSERT INTO test (id, data) VALUES (1, 12.5), (2, 0.2), (3, 6.6), (4, 2.2); +SELECT groupArraySorted(4)(data) FROM test; +DROP TABLE test; + +CREATE TABLE IF NOT EXISTS test (id Int32, data FixedString(3)) ENGINE = MergeTree() ORDER BY id; +INSERT INTO test (id, data) VALUES (1, 'AAA'), (2, 'bbc'), (3, 'abc'), (4, 'aaa'), (5, 'Aaa'); +SELECT groupArraySorted(5)(data) FROM test; +DROP TABLE test; + +CREATE TABLE test (id Decimal(76, 53), str String) ENGINE = MergeTree ORDER BY id; +INSERT INTO test SELECT number, 'test' FROM numbers(1000000); +SELECT count(id) FROM test; +SELECT count(concat(toString(id), 'a')) FROM test; +DROP TABLE test; + 
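+-- groupArraySorted also works through the -State / -Merge combinators: store a partial state in an AggregateFunction column and read it back with groupArraySortedMerge.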
+CREATE TABLE test (id UInt64, agg AggregateFunction(groupArraySorted(2), UInt64)) engine=MergeTree ORDER BY id; +INSERT INTO test SELECT 1, groupArraySortedState(2)(number) FROM numbers(10); +SELECT groupArraySortedMerge(2)(agg) FROM test; +DROP TABLE test; diff --git a/tests/queries/0_stateless/02861_filter_pushdown_const_bug.reference b/tests/queries/0_stateless/02861_filter_pushdown_const_bug.reference index 428ba88bff0..df8198bc856 100644 --- a/tests/queries/0_stateless/02861_filter_pushdown_const_bug.reference +++ b/tests/queries/0_stateless/02861_filter_pushdown_const_bug.reference @@ -6,3 +6,5 @@ 1 1 1 1 +1 1 +1 1 diff --git a/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql b/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql index a5ddf830d48..a299e50984f 100644 --- a/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql +++ b/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql @@ -15,4 +15,8 @@ SELECT key FROM ( SELECT key FROM t1 ) AS t1 JOIN ( SELECT key FROM t1 ) AS t2 O SELECT key FROM ( SELECT 1 AS key ) AS t1 JOIN ( SELECT 1 AS key ) AS t2 ON t1.key = t2.key WHERE key; SELECT * FROM ( SELECT 1 AS key GROUP BY NULL ) AS t1 INNER JOIN (SELECT 1 AS key) AS t2 ON t1.key = t2.key WHERE t1.key ORDER BY key; +SET join_algorithm = 'grace_hash'; + +SELECT * FROM (SELECT key AS a FROM t1 ) t1 INNER JOIN (SELECT key AS c FROM t1 ) t2 ON c = a WHERE a; + DROP TABLE IF EXISTS t1; diff --git a/tests/queries/0_stateless/02864_statistic_exception.reference b/tests/queries/0_stateless/02864_statistic_exception.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02864_statistic_exception.sql b/tests/queries/0_stateless/02864_statistic_exception.sql new file mode 100644 index 00000000000..c37f6b1ce06 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistic_exception.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1 +( + a Float64 STATISTIC(tdigest), + b Int64 STATISTIC(tdigest), + pk String, +) Engine = MergeTree() ORDER BY pk; -- { serverError INCORRECT_QUERY } + +SET allow_experimental_statistic = 1; + +CREATE TABLE t1 +( + a Float64 STATISTIC(tdigest), + b Int64, + pk String STATISTIC(tdigest), +) Engine = MergeTree() ORDER BY pk; -- { serverError ILLEGAL_STATISTIC } + +CREATE TABLE t1 +( + a Float64 STATISTIC(tdigest, tdigest(10)), + b Int64, +) Engine = MergeTree() ORDER BY pk; -- { serverError INCORRECT_QUERY } + +CREATE TABLE t1 +( + a Float64 STATISTIC(xyz), + b Int64, +) Engine = MergeTree() ORDER BY pk; -- { serverError INCORRECT_QUERY } + +CREATE TABLE t1 +( + a Float64, + b Int64, + pk String, +) Engine = MergeTree() ORDER BY pk; + +ALTER TABLE t1 ADD STATISTIC a TYPE xyz; -- { serverError INCORRECT_QUERY } +ALTER TABLE t1 ADD STATISTIC a TYPE tdigest; +ALTER TABLE t1 ADD STATISTIC a TYPE tdigest; -- { serverError ILLEGAL_STATISTIC } +ALTER TABLE t1 ADD STATISTIC pk TYPE tdigest; -- { serverError ILLEGAL_STATISTIC } +ALTER TABLE t1 DROP STATISTIC b TYPE tdigest; -- { serverError ILLEGAL_STATISTIC } +ALTER TABLE t1 DROP STATISTIC a TYPE tdigest; +ALTER TABLE t1 DROP STATISTIC a TYPE tdigest; -- { serverError ILLEGAL_STATISTIC } +ALTER TABLE t1 CLEAR STATISTIC a TYPE tdigest; -- { serverError ILLEGAL_STATISTIC } +ALTER TABLE t1 MATERIALIZE STATISTIC b TYPE tdigest; -- { serverError ILLEGAL_STATISTIC } + +ALTER TABLE t1 ADD STATISTIC a TYPE tdigest; +ALTER TABLE t1 ADD STATISTIC b TYPE tdigest; +ALTER TABLE t1 MODIFY COLUMN a Float64 TTL now() + INTERVAL 1 MONTH; +ALTER 
TABLE t1 MODIFY COLUMN a Int64; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +DROP TABLE t1; diff --git a/tests/queries/0_stateless/02864_statistic_operate.reference b/tests/queries/0_stateless/02864_statistic_operate.reference new file mode 100644 index 00000000000..7fad7c810c1 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistic_operate.reference @@ -0,0 +1,31 @@ +CREATE TABLE default.t1\n(\n `a` Float64 STATISTIC(tdigest),\n `b` Int64 STATISTIC(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +After insert +SELECT count() +FROM t1 +PREWHERE (a < 10) AND (b < 10) +10 +0 +After drop statistic +SELECT count() +FROM t1 +PREWHERE (b < 10) AND (a < 10) +10 +CREATE TABLE default.t1\n(\n `a` Float64,\n `b` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +After add statistic +CREATE TABLE default.t1\n(\n `a` Float64 STATISTIC(tdigest),\n `b` Int64 STATISTIC(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +After materialize statistic +SELECT count() +FROM t1 +PREWHERE (a < 10) AND (b < 10) +20 +After merge +SELECT count() +FROM t1 +PREWHERE (a < 10) AND (b < 10) +20 +CREATE TABLE default.t1\n(\n `a` Float64 STATISTIC(tdigest),\n `c` Int64 STATISTIC(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +After rename +SELECT count() +FROM t1 +PREWHERE (a < 10) AND (c < 10) +20 diff --git a/tests/queries/0_stateless/02864_statistic_operate.sql b/tests/queries/0_stateless/02864_statistic_operate.sql new file mode 100644 index 00000000000..29bd213f04a --- /dev/null +++ b/tests/queries/0_stateless/02864_statistic_operate.sql @@ -0,0 +1,57 @@ +DROP TABLE IF EXISTS t1; + +SET allow_experimental_statistic = 1; +SET allow_statistic_optimize = 1; + +CREATE TABLE t1 +( + a Float64 STATISTIC(tdigest), + b Int64 STATISTIC(tdigest), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; + +SHOW CREATE TABLE t1; + +INSERT INTO t1 select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000; + +SELECT 'After insert'; +EXPLAIN SYNTAX SELECT count(*) FROM t1 WHERE b < 10 and a < 10; +SELECT count(*) FROM t1 WHERE b < 10 and a < 10; +SELECT count(*) FROM t1 WHERE b < NULL and a < '10'; + +ALTER TABLE t1 DROP STATISTIC a, b TYPE tdigest; + +SELECT 'After drop statistic'; +EXPLAIN SYNTAX SELECT count(*) FROM t1 WHERE b < 10 and a < 10; +SELECT count(*) FROM t1 WHERE b < 10 and a < 10; + +SHOW CREATE TABLE t1; + +ALTER TABLE t1 ADD STATISTIC a, b TYPE tdigest; + +SELECT 'After add statistic'; + +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MATERIALIZE STATISTIC a, b TYPE tdigest; +INSERT INTO t1 select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000; + +SELECT 'After materialize statistic'; +EXPLAIN SYNTAX SELECT count(*) FROM t1 WHERE b < 10 and a < 10; +SELECT count(*) FROM t1 WHERE b < 10 and a < 10; + +OPTIMIZE TABLE t1 FINAL; + +SELECT 'After merge'; +EXPLAIN SYNTAX SELECT count(*) FROM t1 WHERE b < 10 and a < 10; +SELECT count(*) FROM t1 WHERE b < 10 and a < 10; + +ALTER TABLE t1 RENAME COLUMN b TO c; +SHOW CREATE TABLE t1; + +SELECT 'After rename'; +EXPLAIN SYNTAX SELECT count(*) FROM t1 WHERE c < 10 and a < 10; +SELECT count(*) FROM t1 WHERE c < 10 and a < 10; + +DROP TABLE IF EXISTS t1; diff --git 
a/tests/queries/0_stateless/02888_obsolete_settings.reference b/tests/queries/0_stateless/02888_obsolete_settings.reference index 63553092c0c..dbf781f183f 100644 --- a/tests/queries/0_stateless/02888_obsolete_settings.reference +++ b/tests/queries/0_stateless/02888_obsolete_settings.reference @@ -5,6 +5,7 @@ allow_experimental_database_atomic allow_experimental_geo_types allow_experimental_map_type allow_experimental_query_cache +allow_experimental_undrop_table_query allow_experimental_window_functions async_insert_cleanup_timeout_ms async_insert_stale_timeout_ms @@ -40,6 +41,7 @@ multiple_joins_rewriter_version odbc_max_field_size optimize_duplicate_order_by_and_distinct optimize_fuse_sum_count_avg +optimize_move_functions_out_of_any parallel_replicas_min_number_of_granules_to_enable partial_merge_join_optimizations query_cache_store_results_of_queries_with_nondeterministic_functions diff --git a/tests/queries/0_stateless/02892_input_csv_cr_end_count_many_rows.reference b/tests/queries/0_stateless/02892_input_csv_cr_end_count_many_rows.reference new file mode 100644 index 00000000000..a27bd812903 --- /dev/null +++ b/tests/queries/0_stateless/02892_input_csv_cr_end_count_many_rows.reference @@ -0,0 +1,2 @@ +1010559 +1010559 diff --git a/tests/queries/0_stateless/02892_input_csv_cr_end_count_many_rows.sh b/tests/queries/0_stateless/02892_input_csv_cr_end_count_many_rows.sh new file mode 100755 index 00000000000..42dde18de00 --- /dev/null +++ b/tests/queries/0_stateless/02892_input_csv_cr_end_count_many_rows.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# NOTE: this sh wrapper is required because of shell_config + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +USER_FILES_PATH=$($CLICKHOUSE_CLIENT --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep -E '^Code: 107.*FILE_DOESNT_EXIST' | head -1 | awk '{gsub("/nonexist.txt","",$9); print $9}') + +cp "$CURDIR"/data_csv/1m_rows_cr_end_of_line.csv.xz $USER_FILES_PATH/ + +$CLICKHOUSE_CLIENT -q "SELECT count(1) from file('1m_rows_cr_end_of_line.csv.xz') settings input_format_csv_allow_cr_end_of_line=1, optimize_count_from_files=1" +$CLICKHOUSE_CLIENT -q "SELECT count(1) from file('1m_rows_cr_end_of_line.csv.xz') settings input_format_csv_allow_cr_end_of_line=1, optimize_count_from_files=0" + +rm $USER_FILES_PATH/1m_rows_cr_end_of_line.csv.xz \ No newline at end of file diff --git a/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference b/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference index 9289ddcee34..4598404dd40 100644 --- a/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference +++ b/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference @@ -1 +1,10 @@ +-- { echoOn } +SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 0, max_rows_to_read = 1; -- { serverError TOO_MANY_ROWS } +SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 1, max_rows_to_read = 1; 121 +SET optimize_trivial_approximate_count_query = 1; +-- needs more data to see total_bytes or just detach and attach the table +DETACH TABLE dict SYNC; +ATTACH TABLE dict; +SELECT total_rows, total_bytes > 0 FROM system.tables WHERE database = currentDatabase() AND name = 'dict' FORMAT CSV; +121,1 diff --git a/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql b/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql index 0cdf2d1b2b2..a770b153760 100644 --- 
a/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql +++ b/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql @@ -2,5 +2,11 @@ CREATE TABLE dict (key UInt64, value String) ENGINE = EmbeddedRocksDB PRIMARY KEY key; INSERT INTO dict SELECT number, toString(number) FROM numbers(121); +-- { echoOn } SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 0, max_rows_to_read = 1; -- { serverError TOO_MANY_ROWS } SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 1, max_rows_to_read = 1; +SET optimize_trivial_approximate_count_query = 1; +-- needs more data to see total_bytes or just detach and attach the table +DETACH TABLE dict SYNC; +ATTACH TABLE dict; +SELECT total_rows, total_bytes > 0 FROM system.tables WHERE database = currentDatabase() AND name = 'dict' FORMAT CSV; diff --git a/tests/queries/0_stateless/02896_memory_accounting_for_user.sh b/tests/queries/0_stateless/02896_memory_accounting_for_user.sh index 72f4be1475d..f3016671420 100755 --- a/tests/queries/0_stateless/02896_memory_accounting_for_user.sh +++ b/tests/queries/0_stateless/02896_memory_accounting_for_user.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-parallel, long +# Tags: no-parallel, long, no-random-settings CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02899_distributed_limit_by.reference b/tests/queries/0_stateless/02899_distributed_limit_by.reference new file mode 100644 index 00000000000..c20ecbcc4e4 --- /dev/null +++ b/tests/queries/0_stateless/02899_distributed_limit_by.reference @@ -0,0 +1,52 @@ +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=0,distributed_push_down_limit=1 +0 +0 +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=0,distributed_push_down_limit=0 +0 +0 +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=1,distributed_push_down_limit=1 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=1,distributed_push_down_limit=0 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=2,distributed_push_down_limit=1 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=2,distributed_push_down_limit=0 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=0,distributed_push_down_limit=1 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=0,distributed_push_down_limit=0 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=1,distributed_push_down_limit=1 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=1,distributed_push_down_limit=0 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=2,distributed_push_down_limit=1 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=2,distributed_push_down_limit=0 +0 +0 +0 +0 diff --git a/tests/queries/0_stateless/02899_distributed_limit_by.sql.j2 b/tests/queries/0_stateless/02899_distributed_limit_by.sql.j2 new file mode 100644 index 00000000000..4f885ef2b6c --- /dev/null +++ b/tests/queries/0_stateless/02899_distributed_limit_by.sql.j2 @@ -0,0 +1,26 @@ +{# +Randomize settings: +- prefer_localhost_replica +- distributed_group_by_no_merge (0 = WithMergeableState, 1 = Complete, 2 = WithMergeableStateAfterAggregation/WithMergeableStateAfterAggregationAndLimit) +- distributed_push_down_limit 
(0 = disallows, 1 = allows WithMergeableStateAfterAggregationAndLimit) +#} +{% for settings in product( + [ + 'prefer_localhost_replica=0', + 'prefer_localhost_replica=1', + ], + [ + 'distributed_group_by_no_merge=0', + 'distributed_group_by_no_merge=1', + 'distributed_group_by_no_merge=2', + ], + [ + 'distributed_push_down_limit=1', + 'distributed_push_down_limit=0', + ], +) %} +{% set settings = settings | join(',') %} +select 'Used settings: {{ settings }}'; +select dummy from remote('127.{1,1}', system.one) where dummy + dummy >= 0 limit 1 by dummy + dummy + 0 as l settings {{ settings }}; +select dummy from (select dummy + dummy + 0 as l, dummy from remote('127.{1,1}', system.one) where dummy + dummy >= 0 limit 1 by l) settings {{ settings }}; +{% endfor %} diff --git a/tests/queries/0_stateless/02900_limit_by_query_stage.reference b/tests/queries/0_stateless/02900_limit_by_query_stage.reference new file mode 100644 index 00000000000..b01fb1ca5b0 --- /dev/null +++ b/tests/queries/0_stateless/02900_limit_by_query_stage.reference @@ -0,0 +1,3 @@ +0 0 +0 0 +0 0 diff --git a/tests/queries/0_stateless/02900_limit_by_query_stage.sh b/tests/queries/0_stateless/02900_limit_by_query_stage.sh new file mode 100755 index 00000000000..d34d0d81bcd --- /dev/null +++ b/tests/queries/0_stateless/02900_limit_by_query_stage.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --stage with_mergeable_state --query 'SELECT dummy FROM system.one WHERE (dummy + dummy) >= 0 LIMIT 1 BY (dummy + dummy) + 0 AS l' +$CLICKHOUSE_CLIENT --stage with_mergeable_state_after_aggregation --query 'SELECT dummy FROM system.one WHERE (dummy + dummy) >= 0 LIMIT 1 BY (dummy + dummy) + 0 AS l' +$CLICKHOUSE_CLIENT --stage with_mergeable_state_after_aggregation_and_limit --query 'SELECT dummy FROM system.one WHERE (dummy + dummy) >= 0 LIMIT 1 BY (dummy + dummy) + 0 AS l' diff --git a/tests/queries/0_stateless/02903_parameterized_view_explain_ast.reference b/tests/queries/0_stateless/02903_parameterized_view_explain_ast.reference new file mode 100644 index 00000000000..6ee8d0c3d23 --- /dev/null +++ b/tests/queries/0_stateless/02903_parameterized_view_explain_ast.reference @@ -0,0 +1,12 @@ +CreateQuery numbers_pv (children 2) + Identifier numbers_pv + SelectWithUnionQuery (children 1) + ExpressionList (children 1) + SelectQuery (children 3) + ExpressionList (children 1) + Asterisk + TablesInSelectQuery (children 1) + TablesInSelectQueryElement (children 1) + TableExpression (children 1) + TableIdentifier numbers + QueryParameter amount:UInt8 diff --git a/tests/queries/0_stateless/02903_parameterized_view_explain_ast.sql b/tests/queries/0_stateless/02903_parameterized_view_explain_ast.sql new file mode 100644 index 00000000000..6af6dab2f4e --- /dev/null +++ b/tests/queries/0_stateless/02903_parameterized_view_explain_ast.sql @@ -0,0 +1,3 @@ +EXPLAIN AST +CREATE VIEW numbers_pv AS +SELECT * FROM numbers LIMIT {amount:UInt8}; \ No newline at end of file diff --git a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh index 074a3a6725e..095239954f4 100755 --- a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh +++ b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh @@ -10,7 +10,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # (i.e. 
"No active replica has part X or covering part") # does not appears as errors (level=Error), only as info message (level=Information). -$CLICKHOUSE_CLIENT -nm -q " +cluster=default +if [[ $($CLICKHOUSE_CLIENT -q "select count()>0 from system.clusters where cluster = 'test_cluster_database_replicated'") = 1 ]]; then + cluster=test_cluster_database_replicated +fi + +$CLICKHOUSE_CLIENT -nm --distributed_ddl_output_mode=none -q " drop table if exists rmt1; drop table if exists rmt2; @@ -21,7 +26,12 @@ $CLICKHOUSE_CLIENT -nm -q " insert into rmt1 values (2); system sync replica rmt1; - system stop pulling replication log rmt2; + -- SYSTEM STOP PULLING REPLICATION LOG does not waits for the current pull, + -- trigger it explicitly to 'avoid race' (though proper way will be to wait + -- for current pull in the StorageReplicatedMergeTree::getActionLock()) + system sync replica rmt2; + -- NOTE: CLICKHOUSE_DATABASE is required + system stop pulling replication log on cluster $cluster $CLICKHOUSE_DATABASE.rmt2; optimize table rmt1 final settings alter_sync=0, optimize_throw_if_noop=1; " || exit 1 diff --git a/tests/queries/0_stateless/02907_fromDaysSinceYearZero.reference b/tests/queries/0_stateless/02907_fromDaysSinceYearZero.reference index ac0f4662db2..3603ebe3e0d 100644 --- a/tests/queries/0_stateless/02907_fromDaysSinceYearZero.reference +++ b/tests/queries/0_stateless/02907_fromDaysSinceYearZero.reference @@ -1,22 +1,25 @@ -- negative tests --- const and non-const arguments -719527 2149-06-06 2149-06-06 -719528 1970-01-01 1970-01-01 -719529 1970-01-02 1970-01-02 -785062 2149-06-05 2149-06-05 -785063 2149-06-06 2149-06-06 -785064 1970-01-01 1970-01-01 -693960 2299-12-31 2299-12-31 -693961 1900-01-01 1900-01-01 -693962 1900-01-02 1900-01-02 -840056 2299-12-30 2299-12-30 -840057 2299-12-31 2299-12-31 -840058 2299-12-31 2299-12-31 --- integer types != UInt32 -255 1974-06-12 2299-12-31 -65535 1973-09-29 2299-12-31 -719529 1970-01-02 1970-01-02 +-- UInt32 and Int32 arguments, both const and non-const +719527 719527 2149-06-06 2149-06-06 2149-06-06 2149-06-06 +719528 719528 1970-01-01 1970-01-01 1970-01-01 1970-01-01 +719529 719529 1970-01-02 1970-01-02 1970-01-02 1970-01-02 +785062 785062 2149-06-05 2149-06-05 2149-06-05 2149-06-05 +785063 785063 2149-06-06 2149-06-06 2149-06-06 2149-06-06 +785064 785064 1970-01-01 1970-01-01 1970-01-01 1970-01-01 +693960 693960 2299-12-31 2299-12-31 2299-12-31 2299-12-31 +693961 693961 1900-01-01 1900-01-01 1900-01-01 1900-01-01 +693962 693962 1900-01-02 1900-01-02 1900-01-02 1900-01-02 +840056 840056 2299-12-30 2299-12-30 2299-12-30 2299-12-30 +840057 840057 2299-12-31 2299-12-31 2299-12-31 2299-12-31 +840058 840058 2299-12-31 2299-12-31 2299-12-31 2299-12-31 +-- integer types != (U)Int32 +255 127 1974-06-12 2299-12-31 1974-02-04 2299-12-31 +65535 32767 1973-09-29 2299-12-31 2063-06-17 2299-12-31 +719529 719529 1970-01-02 1970-01-02 1970-01-02 1970-01-02 -- NULL handling \N \N +-- ubsan bugs +2299-12-31 +2299-12-31 -- Alias 1973-10-01 diff --git a/tests/queries/0_stateless/02907_fromDaysSinceYearZero.sql b/tests/queries/0_stateless/02907_fromDaysSinceYearZero.sql index 83cfa01d5ed..9f356080fe8 100644 --- a/tests/queries/0_stateless/02907_fromDaysSinceYearZero.sql +++ b/tests/queries/0_stateless/02907_fromDaysSinceYearZero.sql @@ -7,32 +7,35 @@ SELECT fromDaysSinceYearZero(1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_ SELECT fromDaysSinceYearZero32(1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } SELECT fromDaysSinceYearZero('needs a number'); -- { 
serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT fromDaysSinceYearZero32('needs a number'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } -SELECT fromDaysSinceYearZero(-3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } -SELECT fromDaysSinceYearZero32(-3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT fromDaysSinceYearZero(-3); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT fromDaysSinceYearZero32(-3); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT '-- const and non-const arguments'; +SELECT '-- UInt32 and Int32 arguments, both const and non-const'; +SELECT 719527 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); -- outside Date's range +SELECT 719528 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 719529 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 785062 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 785063 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 785064 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); -- outside Date's range -SELECT 719527 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -- outside Date's range -SELECT 719528 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -SELECT 719529 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -SELECT 785062 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -SELECT 785063 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -SELECT 785064 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -- outside Date's range +SELECT 693960 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); -- outside Date32's range +SELECT 693961 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 693962 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 840056 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 840057 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 840058 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); -- outside Date32's range -SELECT 693960 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -- outside Date32's range -SELECT 693961 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -SELECT 693962 AS x, fromDaysSinceYearZero32(x), 
fromDaysSinceYearZero32(materialize(x)); -SELECT 840056 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -SELECT 840057 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -SELECT 840058 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -- outside Date32's range - -SELECT '-- integer types != UInt32'; -SELECT toUInt8(255) AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero32(x); -- outside Date's range for all UInt8-s -SELECT toUInt16(65535) AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero32(x); -- outside Date's range for all UInt16-s -SELECT toUInt64(719529) AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero32(x); -- something useful +SELECT '-- integer types != (U)Int32'; +SELECT toUInt8(255) AS u, toInt8(127) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero32(u), fromDaysSinceYearZero(s), fromDaysSinceYearZero32(s); -- outside Date's range for all (U)Int8-s +SELECT toUInt16(65535) AS u, toInt16(32767) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero32(u), fromDaysSinceYearZero(s), fromDaysSinceYearZero32(s); -- outside Date's range for all (U)Int16-s +SELECT toUInt64(719529) AS u, toInt64(719529) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero32(u), fromDaysSinceYearZero(s), fromDaysSinceYearZero32(s); -- something useful SELECT '-- NULL handling'; SELECT fromDaysSinceYearZero(NULL), fromDaysSinceYearZero32(NULL); +SELECT '-- ubsan bugs'; +SELECT fromDaysSinceYearZero32(2147483648); +SELECT fromDaysSinceYearZero32(3); + SELECT '-- Alias'; SELECT FROM_DAYS(1); diff --git a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference index d7850e59dec..af0e50ec332 100644 --- a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference +++ b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference @@ -1,5 +1,5 @@ Creating 300 tables -Making making 500 requests to system.replicas +Making making 200 requests to system.replicas Query system.replicas while waiting for other concurrent requests to finish 0 900 diff --git a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh index 70dc5f4d8c4..f93175529c0 100755 --- a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh +++ b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh @@ -8,18 +8,19 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e NUM_TABLES=300 -CONCURRENCY=500 +CONCURRENCY=200 echo "Creating $NUM_TABLES tables" function init_table() { + set -e i=$1 - curl $CLICKHOUSE_URL --silent --fail --data "CREATE TABLE test_02908_r1_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r1') ORDER BY tuple()" - curl $CLICKHOUSE_URL --silent --fail --data "CREATE TABLE test_02908_r2_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r2') ORDER BY tuple()" - curl $CLICKHOUSE_URL --silent --fail --data "CREATE TABLE test_02908_r3_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r3') ORDER BY tuple()" + curl $CLICKHOUSE_URL --silent --fail --show-error --data "CREATE TABLE test_02908_r1_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r1') ORDER BY tuple()" 2>&1 + curl $CLICKHOUSE_URL --silent --fail --show-error --data "CREATE TABLE test_02908_r2_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r2') 
ORDER BY tuple()" 2>&1 + curl $CLICKHOUSE_URL --silent --fail --show-error --data "CREATE TABLE test_02908_r3_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r3') ORDER BY tuple()" 2>&1 - curl $CLICKHOUSE_URL --silent --fail --data "INSERT INTO test_02908_r1_$i SELECT rand64() FROM numbers(5);" + curl $CLICKHOUSE_URL --silent --fail --show-error --data "INSERT INTO test_02908_r1_$i SELECT rand64() FROM numbers(5);" 2>&1 } export init_table; @@ -36,13 +37,13 @@ echo "Making making $CONCURRENCY requests to system.replicas" for i in `seq 1 $CONCURRENCY`; do - curl $CLICKHOUSE_URL --silent --fail --data "SELECT * FROM system.replicas WHERE database=currentDatabase() FORMAT Null;" & + curl $CLICKHOUSE_URL --silent --fail --show-error --data "SELECT * FROM system.replicas WHERE database=currentDatabase() FORMAT Null;" 2>&1 || echo "query $i failed" & done echo "Query system.replicas while waiting for other concurrent requests to finish" # lost_part_count column is read from ZooKeeper -curl $CLICKHOUSE_URL --silent --fail --data "SELECT sum(lost_part_count) FROM system.replicas WHERE database=currentDatabase();"; +curl $CLICKHOUSE_URL --silent --fail --show-error --data "SELECT sum(lost_part_count) FROM system.replicas WHERE database=currentDatabase();" 2>&1; # is_leader column is filled without ZooKeeper -curl $CLICKHOUSE_URL --silent --fail --data "SELECT sum(is_leader) FROM system.replicas WHERE database=currentDatabase();"; +curl $CLICKHOUSE_URL --silent --fail --show-error --data "SELECT sum(is_leader) FROM system.replicas WHERE database=currentDatabase();" 2>&1; wait; diff --git a/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.reference b/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.sql b/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.sql new file mode 100644 index 00000000000..84250059c58 --- /dev/null +++ b/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.sql @@ -0,0 +1,17 @@ +CREATE TABLE t_r1 +( + `id` UInt64, + `val` SimpleAggregateFunction(max, Nullable(String)) +) +ENGINE = ReplicatedAggregatingMergeTree('/tables/{database}/t', 'r1') +ORDER BY id +SETTINGS index_granularity = 8192; + +CREATE TABLE t_r2 +( + `id` UInt64, + `val` SimpleAggregateFunction(anyLast, Nullable(String)) +) +ENGINE = ReplicatedAggregatingMergeTree('/tables/{database}/t', 'r2') +ORDER BY id +SETTINGS index_granularity = 8192; -- { serverError INCOMPATIBLE_COLUMNS } diff --git a/tests/queries/0_stateless/02910_rocksdb_optimize.reference b/tests/queries/0_stateless/02910_rocksdb_optimize.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02910_rocksdb_optimize.sql b/tests/queries/0_stateless/02910_rocksdb_optimize.sql new file mode 100644 index 00000000000..575ba6db212 --- /dev/null +++ b/tests/queries/0_stateless/02910_rocksdb_optimize.sql @@ -0,0 +1,5 @@ +-- Tags: use-rocksdb + +CREATE TABLE dict (key UInt64, value String) ENGINE = EmbeddedRocksDB PRIMARY KEY key; +INSERT INTO dict SELECT number, toString(number) FROM numbers(1e3); +OPTIMIZE TABLE dict; diff --git a/tests/queries/0_stateless/02911_add_index_and_materialize_index.reference b/tests/queries/0_stateless/02911_add_index_and_materialize_index.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/tests/queries/0_stateless/02911_add_index_and_materialize_index.sql b/tests/queries/0_stateless/02911_add_index_and_materialize_index.sql new file mode 100644 index 00000000000..f8785ec9a38 --- /dev/null +++ b/tests/queries/0_stateless/02911_add_index_and_materialize_index.sql @@ -0,0 +1,18 @@ +-- Tags: no-replicated-database + +DROP TABLE IF EXISTS index_test; + +CREATE TABLE index_test +( + x UInt32, + y UInt32, + z UInt32 +) ENGINE = MergeTree order by x; + +ALTER TABLE index_test + ADD INDEX i_x mortonDecode(2, z).1 TYPE minmax GRANULARITY 1, + ADD INDEX i_y mortonDecode(2, z).2 TYPE minmax GRANULARITY 1, + MATERIALIZE INDEX i_x, + MATERIALIZE INDEX i_y; + +drop table index_test; diff --git a/tests/queries/0_stateless/02911_arrow_large_list.reference b/tests/queries/0_stateless/02911_arrow_large_list.reference new file mode 100644 index 00000000000..a6fbcce8c06 --- /dev/null +++ b/tests/queries/0_stateless/02911_arrow_large_list.reference @@ -0,0 +1,4 @@ +a +Array(Nullable(String)) +['00000','00001','00002'] +['10000','10001','10002'] diff --git a/tests/queries/0_stateless/02911_arrow_large_list.sh b/tests/queries/0_stateless/02911_arrow_large_list.sh new file mode 100755 index 00000000000..9b1c9a9d0ed --- /dev/null +++ b/tests/queries/0_stateless/02911_arrow_large_list.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Tags: no-fasttest +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# ## generate arrow file with python +# import pyarrow as pa +# schema = pa.schema([ pa.field('a', pa.large_list(pa.utf8())) ]) +# a = pa.array([["00000", "00001", "00002"], ["10000", "10001", "10002"]]) +# with pa.OSFile('arraydata.arrow', 'wb') as sink: +# with pa.ipc.new_file(sink, schema=schema) as writer: +# batch = pa.record_batch([a], schema=schema) +# writer.write(batch) + +# cat arraydata.arrow | base64 + +cat < /dev/null + +$CLICKHOUSE_CLIENT -q "DROP DATABASE $database_name SYNC;" + +for i in $(seq 1 3); do + $CLICKHOUSE_CLIENT -q "SELECT count() FROM $database_name.02911_backup_restore_keeper_map$i;" 2>&1 | grep -Fq "UNKNOWN_DATABASE" && echo 'OK' || echo 'ERROR' +done + +$CLICKHOUSE_CLIENT -q "RESTORE DATABASE $database_name FROM Disk('backups', '$backup_path');" > /dev/null + +for i in $(seq 1 3); do + $CLICKHOUSE_CLIENT -q "SELECT count() FROM $database_name.02911_backup_restore_keeper_map$i;" +done + +$CLICKHOUSE_CLIENT -q "DROP TABLE $database_name.02911_backup_restore_keeper_map3 SYNC;" + +$CLICKHOUSE_CLIENT -q "SELECT count() FROM $database_name.02911_backup_restore_keeper_map3;" 2>&1 | grep -Fq "UNKNOWN_TABLE" && echo 'OK' || echo 'ERROR' + +$CLICKHOUSE_CLIENT -q "RESTORE TABLE $database_name.02911_backup_restore_keeper_map3 FROM Disk('backups', '$backup_path');" > /dev/null + +for i in $(seq 1 3); do + $CLICKHOUSE_CLIENT -q "SELECT count() FROM $database_name.02911_backup_restore_keeper_map$i;" +done + +$CLICKHOUSE_CLIENT -q "DROP DATABASE $database_name SYNC;" \ No newline at end of file diff --git a/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.reference b/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.reference new file mode 100644 index 00000000000..61effdb19c4 --- /dev/null +++ b/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.reference @@ -0,0 +1,13 @@ +value +value1 value2 +value1 value1 value2 +NOT-FOUND-KEY is not in HTTP request headers +FORBIDDEN-KEY1 is in get_client_http_header_forbidden_headers +1 row1_value1 row1_value2 row1_value3 row1_value4 row1_value5 row1_value6 
row1_value7 +2 row2_value1 row2_value2 row2_value3 row2_value4 row2_value5 row2_value6 row2_value7 +3 +value_from_query_1 value_from_query_2 value_from_query_3 1 row1_value1 row1_value2 row1_value3 row1_value4 row1_value5 row1_value6 row1_value7 +value_from_query_1 value_from_query_2 value_from_query_3 2 row2_value1 row2_value2 row2_value3 row2_value4 row2_value5 row2_value6 row2_value7 +value_from_query_1 value_from_query_2 value_from_query_3 3 +http_value1 +http_value2 diff --git a/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.sh b/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.sh new file mode 100755 index 00000000000..505e017ee5d --- /dev/null +++ b/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +echo "SELECT getClientHTTPHeader('key')" | curl -s -H 'X-ClickHouse-User: default' -H 'X-ClickHouse-Key: ' -H 'key: value' 'http://localhost:8123/' -d @- + +echo "SELECT getClientHTTPHeader('key1'), getClientHTTPHeader('key2')" | curl -s -H 'X-Clickhouse-User: default' \ + -H 'X-ClickHouse-Key: ' -H 'key1: value1' -H 'key2: value2' 'http://localhost:8123/' -d @- + +echo "SELECT getClientHTTPHeader('test-' || 'key' || '-1'), getClientHTTPHeader('test-key-1'), getClientHTTPHeader('key2')" | curl -s -H 'X-Clickhouse-User: default' \ + -H 'X-ClickHouse-Key: ' -H 'test-key-1: value1' -H 'key2: value2' 'http://localhost:8123/' -d @- + +#Code: 36. DB::Exception: NOT-FOUND-KEY is not in HTTP request headers +echo "SELECT getClientHTTPHeader('NOT-FOUND-KEY')"| curl -s -H 'X-Clickhouse-User: default' \ + -H 'X-ClickHouse-Key: ' -H 'key1: value1' -H 'key2: value2' 'http://localhost:8123/' -d @- | grep -o -e "NOT-FOUND-KEY is not in HTTP request headers" + +#Code: 36. DB::Exception: The header FORBIDDEN-KEY is in headers_forbidden_to_return, you can config it in config file. 
+echo "SELECT getClientHTTPHeader('FORBIDDEN-KEY1')" | curl -s -H 'X-ClickHouse-User: default' -H 'X-ClickHouse-Key: ' \ + -H 'FORBIDDEN-KEY1: forbbiden1' 'http://localhost:8123/' -d @- | grep -o -e "FORBIDDEN-KEY1 is in get_client_http_header_forbidden_headers" + +db_name=${CLICKHOUSE_DATABASE} + +$CLICKHOUSE_CLIENT -q "CREATE DATABASE IF NOT EXISTS ${db_name};" + +$CLICKHOUSE_CLIENT -q "CREATE TABLE ${db_name}.02884_get_http_header + (id UInt32, + http_key1 String DEFAULT getClientHTTPHeader('http_header_key1'), + http_key2 String DEFAULT getClientHTTPHeader('http_header_key2'), + http_key3 String DEFAULT getClientHTTPHeader('http_header_key3'), + http_key4 String DEFAULT getClientHTTPHeader('http_header_key4'), + http_key5 String DEFAULT getClientHTTPHeader('http_header_key5'), + http_key6 String DEFAULT getClientHTTPHeader('http_header_key6'), + http_key7 String DEFAULT getClientHTTPHeader('http_header_key7') + ) + Engine=MergeTree() + ORDER BY id" + +#Insert data via http request +echo "INSERT INTO ${db_name}.02884_get_http_header (id) values (1)" | curl -s -H 'X-ClickHouse-User: default' -H 'X-ClickHouse-Key: ' \ + -H 'http_header_key1: row1_value1'\ + -H 'http_header_key2: row1_value2'\ + -H 'http_header_key3: row1_value3'\ + -H 'http_header_key4: row1_value4'\ + -H 'http_header_key5: row1_value5'\ + -H 'http_header_key6: row1_value6'\ + -H 'http_header_key7: row1_value7' 'http://localhost:8123/' -d @- + +echo "INSERT INTO ${db_name}.02884_get_http_header (id) values (2)" | curl -s -H 'X-ClickHouse-User: default' -H 'X-ClickHouse-Key: ' \ + -H 'http_header_key1: row2_value1'\ + -H 'http_header_key2: row2_value2'\ + -H 'http_header_key3: row2_value3'\ + -H 'http_header_key4: row2_value4'\ + -H 'http_header_key5: row2_value5'\ + -H 'http_header_key6: row2_value6'\ + -H 'http_header_key7: row2_value7' 'http://localhost:8123/' -d @- + +$CLICKHOUSE_CLIENT -q "SELECT id, http_key1, http_key2, http_key3, http_key4, http_key5, http_key6, http_key7 FROM ${db_name}.02884_get_http_header ORDER BY id;" +#Insert data via tcp client +$CLICKHOUSE_CLIENT --param_db="$db_name" -q "INSERT INTO ${db_name}.02884_get_http_header (id) values (3)" +$CLICKHOUSE_CLIENT --param_db="$db_name" -q "SELECT * FROM ${db_name}.02884_get_http_header where id = 3" + +echo "SELECT getClientHTTPHeader('key_from_query_1'), getClientHTTPHeader('key_from_query_2'), getClientHTTPHeader('key_from_query_3'), * FROM ${db_name}.02884_get_http_header ORDER BY id" | curl -s -H 'X-Clickhouse-User: default' \ + -H 'X-ClickHouse-Key: ' -H 'key_from_query_1: value_from_query_1' -H 'key_from_query_2: value_from_query_2' -H 'key_from_query_3: value_from_query_3' 'http://localhost:8123/' -d @- + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS ${db_name}.02884_get_http_header" + +$CLICKHOUSE_CLIENT -q "CREATE TABLE IF NOT EXISTS ${db_name}.02884_header_from_table (header_name String) Engine=Memory" +$CLICKHOUSE_CLIENT -q "INSERT INTO ${db_name}.02884_header_from_table values ('http_key1'), ('http_key2')" + +echo "SELECT getClientHTTPHeader(header_name) as value from (select * FROM ${db_name}.02884_header_from_table) order by value" | curl -s -H 'X-Clickhouse-User: default' \ + -H 'X-ClickHouse-Key: ' -H 'http_key1: http_value1' -H 'http_key2: http_value2' 'http://localhost:8123/' -d @- + +$CLICKHOUSE_CLIENT -q "DROP DATABASE ${db_name}" diff --git a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference new file mode 100644 index 
00000000000..976c1503b02 --- /dev/null +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference @@ -0,0 +1,25 @@ +-- { echoOn } +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS NULL AND t2.x IS NULL)) ORDER BY t1.x NULLS LAST; +2 2 2 2 +3 3 3 33 +\N \N \N \N +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.x IS NULL AND t1.y <=> t2.y AND t2.x IS NULL) ORDER BY t1.x NULLS LAST; +1 42 4 42 +2 2 2 2 +3 3 3 33 +\N \N \N \N +SELECT * FROM t1 JOIN t2 ON (t1.x = t2.x OR t1.x IS NULL AND t2.x IS NULL) AND t1.y <=> t2.y ORDER BY t1.x NULLS LAST; +2 2 2 2 +\N \N \N \N +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.y <=> t2.y OR (t1.x IS NULL AND t1.y IS NULL AND t2.x IS NULL AND t2.y IS NULL)) ORDER BY t1.x NULLS LAST; +1 42 4 42 +2 2 2 2 +3 3 3 33 +\N \N \N \N +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS NULL AND t2.x IS NULL)) AND (t1.y == t2.y OR (t1.y IS NULL AND t2.y IS NULL)) AND COALESCE(t1.x, 0) != 2 ORDER BY t1.x NULLS LAST; +\N \N \N \N +SELECT x = y OR (x IS NULL AND y IS NULL) FROM t1 ORDER BY x NULLS LAST; +0 +1 +1 +1 diff --git a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql new file mode 100644 index 00000000000..6a98a7bb57b --- /dev/null +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x Nullable(Int64), y Nullable(UInt64)) ENGINE = TinyLog; +CREATE TABLE t2 (x Nullable(Int64), y Nullable(UInt64)) ENGINE = TinyLog; + +INSERT INTO t1 VALUES (1,42), (2,2), (3,3), (NULL,NULL); +INSERT INTO t2 VALUES (NULL,NULL), (2,2), (3,33), (4,42); + +SET allow_experimental_analyzer = 1; + +-- { echoOn } +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS NULL AND t2.x IS NULL)) ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.x IS NULL AND t1.y <=> t2.y AND t2.x IS NULL) ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x = t2.x OR t1.x IS NULL AND t2.x IS NULL) AND t1.y <=> t2.y ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.y <=> t2.y OR (t1.x IS NULL AND t1.y IS NULL AND t2.x IS NULL AND t2.y IS NULL)) ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS NULL AND t2.x IS NULL)) AND (t1.y == t2.y OR (t1.y IS NULL AND t2.y IS NULL)) AND COALESCE(t1.x, 0) != 2 ORDER BY t1.x NULLS LAST; + +SELECT x = y OR (x IS NULL AND y IS NULL) FROM t1 ORDER BY x NULLS LAST; +-- { echoOff } + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/tests/queries/0_stateless/02911_support_alias_column_in_indices.reference b/tests/queries/0_stateless/02911_support_alias_column_in_indices.reference new file mode 100644 index 00000000000..883966ce6b5 --- /dev/null +++ b/tests/queries/0_stateless/02911_support_alias_column_in_indices.reference @@ -0,0 +1,55 @@ +Expression ((Projection + Before ORDER BY)) + Filter (WHERE) + ReadFromMergeTree (02911_support_alias_column_in_indices.test1) + Indexes: + PrimaryKey + Keys: + c + Condition: (plus(c, 1) in [11, +Inf)) + Parts: 1/2 + Granules: 1/2 + Skip + Name: i + Description: minmax GRANULARITY 1 + Parts: 1/1 + Granules: 1/1 +Expression ((Project names + Projection)) + Filter ((WHERE + Change column names to column identifiers)) + ReadFromMergeTree (02911_support_alias_column_in_indices.test1) + Indexes: + PrimaryKey + Keys: + c + Condition: (_CAST(plus(c, \'UInt64\'), 1) in [11, +Inf)) + Parts: 1/2 + Granules: 1/2 + Skip + 
Name: i + Description: minmax GRANULARITY 1 + Parts: 1/1 + Granules: 1/1 +Expression ((Projection + Before ORDER BY)) + Filter (WHERE) + ReadFromMergeTree (02911_support_alias_column_in_indices.test2) + Indexes: + PrimaryKey + Keys: + c + Condition: (plus(plus(c, 1), 1) in [16, +Inf)) + Parts: 1/2 + Granules: 1/2 + Skip + Name: i + Description: minmax GRANULARITY 1 + Parts: 1/1 + Granules: 1/1 +Expression ((Project names + Projection)) + Filter ((WHERE + Change column names to column identifiers)) + ReadFromMergeTree (02911_support_alias_column_in_indices.test2) + Indexes: + PrimaryKey + Keys: + c + Condition: (_CAST(plus(_CAST(plus(c, \'UInt64\'), 1), \'UInt64\'), 1) in [16, +Inf)) + Parts: 1/2 + Granules: 1/2 diff --git a/tests/queries/0_stateless/02911_support_alias_column_in_indices.sql b/tests/queries/0_stateless/02911_support_alias_column_in_indices.sql new file mode 100644 index 00000000000..93d9a1670db --- /dev/null +++ b/tests/queries/0_stateless/02911_support_alias_column_in_indices.sql @@ -0,0 +1,34 @@ +-- Tags: no-parallel + +drop database if exists 02911_support_alias_column_in_indices; +create database 02911_support_alias_column_in_indices; +use 02911_support_alias_column_in_indices; + +create table test1 +( + c UInt32, + a alias c + 1, + index i (a) type minmax +) engine = MergeTree order by c; + +insert into test1 select * from numbers(10); +insert into test1 select * from numbers(11, 20); + +explain indexes = 1 select * from test1 where a > 10 settings allow_experimental_analyzer = 0; +explain indexes = 1 select * from test1 where a > 10 settings allow_experimental_analyzer = 1; + +create table test2 +( + c UInt32, + a1 alias c + 1, + a2 alias a1 + 1, + index i (a2) type minmax +) engine = MergeTree order by c; + +insert into test2 select * from numbers(10); +insert into test2 select * from numbers(11, 20); + +explain indexes = 1 select * from test2 where a2 > 15 settings allow_experimental_analyzer = 0; +explain indexes = 1 select * from test2 where a2 > 15 settings allow_experimental_analyzer = 1; -- buggy, analyzer does not pick up index i + +drop database 02911_support_alias_column_in_indices; diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_5.reference b/tests/queries/0_stateless/02915_analyzer_fuzz_5.reference new file mode 100644 index 00000000000..f2386499865 --- /dev/null +++ b/tests/queries/0_stateless/02915_analyzer_fuzz_5.reference @@ -0,0 +1,2 @@ +limit w/ GROUP BY 0 0 +limit w/ GROUP BY 0 0 diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_5.sql b/tests/queries/0_stateless/02915_analyzer_fuzz_5.sql new file mode 100644 index 00000000000..29d06d2c315 --- /dev/null +++ b/tests/queries/0_stateless/02915_analyzer_fuzz_5.sql @@ -0,0 +1,6 @@ +set allow_experimental_analyzer=1; +SET max_block_size = 1000; +SET max_threads = 4; +SET max_rows_to_group_by = 3000, group_by_overflow_mode = 'any'; +SELECT 'limit w/ GROUP BY', count(NULL), number FROM remote('127.{1,2}', view(SELECT intDiv(number, 2147483647) + AS number FROM numbers(10))) GROUP BY number WITH ROLLUP ORDER BY count() ASC, number DESC NULLS LAST SETTINGS limit = 2; diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_6.reference b/tests/queries/0_stateless/02915_analyzer_fuzz_6.reference new file mode 100644 index 00000000000..b5c035d8576 --- /dev/null +++ b/tests/queries/0_stateless/02915_analyzer_fuzz_6.reference @@ -0,0 +1,2 @@ +[(0,0)] +[(1,1)] diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql b/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql new file mode 
100644 index 00000000000..b4eb1b4aff4 --- /dev/null +++ b/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql @@ -0,0 +1,19 @@ +set allow_suspicious_low_cardinality_types=1; +set allow_experimental_analyzer=1; + +create table tab (x LowCardinality(Nullable(Float64))) engine = MergeTree order by x settings allow_nullable_key=1; +insert into tab select number from numbers(2); +SELECT [(arrayJoin([x]), x)] AS row FROM tab; + + +CREATE TABLE t__fuzz_307 (`k1` DateTime, `k2` LowCardinality(Nullable(Float64)), `v` Nullable(UInt32)) ENGINE = + ReplacingMergeTree ORDER BY (k1, k2) settings allow_nullable_key=1; + insert into t__fuzz_307 select * from generateRandom() limit 10; + SELECT arrayJoin([tuple([(toNullable(NULL), -9223372036854775808, toNullable(3.4028234663852886e38), arrayJoin( +[tuple([(toNullable(NULL), 2147483647, toNullable(0.5), k2)])]), k2)])]) AS row, arrayJoin([(1024, k2)]), -9223372036854775807, 256, tupleElement(row, 1048576, 1024) AS k FROM t__fuzz_307 FINAL ORDER BY (toNullable('655.36'), 2, toNullable +('0.2147483648'), k2) ASC, toNullable('102.3') DESC NULLS FIRST, '10.25' DESC, k ASC NULLS FIRST format Null; + +CREATE TABLE t__fuzz_282 (`k1` DateTime, `k2` LowCardinality(Nullable(Float64)), `v` Nullable(UInt32)) ENGINE = ReplacingMergeTree ORDER BY (k1, k2) SETTINGS allow_nullable_key = 1; +INSERT INTO t__fuzz_282 VALUES (1, 2, 3) (1, 2, 4) (2, 3, 4), (2, 3, 5); + +SELECT arrayJoin([tuple([(toNullable(NULL), -9223372036854775808, toNullable(3.4028234663852886e38), arrayJoin([tuple([(toNullable(NULL), 2147483647, toNullable(0.5), k2)])]), k2)])]) AS row, arrayJoin([(1024, k2)]), -9223372036854775807, 256, tupleElement(row, 1048576, 1024) AS k FROM t__fuzz_282 FINAL ORDER BY (toNullable('655.36'), 2, toNullable('0.2147483648'), k2) ASC, toNullable('102.3') DESC NULLS FIRST, '10.25' DESC, k ASC NULLS FIRST format Null; diff --git a/tests/queries/0_stateless/02915_move_partition_inactive_replica.reference b/tests/queries/0_stateless/02915_move_partition_inactive_replica.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/02915_move_partition_inactive_replica.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql b/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql new file mode 100644 index 00000000000..3b30a2b6c2c --- /dev/null +++ b/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql @@ -0,0 +1,57 @@ +-- Tags: no-parallel + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.from_0; +drop table if exists shard_1.from_0; +drop table if exists shard_0.from_1; +drop table if exists shard_1.from_1; +drop table if exists shard_0.to; +drop table if exists shard_1.to; + +create table shard_0.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; +create table shard_1.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +create table shard_0.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; +create table shard_1.from_1 (x UInt32) engine = 
ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +insert into shard_0.from_0 select number from numbers(10); +insert into shard_0.from_0 select number + 10 from numbers(10); + +insert into shard_0.from_1 select number + 20 from numbers(10); +insert into shard_0.from_1 select number + 30 from numbers(10); + +system sync replica shard_1.from_0; +system sync replica shard_1.from_1; + + +create table shard_0.to (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/to_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +create table shard_1.to (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/to_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +detach table shard_1.to; + +alter table shard_0.from_0 on cluster test_cluster_two_shards_different_databases move partition tuple() to table shard_0.to format Null settings distributed_ddl_output_mode='never_throw', distributed_ddl_task_timeout = 1; + +alter table shard_0.from_1 on cluster test_cluster_two_shards_different_databases move partition tuple() to table shard_0.to format Null settings distributed_ddl_output_mode='never_throw', distributed_ddl_task_timeout = 1; + +OPTIMIZE TABLE shard_0.from_0; +OPTIMIZE TABLE shard_1.from_0; +OPTIMIZE TABLE shard_0.from_1; +OPTIMIZE TABLE shard_1.from_1; +OPTIMIZE TABLE shard_0.to; + +system restart replica shard_0.to; + +select sleep(2); + +attach table shard_1.to; + +drop table if exists shard_0.from_0; +drop table if exists shard_1.from_0; +drop table if exists shard_0.from_1; +drop table if exists shard_1.from_1; +drop table if exists shard_0.to; +drop table if exists shard_1.to; + diff --git a/tests/queries/0_stateless/02916_addcolumn_nested.reference b/tests/queries/0_stateless/02916_addcolumn_nested.reference new file mode 100644 index 00000000000..7d79cd8731f --- /dev/null +++ b/tests/queries/0_stateless/02916_addcolumn_nested.reference @@ -0,0 +1,4 @@ +CREATE TABLE default.nested_table\n(\n `id` UInt64,\n `first` Nested(a Int8, b String)\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.nested_table\n(\n `id` UInt64,\n `second.c` Array(Int8),\n `second.d` Array(String),\n `first` Nested(a Int8, b String)\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.nested_table\n(\n `third` Nested(e Int8, f String),\n `id` UInt64,\n `second.c` Array(Int8),\n `second.d` Array(String),\n `first` Nested(a Int8, b String)\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.nested_table\n(\n `third` Nested(e Int8, f String),\n `id` UInt64,\n `second.c` Array(Int8),\n `second.d` Array(String),\n `first` Nested(a Int8, b String),\n `fourth.g` Array(Int8),\n `fourth.h` Array(String)\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02916_addcolumn_nested.sql b/tests/queries/0_stateless/02916_addcolumn_nested.sql new file mode 100644 index 00000000000..1e64fca6a15 --- /dev/null +++ b/tests/queries/0_stateless/02916_addcolumn_nested.sql @@ -0,0 +1,22 @@ +SET flatten_nested = 0; + +DROP TABLE IF EXISTS nested_table; +CREATE TABLE nested_table (id UInt64, first Nested(a Int8, b String)) ENGINE = MergeTree() ORDER BY id; +SHOW CREATE nested_table; + +SET 
flatten_nested = 1; + +ALTER TABLE nested_table ADD COLUMN second Nested(c Int8, d String) AFTER id; +SHOW CREATE nested_table; + +SET flatten_nested = 0; + +ALTER TABLE nested_table ADD COLUMN third Nested(e Int8, f String) FIRST; +SHOW CREATE nested_table; + +SET flatten_nested = 1; + +ALTER TABLE nested_table ADD COLUMN fourth Nested(g Int8, h String); +SHOW CREATE nested_table; + +DROP TABLE nested_table; diff --git a/tests/queries/0_stateless/02916_glogal_in_cancel.reference b/tests/queries/0_stateless/02916_glogal_in_cancel.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02916_glogal_in_cancel.sql b/tests/queries/0_stateless/02916_glogal_in_cancel.sql new file mode 100644 index 00000000000..ad54f1ecdec --- /dev/null +++ b/tests/queries/0_stateless/02916_glogal_in_cancel.sql @@ -0,0 +1,2 @@ +set max_execution_time = 0.5, timeout_overflow_mode = 'break'; +SELECT number FROM remote('127.0.0.{3|2}', numbers(1)) WHERE number GLOBAL IN (SELECT number FROM numbers(10000000000.)) format Null; diff --git a/tests/queries/0_stateless/02916_joinget_dependency.reference b/tests/queries/0_stateless/02916_joinget_dependency.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/02916_joinget_dependency.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02916_joinget_dependency.sh b/tests/queries/0_stateless/02916_joinget_dependency.sh new file mode 100755 index 00000000000..6477ae8c967 --- /dev/null +++ b/tests/queries/0_stateless/02916_joinget_dependency.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# We test the dependency on the DROP + +$CLICKHOUSE_CLIENT -nm -q " + DROP TABLE IF EXISTS Sub_distributed; + DROP TABLE IF EXISTS Sub; + DROP TABLE IF EXISTS Mapping; + + CREATE TABLE Mapping (Id UInt64, RegionId UInt64) ENGINE = Join(ANY,LEFT,Id); + INSERT INTO Mapping VALUES (1,1); + CREATE TABLE Sub (Id UInt64, PropertyId UInt64) ENGINE = MergeTree() PRIMARY KEY (Id) ORDER BY (Id); + CREATE TABLE Sub_distributed (Id UInt64, PropertyId UInt64)ENGINE = Distributed('test_shard_localhost', $CLICKHOUSE_DATABASE, Sub, joinGet('$CLICKHOUSE_DATABASE.Mapping','RegionId',PropertyId));" + +$CLICKHOUSE_CLIENT -q " + DROP TABLE Mapping; +" 2>&1 | grep -cm1 "HAVE_DEPENDENT_OBJECTS" + +$CLICKHOUSE_CLIENT -nm -q " + DROP TABLE Sub_distributed; + DROP TABLE Sub; + DROP TABLE Mapping; +" \ No newline at end of file diff --git a/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.reference b/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.reference new file mode 100644 index 00000000000..0cfbf08886f --- /dev/null +++ b/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.reference @@ -0,0 +1 @@ +2 diff --git a/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.sql b/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.sql new file mode 100644 index 00000000000..010e29a34e8 --- /dev/null +++ b/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.sql @@ -0,0 +1,26 @@ +-- Tags: no-replicated-database, no-fasttest +-- Tag no-replicated-database: different number of replicas + +create table tableIn (n int) + engine=ReplicatedMergeTree('/test/02916/{database}/table', '1') + order by tuple() + settings + storage_policy='s3_cache', + allow_remote_fs_zero_copy_replication=1, + 
sleep_before_commit_local_part_in_replicated_table_ms=5000; +create table tableOut (n int) + engine=ReplicatedMergeTree('/test/02916/{database}/table', '2') + order by tuple() + settings + storage_policy='s3_cache', + allow_remote_fs_zero_copy_replication=1; + +SET send_logs_level='error'; + +insert into tableIn values(1); +insert into tableIn values(2); +system sync replica tableOut; +select count() from tableOut; + +drop table tableIn; +drop table tableOut; diff --git a/tests/queries/0_stateless/02916_set_formatting.reference b/tests/queries/0_stateless/02916_set_formatting.reference new file mode 100644 index 00000000000..34ff52365f9 --- /dev/null +++ b/tests/queries/0_stateless/02916_set_formatting.reference @@ -0,0 +1,11 @@ +SET additional_table_filters = {\'kjsnckjn\':\'ksanmn\', \'dkm\':\'dd\'} +SELECT v FROM t1 SETTINGS additional_table_filters = {\'default.t1\':\'s\'} +Row 1: +────── +statement: CREATE VIEW default.v1 +( + `v` UInt64 +) AS +SELECT v +FROM default.t1 +SETTINGS additional_table_filters = {'default.t1':'s != \'s1%\''} diff --git a/tests/queries/0_stateless/02916_set_formatting.sql b/tests/queries/0_stateless/02916_set_formatting.sql new file mode 100644 index 00000000000..10b875293f1 --- /dev/null +++ b/tests/queries/0_stateless/02916_set_formatting.sql @@ -0,0 +1,13 @@ +SELECT formatQuerySingleLine('set additional_table_filters = {\'kjsnckjn\': \'ksanmn\', \'dkm\': \'dd\'}'); +SELECT formatQuerySingleLine('SELECT v FROM t1 SETTINGS additional_table_filters = {\'default.t1\': \'s\'}'); + +DROP TABLE IF EXISTS t1; +DROP VIEW IF EXISTS v1; + +CREATE TABLE t1 (v UInt64, s String) ENGINE=MergeTree() ORDER BY v; +CREATE VIEW v1 (v UInt64) AS SELECT v FROM t1 SETTINGS additional_table_filters = {'default.t1': 's != \'s1%\''}; + +SHOW CREATE TABLE v1 FORMAT Vertical; + +DROP VIEW v1; +DROP TABLE t1; diff --git a/tests/queries/0_stateless/02918_analyzer_to_ast_crash.reference b/tests/queries/0_stateless/02918_analyzer_to_ast_crash.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02918_analyzer_to_ast_crash.sql b/tests/queries/0_stateless/02918_analyzer_to_ast_crash.sql new file mode 100644 index 00000000000..274f74d6ad1 --- /dev/null +++ b/tests/queries/0_stateless/02918_analyzer_to_ast_crash.sql @@ -0,0 +1,5 @@ +WITH + x AS (SELECT in((SELECT * FROM y))), + y AS (SELECT 1) +SELECT * FROM x; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + diff --git a/tests/queries/0_stateless/02918_fuzzjson_table_function.reference b/tests/queries/0_stateless/02918_fuzzjson_table_function.reference new file mode 100644 index 00000000000..1b5c6f46f77 --- /dev/null +++ b/tests/queries/0_stateless/02918_fuzzjson_table_function.reference @@ -0,0 +1,152 @@ +{"QJC4GhRByEtEAjku":{}} +{} +{} +{} +{} +{} +{} +{} +{} +{} +{"Cicktxh":true, "SpByjZKtr2VAyHCO":false} +{"ClickHouse":"Is Fast", "VO7TCIkyu1akvN":{}} +{"ClickHouse":"Is Fast"} +{"ISlW1DB":"Is Fast", "5j4ATkq":{}} +{"ClickHouse":false} +{"ClickHouse":"Is Fast", "tRSz":13522460516091116060} +{"ClickHouse":"Is Fast"} +{"ClickHouse":"Is Fast"} +{"CzTcYkQdSce":"Is Fast"} +{"ClickHouse":"Is Fast"} +{"ClickHouse":false} +{"ClickHouse":"Is Fast"} +{"ClickHouse":"Is Fast", "jql0YAY":[]} +{"ClickHouse":"Is Fast"} +{"ClickHouse":"Is Fast"} +{"ClickHouse":"Is Fast", "lF2vXus":false} +{"ClickHouse":"Is Fast"} +{"ClickHouse":"Is Fast"} +{"ClickHouse":"Is Fast"} +{"QJiGcwkonghk":"Is Fast"} +{"sidetx":[{"name":"Alice"}, {"R6Vm":false}, {}], "SpByjZKtr2VAyHCO":false} +{"students":[{"name":"Alice"}, 
{"name":"Bob"}]} +{"students":[{"name":"Alice"}, {"name":true}]} +{"students":[{"name":"Alice"}, {"name":"Bob"}]} +{"ISuW1":[{"naYmS":"Alice", "hzTDYZQdScOct0RS":[]}, {"name":"Bob"}]} +{"students":[{"name":"Alice"}, {"name":"Bob"}], "jql0YAY":[]} +{"students":[{"name":"Alice"}, {"name":"Bob"}], "lF2vXus":false} +{"students":[{"QJmGe":"Alice"}, {"name":"Bob"}]} +{"students":[{"name":"Alice"}, {"name":"Bob"}]} +{"kXtdet":[{"name":"Alice"}, {"name":"Bob"}]} +{"students":[{"name":"Alice"}, {"name":"Bob"}], "Qcm4":{}} +{"students":[{"name":"Alice"}, {"PmjG":"Bob"}]} +{"students":[{"name":6128974479331836233}, {"name":"Bob"}]} +{"sGudyet5u":[{"name":"Alice"}, {"name":"Bob"}, {}]} +{"students":[{"name":"Alice"}, {"name":"Bob"}]} +{"students":[{"Kamc":true}, {"name":"rKKN+5#NKEi-uf5U"}]} +{"students":[{"name":"Alice"}, {"nPL6":1455900058404521160}]} +{"students":[{"name":"Alice", "dzm5g9aPI21iIP9":[]}, {"name":"Bob"}]} +{"students":[{"n4z4N":true, "uJrCh4ifo":{}}, {"name":"Bob", "kMnsl0BBFk":[]}], "kG21YiAcUKpcUS2":true} +{"students":[{"name":"Alice"}, {"name":"Bob", "wQCN":{}}]} +{"schedule":[{"breakfast":"7am", "5ZB35":{"nHypO":[]}}, {"lunch":"12pm"}]} +{"schedule":[{"bdvelrflX":"7am", "5ZB35":{"nHypO":[]}}, {"23slh":"12pm"}]} +{"tkdu8hl":[{"bdvelrflX":"7am", "5ZB35":{"nHypO":[]}}, {"23slh":"12pm"}]} +{"tkdu8hl":[{"bdvelrflX":"7am", "5mkj5":{"nHypO":[]}}, {"23slh":"12pm"}], "n8HX5N6DVpBa":["fYOPSVVK*Brv_-AajZwT"]} +{"tkdu8hl":[{"nQ4PePPfX":16091119822740071899, "5mkj5":{"npOE":[[]]}}, {"23slh":"12pm"}], "nHXa6BVq8E":["fYOPSVVK*Brv_-AajZwT"], "BHUNvB8sHk8ts6":true} +{"tkdu8hl":[{"nQ4PePPfX":16091119822740071899, "5mkj5":{"G71D":[[], []]}}, {"23slh":"12pm"}], "FOIRaJ6VqVCKD0E":["fYOPSVVK*Brv_-AajZwT", 17244534201851710710], "BHUNvB8sHk8ts6":true, "qnk47QAn0yQ3ESEgO":true} +{"tkdu8hl":[{"nQ4PePPfX":16091119822740071899, "5mkj5":{"G71D":[[], []]}}, {"23slh":"-plal2e"}], "FOIRaJ6VqVCKD0E":["fYOPSVVK*Brv_-AajZwT", 17244534201851710710], "BHUNvB8sHk8ts6":true, "qnk47QAn0yQ3ESEgO":true} +{"tkdu8hl":[{"nQ4PePPfX":16091119822740071899, "5mkj5":{"Gpq7":[[], [false]]}, "YgbEtY":true}, {"23slh":false}], "FOIRaJ6VqVCKD0E":["fYOPSVVK*Brv_-AajZwT", 17244534201851710710], "ByRvBC4H0kgydJ":false, "zqokAQz8z0KnPOBrs8":true} +{"kzcUZOl":[{"nQ4PePPfX":16091119822740071899, "Ekmj":{"lBKR":[[], [false], []], "dLc32r2f":{}}, "xbguW":"vGV&bitEteAH%-Eigg_7VlejYuHP"}, {"23slh":false}, {}], "FOIRaJ6VqVCKD0E":["fYOPSVVK*Brv_-AajZwT", 17244534201851710710], "ByRvBC4H0kgydJ":false, "zqokAQz8z0KnPOBrs8":true} +{"kzcUZOl":[{"nQ4PePPfX":16091119822740071899, "Ekmj":{"lBKR":[[3774015142547830176], [false], []], "rCmVPvvf":{"wU6YWjag":[]}}, "xb7uW":"pWUTs&ikTCNRQt"}, {"23slh":false}, {}], "h3IK06PQGfCRQ":[false, false], "SyRRLBzEjy8YJ":false, "zqokAQz8z0KnPOBrs8":true} +{"ukrzZl":[{"nQ4PePPfX":16091119822740071899, "5kmG":{"lBKR":[[14228925827882160318, "TpCrsW@11Io1sSu1@nFm"], [true], []], "rOmNvc":{"wU6YWjag":[], "pIK6tGXUp1gekWViJ":{}}, "igqgnb":[]}, "xb7uW":"pWUTs&ikTCNRQt", "jBT1ImcYb77bl2":true}, {"dsyf":true}, {}, {"qOElRhbehMXQNrln":{"PDoZa8OJHh1al59Ggq":{}}}], "h3IK06PQGfCRQ":[false, false], "SyRRLBzEjy8YJ":false, "zqokAQz8z0KnPOBrs8":true} +{"ukrzZl":[{"nQ4PePPfX":16091119822740071899, "5kmG":{"lBKR":[[14228925827882160318, "TpCrsW@11Io1sSu1@nFm"], [true], []], "rOmNvc":{"wU6YWjag":[], "pIK6tGXUp1gekWViJ":{}}, "igqgnb":[]}, "xb7uW":"pWUTs&ikTCNRQt", "jBT1ImcYb77bl2":true}, {"dsyf":18233789955605096603}, {}, {"qOElRhbehMXQNrln":{"PoZngOHXMaWGRJq":{"QlnPi9zKoBtW2nGWB":"LgFazuGX*CuDy7X%4hkEmykg@6"}}}], 
"h3IK06PQGfCRQ":[false, false], "SyRRLBzEjy8YJ":false, "zQO8BA7nazqKW7CRP8":true} +{"ukrzZl":[{"nQ4PePPfX":16091119822740071899, "5kmG":{"lBKR":[[16730631663303458403, "TpCrsW@11Io1sSu1@nFm"], [true], []], "rOmNvc":{"wU6YWjag":[false], "pIK6tGXUp1gekWViJ":{}}, "igqgnb":[]}, "xb7uW":"pWUTs&ikTCNRQt", "jBT1ImcYb77bl2":true}, {"dsyf":18233789955605096603, "mmCFLovnBThJPtpQG0Tv":false}, {}, {"qOElRhbehMXQNrln":{"PoZngOHXMaWGRJq":{"QlnPi9zKoBtW2nGWB":"LgFazuGX*CuDy7X%4hkEmykg@6"}}}, {"sx21nRmS69bXRo":[]}], "h3IK06PQGfCRQ":[false, "HjPw@G1Icu#dn"], "SyRRLBzEjy8YJ":false, "zQO8BA7nazqKW7CRP8":true} +{"ukrzZl":[{"nQ4PePPfX":16091119822740071899, "5kmG":{"lBKR":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D"], [true], [], []], "rOmNvc":{"wOWxSWQf":[false], "pIK6tGXUp1gekWViJ":{}, "pFKIzg3HC":14538916875375166988}, "igqgnb":[]}, "xb7uW":"pWUTs&ikTCNRQt", "jlT1T35c27wbl2":true}, {"dsyf":18233789955605096603, "mYikENkiDhPRtQHOr":true}, {}, {"qOElRhbehMXQNrln":{"4GBqJBrnoOHJW5GA":{"QaPSqINbjb7nGx9qz":8975023301134451623, "JWOUP4WB1":14622543266409160782}}}, {"sx21nRmS69bXRo":[]}], "h3IK06PQGfCRQ":[false, "HjPw@G1Icu#dn"], "S1ncA0ERs8Y9v":"@7EShAFjSycp%Wo0gHn", "zQO8BA7nazqKW7CRP8":true} +{"ukrzZl":[{"nQ4PePPfX":11197252787701758701, "5kmG":{"lBKR":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D", false], [true, true], [], []], "rOmNvc":{"wOWxSWQf":[false], "pIK6tGXUp1gekWViJ":{}, "pFKIzg3HC":14538916875375166988}, "igqgnb":[], "pUDeAJw":"MN^9hUPKv811Vq!"}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":18233789955605096603, "mYikENkiDhPRtQHOr":true}, {}, {"qOElRhbehMXQNrln":{"4GBqJBrnoOHJW5GA":{"QaPSqINbjb7nGx9qz":8975023301134451623, "aOUaQBB":false}}}, {"x27uem04bX6R87b":[[]]}, {"MqSQ5v":[]}], "h3IK06PQGfCRQ":[false, "7pq+IfdiKeTkTym7AWjlc"], "S1ncA0ERs8Y9v":"@7EShAFjSycp%Wo0gHn", "zQO8BA7nazqKW7CRP8":true} +{"UkPbWZl":[{"nQ4PePPfX":11197252787701758701, "5kmG":{"lBKR":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D", false], [true, true], [false], []], "rvCMyf":{"2pnWUuQ6J":[false, "q-5Gl5B8uOK"], "pIK6tGXUp1gekWViJ":{}, "pFKIzg3HC":14538916875375166988, "yeNIt3JgSC0K":1931793149388080066}, "BVH5PAgEe4b":[], "pUDeAJw":"LnJMn0D&2lr^k!A", "uDl68z":516601863564431352}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":"F!*nU1V_WOni8$a9RXBHGob^sg", "mYikENkiDhPRtQHOr":true}, {}, {"qOURhbeBpKE8qrhC":{"4GBqJBrnoOHJW5GA":{"QaPSqINbjb7nGx9qz":8975023301134451623, "OUlR":false}}}, {"x27uem04bX6R87b":[[]]}, {"MqSQ5v":[]}], "h3IK06PQGfCRQ":[false, "7pq+IfdiKeTkTym7AWjlc", true], "dlCX4s8LF":"@7EShAFjSycp%Wo0gHn", "zQO8BA7nazqKW7CRP8":true, "XahaweEPjnHUyKsT":{}} +{"IkkCdvbW8oLK":[{"nQ4PePPfX":11197252787701758701, "5kmG":{"lB3l":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D", 17822336972471685000], [true, true], [false], [], []], "rvCMyf":{"2pnWUuQ6J":[false, "q-5Gl5B8uOK"], "pIK6tGXUp1gekWViJ":{}, "pFKIzg3HC":14538916875375166988, "yeNIt3JgSC0K":1931793149388080066}, "BVH5PAgEe4b":[], "pUDeAJw":"LnJMn0D&2lr^k!A", "uDl68z":"fDT@hLdFJNXwBfJ__Fok7u2@BWY^t0"}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":false, "mYikENkiDhPRtQHOr":true}, {}, {"qOURhbeBpKE8qrhC":{"7Qf27pQMkchIOBWX":{"QaPSqINbjb7nGx9qz":8975023301134451623, "OUlR":false, "EoEJ7GlbhI0":[]}}}, {"x27uem04bX6R87b":[[[]], []]}, {"MqSQ5v":[9304041946960766827]}, {}], "h3IK06PQGfCRQ":[false, "7pq+IfdiKeTkTym7AWjlc", true], "dlCX4s8LF":true, "zQO8BA7nazqKW7CRP8":true, "fOa5rfhNLCiqjrnUrtZ6":{}} +{"IkkCdvbW8oLK":[{"nQ4PePPfX":11197252787701758701, "mGJx":{"lB3l":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D", 17822336972471685000], [true, 
true], [10370853850869029207], [], ["VaTduwAFH0ahN5xeJU"]], "rvCMyf":{"2pnWUuQ6J":[false, "6J%Orinf%4"], "pIK6tGXUp1gekWViJ":{}, "pFKIzg3HC":14538916875375166988, "yeNIt3JgSC0K":1931793149388080066}, "BVH5PAgEe4b":[], "pUDeAJw":"LnJMn0D&2lr^k!A", "uDl68z":"fDT@hLdFJNXwBfJ__Fok7u2@BWY^t0"}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":false, "mYikENkiDhPRtQHOr":true}, {}, {"qOURhbeBpKE8qrhC":{"7Qf27pQMkchIOBWX":{"aKaShNyxj7Gx9qB":8975023301134451623, "OUlR":false, "EoEJ7GlbhI0":[]}}}, {"x27uem04bX6R87b":[[[]], []]}, {"MqSQ5v":[9304041946960766827, "T##LF8eDM"]}, {}], "h3IK06PQGfCRQ":[false, 6667769656296380039, true], "dlCX4s8LF":true, "zQO8BA7nazqKW7CRP8":true, "fOa5rfhNLCiqjrnUrtZ6":{}} +{"IkkCdvbW8oLK":[{"nQ4PePPfX":11197252787701758701, "xGBZx":{"lB3l":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D", "sFwAP3"], [true, "-TBj_T1BS7OJh8^p1qO3!DK_X&CfwetZ"], [5795439407585677270, false], [], ["VaTduwAFH0ahN5xeJU"]], "OvMy":{"2pnWUuQ6J":[false, "6J%Orinf%4"], "wni3QGXfpgeq":{"QF0hiIqRIKp2mp04U":14287172497490584292}, "M8pg0INzhg3Hz":14538916875375166988, "yeNIt3JgSC0K":false, "TeFWw":[]}, "BVH5PAgEe4b":[], "pUDeAJw":"LnJMn0D&2lr^k!A", "uDl68z":"fDT@hLdFJNXwBfJ__Fok7u2@BWY^t0"}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":false, "mYikENkiDhPRtQHOr":true}, {}, {"DjYSOeUFNepEK4XvC":{"7Qf27pQMkchIOBWX":{"aKaShNyxj7Gx9qB":8975023301134451623, "OUlR":false, "EoEJ7GlbhI0":[]}}}, {"x27uem04bX6R87b":[[[15632688604980432085]], []]}, {"MqSQ5v":[9304041946960766827, "T##LF8eDM"]}, {}], "h3IK06PQGfCRQ":[false, 6667769656296380039, true], "dlCX4s8LF":true, "zQO8BA7nazqKW7CRP8":true, "fOa5rfhNLCiqjrnUrtZ6":{}} +{"IkkCdvbW8oLK":[{"nQ4PePPfX":11197252787701758701, "xGBZx":{"lB3l":[["_#JSXSLdVKXb+c", "eiUmT%F$FQBWtWz^Tt7Ix&D", "sFwAP3"], [true, "-TBj_T1BS7OJh8^p1qO3!DK_X&CfwetZ"], [5795439407585677270, false], [], ["VaTduwAFH0ahN5xeJU"]], "OvMy":{"2pnWUuQ6J":[false, "6J%Orinf%4"], "wni3QGXfpgeq":{"QF0hiIqRIKp2mp04U":14287172497490584292}, "M8pg0INzhg3Hz":14538916875375166988, "yeNIt3JgSC0K":false, "TeFWw":[]}, "BVH5PAgEe4b":[], "pUDeAJw":"LnJMn0D&2lr^k!A", "uDl68z":"8&VE7"}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":false, "mYikENkiDhPRtQHOr":true, "lbci":{}}, {}, {"DjYSOeUFNepEK4XvC":{"QVEsjfQBcsIEbRWBW":{"uGYvt33UTmxj7t2B":8975023301134451623, "OUlR":false, "EoEJ7GlbhI0":[]}, "Qya8i":{"EMfurslq2KFOCa29od0d":[]}}}, {"x27uem04bX6R87b":[[[15632688604980432085]], [[]]]}, {"MqSQ5v":[9304041946960766827, "T##LF8eDM"]}, {}], "sEdwKHDRafKvC":[false, 6667769656296380039, true], "dlCX4s8LF":true, "zQO8BA7nazqKW7CRP8":true, "fOa5rfhNLCiqjrnUrtZ6":{}} +{"schedule":[{"breakfast":"7am", "5ZB35":{"nHypO":[]}}, {"lunch":"12pm"}]} +{"schedule":[{"bdvelrflX":"7am"}, {"lunch":"12pm"}]} +{"23sldMp":[{"Ob8hrGkHsU8X":"7am"}, {"lunch":"12pm"}]} +{"schedule":[{"bMnamkjsAsat":"7am"}, {"lunch":"12pm", "OfmJPaS":{}}]} +{"snjTZul":[{"breakfast":"7am"}, {"lHkn6N":1318333088581732761}, {"bQH4jPs":{}}], "Hrv8ZL6":[]} +{"schedule":[{"QrqaD":"!uUry9J-#VUCkKD0yyI+xM", "3e8EfNin":"0_Ny&1pcBzd8YEFq8hn4+Q#y^ESEg*"}, {"lunch":"12pm"}], "hGh8RR":{}} +{"schedule":[{"regEsl2t":true, "q5flU9DI7erByRjh":{}}, {"lH0h":"%yJEbznodCJ8-#KzPNcBHrsr"}, {"pPk2zAcfUxDZcO":{}}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}, {}], "hZNsEeUmexM":{}} +{"lhhG":[{"breakfast":"7am"}, {"lunch":"12pm", "OEgZYuhDWP3vGbV4bi":[]}, {}]} +{"schedule":[{"breakfast":"kj*RPaKLng*&h4&UBqa-tw%53aE", "WtHnb8mVPvvHDUYWaJSB":[[]]}, {"lunch":"12pm"}], "6EigJgc8sxf7VIfMkDl":[]} +{"schedule":[{"breakfast":false}, {"lunch":"12pm", "WikTL":1724418800345361559}]} 
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]} +{"h3hK0l":[{"breakfast":"7am", "fGNLfAC":{}}, {"lETzn6S":"12pm"}]} +{"schedule":[{"breakfast":"7am"}, {"izEx":9011753325952200749}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]} +{"schedule":[{"breakfast":"7am"}, {"mY7la":17408441466865856756, "yIG0VqnoY1TTMjs":{"11BIo1csSuB1n":10038860187222625751}}]} +{"cSJ8eOuN":[{"breakfast":"7am", "UgpWK":{"Wkha9tqdiOefZfAKQcEg":"EbhMQNrlngPo"}}, {"lunch":"12pm", "wGWGRJqJlPYzCB0":[]}, {}]} +{"UBgFuue":[{"brrak2st":"kEmykg@6-%h-OQ@O_"}, {"lunch":"12pm", "7DnPaGPqi5Wr7":false}, {}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm", "LeH3":{}}]} +{"schedule":[{"breakon":true}, {"Sx1Rch":9823913620251756169, "0TvaWJUmv0Cv":{}}]} +{"schedule":[{"breakfast":"7am", "5ZB35":{"nHypO":[]}}, {"lunch":"12pm"}]} +{"schedule":[{"bdvelrflX":"7am"}, {"lunch":"12pm"}]} +{"23sldMp":[{"Ob8hrGkHsU8X":"7am"}, {"lunch":"12pm"}]} +{"schedule":[{"bMnamkjsAsat":"7am"}, {"lunch":"12pm", "OfmJPaS":{}}]} +{"snjTZul":[{"breakfast":"7am"}, {"lHkn6N":1318333088581732761}, {"bQH4jPs":{}}], "Hrv8ZL6":[]} +{"schedule":[{"QrqaD":"!uUry9J-#VUCkKD0yyI+xM", "3e8EfNin":"0_Ny&1pcBzd8YEFq8hn4+Q#y^ESEg*"}, {"lunch":"12pm"}], "hGh8RR":{}} +{"schedule":[{"regEsl2t":true, "q5flU9DI7erByRjh":{}}, {"lH0h":"%yJEbznodCJ8-#KzPNcBHrsr"}, {"pPk2zAcfUxDZcO":{}}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}, {}], "hZNsEeUmexM":{}} +{"lhhG":[{"breakfast":"7am"}, {"lunch":"12pm", "OEgZYuhDWP3vGbV4bi":[]}, {}]} +{"schedule":[{"breakfast":"kj*RPaKLng*&h4&UBqa-tw%53aE", "WtHnb8mVPvvHDUYWaJSB":[[]]}, {"lunch":"12pm"}]} +{"schedule":[{"breakfast":false}, {"lunch":"12pm", "WikTL":1724418800345361559}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]} +{"h3hK0l":[{"breakfast":"7am", "fGNLfAC":{}}, {"lETzn6S":"12pm"}]} +{"schedule":[{"breakfast":"7am"}, {"izEx":9011753325952200749}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]} +{"schedule":[{"breakfast":"7am"}, {"mY7la":17408441466865856756, "yIG0VqnoY1TTMjs":{"11BIo1csSuB1n":10038860187222625751}}]} +{"cSJ8eOuN":[{"breakfast":"7am", "UgpWK":{"Wkha9tqdiOefZfAKQcEg":"EbhMQNrlngPo"}}, {"lunch":"12pm", "wGWGRJqJlPYzCB0":[]}, {}]} +{"UBgFuue":[{"brrak2st":"kEmykg@6-%h-OQ@O_"}, {"lunch":"12pm", "7DnPaGPqi5Wr7":false}, {}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm", "LeH3":{}}]} +{"schedule":[{"breakon":true}, {"Sx1Rch":9823913620251756169, "0TvaWJUmv0Cv":{}}]} +{"schedule":[{"breakfast":"7am", "5ZB35":{"nHypO":[]}}, {"lunch":"12pm"}]} +{"schedule":[{"bdvelrflX":"7am"}, {"lunch":"12pm"}]} +{"23sldMp":[{"Ob8hrGkHsU8X":"7am"}, {"lunch":"12pm"}]} +{"schedule":[{"bMnamkjsAsat":"7am"}, {"lunch":"12pm", "OfmJPaS":{}}]} +{"snjTZul":[{"breakfast":"7am"}, {"lHkn6N":1318333088581732761}, {"bQH4jPs":{}}], "Hrv8ZL6":[]} +{"schedule":[{"QrqaD":"!uUry9J-#VUCkKD0yyI+xM", "3e8EfNin":"0_Ny&1pcBzd8YEFq8hn4+Q#y^ESEg*"}, {"lunch":"12pm"}], "hGh8RR":{}} +{"schedule":[{"regEsl2t":true, "q5flU9DI7erByRjh":{}}, {"lH0h":"%yJEbznodCJ8-#KzPNcBHrsr"}, {"pPk2zAcfUxDZcO":{}}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}, {}], "hZNsEeUmexM":{}} +{"lhhG":[{"breakfast":"7am"}, {"lunch":"12pm", "OEgZYuhDWP3vGbV4bi":[]}, {}]} +{"schedule":[{"breakfast":"kj*RPaKLng*&h4&UBqa-tw%53aE", "WtHnb8mVPvvHDUYWaJSB":[[]]}, {"lunch":"12pm"}], "6EigJgc8sxf7VIfMkDl":[]} +{"schedule":[{"breakfast":false}, {"lunch":"12pm", "WikTL":1724418800345361559}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]} +{"h3hK0l":[{"breakfast":"7am", "fGNLfAC":{}}, {"lETzn6S":"12pm"}]} +{"schedule":[{"breakfast":"7am"}, 
{"izEx":9011753325952200749}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]} +{"schedule":[{"breakfast":"7am"}, {"mY7la":17408441466865856756, "yIG0VqnoY1TTMjs":{"11BIo1csSuB1n":10038860187222625751}}]} +{"cSJ8eOuN":[{"breakfast":"7am", "UgpWK":{"Wkha9tqdiOefZfAKQcEg":"EbhMQNrlngPo"}}, {"lunch":"12pm", "wGWGRJqJlPYzCB0":[]}, {}]} +{"UBgFuue":[{"brrak2st":"kEmykg@6-%h-OQ@O_"}, {"lunch":"12pm", "7DnPaGPqi5Wr7":false}, {}]} +{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm", "LeH3":{}}]} +{"schedule":[{"breakon":true}, {"Sx1Rch":9823913620251756169, "0TvaWJUmv0Cv":{}}]} +{} +{} +{} +{"cuNC":"j#Q*KbvL"} +{} +{} +{} +{"e2mZBQPL9f0pgd0sXR":false} +{} +{} +{} +{} +{} +{} +{} +{} +{} +{} +{} +{} +730 +200 diff --git a/tests/queries/0_stateless/02918_fuzzjson_table_function.sql b/tests/queries/0_stateless/02918_fuzzjson_table_function.sql new file mode 100644 index 00000000000..6db0c69dbac --- /dev/null +++ b/tests/queries/0_stateless/02918_fuzzjson_table_function.sql @@ -0,0 +1,106 @@ +-- Tags: no-parallel, no-replicated-database: Named collection is used + +SET allow_experimental_object_type = 1; +-- + +DROP NAMED COLLECTION IF EXISTS 02918_json_fuzzer; +CREATE NAMED COLLECTION 02918_json_fuzzer AS json_str='{}'; + +SELECT * FROM fuzzJSON(02918_json_fuzzer, random_seed=54321) LIMIT 10; +SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"ClickHouse":"Is Fast"}', random_seed=1337) LIMIT 20; +SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"students":[{"name":"Alice"}, {"name":"Bob"}]}', random_seed=1337) LIMIT 20; +SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}', random_seed=123456, reuse_output=true) LIMIT 20; +SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}', random_seed=123456, reuse_output=false) LIMIT 20; +SELECT * FROM fuzzJSON(02918_json_fuzzer, + json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}', + random_seed=123456, + reuse_output=0, + max_output_length=128) LIMIT 20; + +SELECT * FROM fuzzJSON(02918_json_fuzzer, + json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}', + random_seed=123456, + reuse_output=0, + max_output_length=65536, + max_nesting_level=10, + max_array_size=20) LIMIT 20; + +SELECT * FROM fuzzJSON(02918_json_fuzzer, + random_seed=6667, + max_nesting_level=0) LIMIT 10; + +SELECT * FROM fuzzJSON(02918_json_fuzzer, + random_seed=6667, + max_object_size=0, + max_array_size=0) LIMIT 10; + +-- +DROP TABLE IF EXISTS 02918_table_str; +CREATE TABLE 02918_table_str (json_str String) Engine=Memory; + +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(02918_json_fuzzer) limit 10; +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(02918_json_fuzzer) limit 10; +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(02918_json_fuzzer, random_seed=123, reuse_output=true) limit 10; +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}', + random_seed=6666) LIMIT 200; + +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}', + random_seed=6666, + min_key_length=1, + max_key_length=5) LIMIT 200; + +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str='{"name": "John Doe", "age": 30, "address": 
{"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}', + max_nesting_level=128, + reuse_output=true, + random_seed=6666, + min_key_length=5, + max_key_length=5) LIMIT 200; + +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}', + random_seed=6666, + reuse_output=1, + probability=0.5, + max_output_length=65536, + max_nesting_level=18446744073709551615, + max_array_size=18446744073709551615, + max_object_size=18446744073709551615, + max_key_length=65536, + max_string_value_length=65536) LIMIT 100; + +SELECT count() FROM 02918_table_str; + +DROP TABLE IF EXISTS 02918_table_str; + +-- +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_output_length="Hello") LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_output_length=65537) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=10) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=-0.1) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=1.1) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=1.1) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_string_value_length=65537) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=65537) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=10, min_key_length=0) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=10, min_key_length=11) LIMIT 10; -- { serverError BAD_ARGUMENTS } + +-- +DROP TABLE IF EXISTS 02918_table_obj; +CREATE TABLE 02918_table_obj (json_obj Object('json')) Engine=Memory; + +INSERT INTO 02918_table_obj SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str='{"name": "John Doe", "age": 27, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}', + random_seed=12345) LIMIT 200; +SELECT count() FROM 02918_table_obj; + +DROP TABLE IF EXISTS 02918_table_obj; + +DROP NAMED COLLECTION IF EXISTS 02918_json_fuzzer; diff --git a/tests/queries/0_stateless/02918_gorilla_invalid_file.reference b/tests/queries/0_stateless/02918_gorilla_invalid_file.reference new file mode 100644 index 00000000000..2574a09f166 --- /dev/null +++ b/tests/queries/0_stateless/02918_gorilla_invalid_file.reference @@ -0,0 +1 @@ +Exc diff --git a/tests/queries/0_stateless/02918_gorilla_invalid_file.sh b/tests/queries/0_stateless/02918_gorilla_invalid_file.sh new file mode 100755 index 00000000000..b877e59b483 --- /dev/null +++ b/tests/queries/0_stateless/02918_gorilla_invalid_file.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +echo -ne 'checksumchecksum\x95\xd3\x02\x00\x00\x01\x00\x00\x00\x0800\xff\xff\xff\xff\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08' | + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&decompress=1&http_native_compression_disable_checksumming_on_decompress=1" --data-binary @- 2>&1 | grep -oF 'Exc' diff --git a/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.reference b/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.reference new file mode 100644 index 00000000000..323b12c173a --- /dev/null +++ b/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.reference @@ -0,0 +1,4 @@ +1 2504 1 +ok +1 200 1 1 +ok diff --git 
a/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.sh b/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.sh new file mode 100755 index 00000000000..43594a45a1e --- /dev/null +++ b/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +EXCEPTION_TEXT="VIOLATED_CONSTRAINT" +EXCEPTION_SUCCESS_TEXT=ok + +# CollapsingSortedAlgorithm::merge() also has a check for sign column value +# optimize_on_insert = 0 is required to avoid this automatic merge behavior +$CLICKHOUSE_CLIENT --query="SET optimize_on_insert=0;" + + +# CollapsingMergeTree +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS collapsing_merge_tree;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE collapsing_merge_tree +( + Key UInt32, + Count UInt16, + Sign Int8 +) +ENGINE=CollapsingMergeTree(Sign) ORDER BY Key +SETTINGS add_implicit_sign_column_constraint_for_collapsing_engine=1;" + +# Should succeed +$CLICKHOUSE_CLIENT --query="INSERT INTO collapsing_merge_tree VALUES (1, 2504, 1);" +$CLICKHOUSE_CLIENT --query="SELECT * FROM collapsing_merge_tree;" + +# Should throw an exception +$CLICKHOUSE_CLIENT --query="INSERT INTO collapsing_merge_tree VALUES (1, 2504, 5);" 2>&1 \ + | grep -q "$EXCEPTION_TEXT" && echo "$EXCEPTION_SUCCESS_TEXT" || echo "Did not throw an exception" + +$CLICKHOUSE_CLIENT --query="DROP TABLE collapsing_merge_tree;" + + +# VersionedCollapsingMergeTree +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS versioned_collapsing_merge_tree;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE versioned_collapsing_merge_tree +( + Key UInt32, + Count UInt8, + Sign Int8, + Version UInt8 +) +ENGINE=VersionedCollapsingMergeTree(Sign, Version) ORDER BY Key +SETTINGS add_implicit_sign_column_constraint_for_collapsing_engine=1;" + +# Should succeed +$CLICKHOUSE_CLIENT --query="INSERT INTO versioned_collapsing_merge_tree VALUES (1, 2504, 1, 1);" +$CLICKHOUSE_CLIENT --query="SELECT * FROM versioned_collapsing_merge_tree;" + +# Should throw an exception +$CLICKHOUSE_CLIENT --query="INSERT INTO versioned_collapsing_merge_tree VALUES (1, 2504, 5, 1);" 2>&1 \ + | grep -q "$EXCEPTION_TEXT" && echo "$EXCEPTION_SUCCESS_TEXT" || echo "Did not throw an exception" + +$CLICKHOUSE_CLIENT --query="DROP TABLE versioned_collapsing_merge_tree;" diff --git a/tests/queries/0_stateless/02918_join_pm_lc_crash.reference b/tests/queries/0_stateless/02918_join_pm_lc_crash.reference new file mode 100644 index 00000000000..7523f1c1774 --- /dev/null +++ b/tests/queries/0_stateless/02918_join_pm_lc_crash.reference @@ -0,0 +1,12 @@ +0 + +0 +0 + +0 +0 + +\N +0 + +\N diff --git a/tests/queries/0_stateless/02918_join_pm_lc_crash.sql b/tests/queries/0_stateless/02918_join_pm_lc_crash.sql new file mode 100644 index 00000000000..123208ee981 --- /dev/null +++ b/tests/queries/0_stateless/02918_join_pm_lc_crash.sql @@ -0,0 +1,31 @@ + +SET joined_subquery_requires_alias = 0, join_algorithm = 'partial_merge'; + +SET allow_experimental_analyzer = 0, join_use_nulls = 0; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM system.one GROUP BY val WITH TOTALS) +USING (val); + +SET allow_experimental_analyzer = 0, join_use_nulls = 1; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM 
system.one GROUP BY val WITH TOTALS) +USING (val); + +SET allow_experimental_analyzer = 1, join_use_nulls = 0; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM system.one GROUP BY val WITH TOTALS) +USING (val); + +SET allow_experimental_analyzer = 1, join_use_nulls = 1; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM system.one GROUP BY val WITH TOTALS) +USING (val); + diff --git a/tests/queries/0_stateless/02918_sqlite_path_check.reference b/tests/queries/0_stateless/02918_sqlite_path_check.reference new file mode 100644 index 00000000000..56b832a6469 --- /dev/null +++ b/tests/queries/0_stateless/02918_sqlite_path_check.reference @@ -0,0 +1,2 @@ +SQLite database file path '/etc/passwd' must be inside 'user_files' directory. (PATH_ACCESS_DENIED) +SQLite database file path '../../../../etc/passwd' must be inside 'user_files' directory. (PATH_ACCESS_DENIED) diff --git a/tests/queries/0_stateless/02918_sqlite_path_check.sh b/tests/queries/0_stateless/02918_sqlite_path_check.sh new file mode 100755 index 00000000000..fa74b9ecfc8 --- /dev/null +++ b/tests/queries/0_stateless/02918_sqlite_path_check.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Tags: no-fasttest +# Tag no-fasttest: Fast tests don't build external libraries (SQLite) + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +function get_exception_message() +{ + $CLICKHOUSE_CLIENT --query "$1" |& head -n1 | sed 's/.*DB::Exception: \(.*\) (version.*/\1/g' +} + +get_exception_message "Select * from sqlite('/etc/passwd', 'something');" +get_exception_message "Select * from sqlite('../../../../etc/passwd', 'something');" diff --git a/tests/queries/0_stateless/02918_wrong_dictionary_source.reference b/tests/queries/0_stateless/02918_wrong_dictionary_source.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/02918_wrong_dictionary_source.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02918_wrong_dictionary_source.sql b/tests/queries/0_stateless/02918_wrong_dictionary_source.sql new file mode 100644 index 00000000000..e729ef74c61 --- /dev/null +++ b/tests/queries/0_stateless/02918_wrong_dictionary_source.sql @@ -0,0 +1,11 @@ +DROP DICTIONARY IF EXISTS id_value_dictionary; +DROP TABLE IF EXISTS source_table; + +CREATE TABLE source_table(id UInt64, value String) ENGINE = MergeTree ORDER BY tuple(); + +-- There is no "CLICKHOUSEX" dictionary source, so the next query must fail even if `dictionaries_lazy_load` is enabled. 
+CREATE DICTIONARY id_value_dictionary(id UInt64, value String) PRIMARY KEY id SOURCE(CLICKHOUSEX(TABLE 'source_table')) LIFETIME(MIN 0 MAX 1000) LAYOUT(FLAT()); -- { serverError UNKNOWN_ELEMENT_IN_CONFIG } + +SELECT count() FROM system.dictionaries WHERE name=='id_value_dictionary' AND database==currentDatabase(); + +DROP TABLE source_table; diff --git a/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.reference b/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.reference new file mode 100644 index 00000000000..a1ce6a27bb4 --- /dev/null +++ b/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.reference @@ -0,0 +1,3 @@ +0 0 false +1 1 true +0 0 false diff --git a/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.sql b/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.sql new file mode 100644 index 00000000000..f531ec0311d --- /dev/null +++ b/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS crash_02919; + +CREATE TABLE crash_02919 ( + b Int64, + c Nullable(Int64) MATERIALIZED b, + d Nullable(Bool) MATERIALIZED b +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO crash_02919 VALUES (0); +SELECT b, c, d FROM crash_02919; +ALTER TABLE crash_02919 UPDATE b = 1 WHERE 1=1 SETTINGS mutations_sync = 1; +SELECT b, c, d FROM crash_02919; +ALTER TABLE crash_02919 UPDATE b = 0.1 WHERE 1=1 SETTINGS mutations_sync = 1; +SELECT b, c, d FROM crash_02919; + +DROP TABLE crash_02919; diff --git a/tests/queries/0_stateless/02919_storage_fuzzjson.reference b/tests/queries/0_stateless/02919_storage_fuzzjson.reference new file mode 100644 index 00000000000..a134ce52c11 --- /dev/null +++ b/tests/queries/0_stateless/02919_storage_fuzzjson.reference @@ -0,0 +1,3 @@ +100 +100 +100 diff --git a/tests/queries/0_stateless/02919_storage_fuzzjson.sql b/tests/queries/0_stateless/02919_storage_fuzzjson.sql new file mode 100644 index 00000000000..80b4a406a08 --- /dev/null +++ b/tests/queries/0_stateless/02919_storage_fuzzjson.sql @@ -0,0 +1,44 @@ +DROP TABLE IF EXISTS 02919_test_table_noarg; +CREATE TABLE 02919_test_table_noarg(str String) ENGINE = FuzzJSON('{}'); + +SELECT count() FROM (SELECT * FROM 02919_test_table_noarg LIMIT 100); + +DROP TABLE IF EXISTS 02919_test_table_noarg; + +-- +DROP TABLE IF EXISTS 02919_test_table_valid_args; +CREATE TABLE 02919_test_table_valid_args(str String) ENGINE = FuzzJSON( + '{"pet":"rat"}', NULL); + +SELECT count() FROM (SELECT * FROM 02919_test_table_valid_args LIMIT 100); + +DROP TABLE IF EXISTS 02919_test_table_valid_args; + +-- +DROP TABLE IF EXISTS 02919_test_table_reuse_args; +CREATE TABLE 02919_test_table_reuse_args(str String) ENGINE = FuzzJSON( + '{ + "name": "Jane Doe", + "age": 30, + "city": "New York", + "contacts": { + "email": "jane@example.com", + "phone": "+1234567890" + }, + "skills": [ + "JavaScript", + "Python", + { + "frameworks": ["React", "Django"] + } + ], + "projects": [ + {"name": "Project A", "status": "completed"}, + {"name": "Project B", "status": "in-progress"} + ] + }', + 12345); + +SELECT count() FROM (SELECT * FROM 02919_test_table_reuse_args LIMIT 100); + +DROP TABLE IF EXISTS 02919_test_table_reuse_args; diff --git a/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.reference b/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.reference new file mode 100644 index 00000000000..9874bc57142 --- /dev/null +++ 
b/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.reference @@ -0,0 +1,52 @@ + +message Message +{ + message H + { + uint32 k = 1; + } + H h = 1; + message A + { + uint32 g = 1; + message B + { + uint32 c = 1; + uint32 f = 2; + message D + { + uint32 e = 1; + } + D d = 3; + } + B b = 2; + } + A a = 2; +} +46 (45,(42,44,43)) + +struct Message +{ + struct H + { + k @0 : UInt8; + } + h @0 : H; + struct A + { + g @0 : UInt8; + struct B + { + c @0 : UInt8; + f @1 : UInt8; + struct D + { + e @0 : UInt8; + } + d @2 : D; + } + b @1 : B; + } + a @1 : A; +} +(46) (45,(42,44,(43))) diff --git a/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.sh b/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.sh new file mode 100755 index 00000000000..aee6b866719 --- /dev/null +++ b/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +SCHEMA_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME-schema +FILE=$CLICKHOUSE_TEST_UNIQUE_NAME + +$CLICKHOUSE_LOCAL -q "select 42 as \`a.b.c\`, 43 as \`a.b.d.e\`, 44 as \`a.b.f\`, 45 as \`a.g\`, 46 as \`h.k\` format Protobuf settings output_format_schema='$SCHEMA_FILE.proto'" > $FILE.pb +tail -n +2 $SCHEMA_FILE.proto +$CLICKHOUSE_LOCAL -q "select * from file('$FILE.pb') settings format_schema='$SCHEMA_FILE:Message'" + +$CLICKHOUSE_LOCAL -q "select 42 as a_b_c, 43 as a_b_d_e, 44 as a_b_f, 45 as a_g, 46 as h_k format CapnProto settings output_format_schema='$SCHEMA_FILE.capnp'" > $FILE.capnp +tail -n +2 $SCHEMA_FILE.capnp +$CLICKHOUSE_LOCAL -q "select * from file('$FILE.capnp') settings format_schema='$SCHEMA_FILE:Message'" + +rm $SCHEMA_FILE* +rm $FILE.* + diff --git a/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.reference b/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.reference new file mode 100644 index 00000000000..62245f5d176 --- /dev/null +++ b/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.reference @@ -0,0 +1,9 @@ +314776434768051644139306697240981192872 0 74 74 +14776434768051644139306697240981192872314776434768051644139306697240981192872 0 141 141 +314776434768051644139306697240981192872 14776434768051644139306697240981192872314776434768051644139306697240981192872 115 115 +-25505932152886819324067910190787018584 0 74 74 +14776434768051644139306697240981192872314776434768051644139306697240981192872 0 141 141 +-25505932152886819324067910190787018584 14776434768051644139306697240981192872314776434768051644139306697240981192872 99 99 +314776434768051644139306697240981192872 0 74 74 +14776434768051644139306697240981192872314776434768051644139306697240981192872 0 141 141 +314776434768051644139306697240981192872 14776434768051644139306697240981192872314776434768051644139306697240981192872 115 115 diff --git a/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.sql b/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.sql new file mode 100644 index 00000000000..6f241e104b6 --- /dev/null +++ b/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.sql @@ -0,0 +1,12 @@ +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) 
AS b; +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; + +SELECT 314776434768051644139306697240981192872::Int128 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 14776434768051644139306697240981192872314776434768051644139306697240981192872::Int256 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 314776434768051644139306697240981192872::Int128 AS x, 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; + +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 0::Int128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS x, 0::Int128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 14776434768051644139306697240981192872314776434768051644139306697240981192872::Int256 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; + diff --git a/tests/queries/0_stateless/02921_database_filesystem_path_check.reference b/tests/queries/0_stateless/02921_database_filesystem_path_check.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02921_database_filesystem_path_check.sql b/tests/queries/0_stateless/02921_database_filesystem_path_check.sql new file mode 100644 index 00000000000..d62b629df7b --- /dev/null +++ b/tests/queries/0_stateless/02921_database_filesystem_path_check.sql @@ -0,0 +1,2 @@ +create database db_filesystem ENGINE=Filesystem('/etc'); -- { serverError BAD_ARGUMENTS } +create database db_filesystem ENGINE=Filesystem('../../../../../../../../etc'); -- { serverError BAD_ARGUMENTS } \ No newline at end of file diff --git a/tests/queries/0_stateless/02921_file_engine_size_virtual_column.reference b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.reference new file mode 100644 index 00000000000..2f319dfb812 --- /dev/null +++ b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.reference @@ -0,0 +1,12 @@ +2 +3 +4 +2 +3 +4 +2 +3 +4 +2 +3 +4 diff --git a/tests/queries/0_stateless/02921_file_engine_size_virtual_column.sh b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.sh new file mode 100755 index 00000000000..5dd58ec0d7f --- /dev/null +++ b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +echo "1" > $CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv +echo "12" > $CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv +echo "123" > $CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv + +$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size" +# Run this query twice to check correct behaviour when cache is used +$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size" + +# Test the same fils in archive +tar -cf $CLICKHOUSE_TEST_UNIQUE_NAME.archive.tar $CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv $CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv $CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv + +$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.archive.tar :: $CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size" +$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.archive.tar :: $CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size" + +rm $CLICKHOUSE_TEST_UNIQUE_NAME.* + diff --git a/tests/queries/0_stateless/02921_fuzzbits_with_array_join.reference b/tests/queries/0_stateless/02921_fuzzbits_with_array_join.reference new file mode 100644 index 00000000000..39443245b6c --- /dev/null +++ b/tests/queries/0_stateless/02921_fuzzbits_with_array_join.reference @@ -0,0 +1,4 @@ +12 1 +12 2 +100 1 +100 2 diff --git a/tests/queries/0_stateless/02921_fuzzbits_with_array_join.sql b/tests/queries/0_stateless/02921_fuzzbits_with_array_join.sql new file mode 100644 index 00000000000..5d80a5fbea6 --- /dev/null +++ b/tests/queries/0_stateless/02921_fuzzbits_with_array_join.sql @@ -0,0 +1,2 @@ +SELECT length(fuzzBits('stringstring', 0.5)), a FROM numbers(1) ARRAY JOIN [1, 2] AS a; +SELECT length(fuzzBits('stringstring'::FixedString(100), 0.5)), a FROM numbers(1) ARRAY JOIN [1, 2] AS a \ No newline at end of file diff --git a/tests/queries/0_stateless/02922_analyzer_aggregate_nothing_type.reference b/tests/queries/0_stateless/02922_analyzer_aggregate_nothing_type.reference new file mode 100644 index 00000000000..f9c7b26d245 --- /dev/null +++ b/tests/queries/0_stateless/02922_analyzer_aggregate_nothing_type.reference @@ -0,0 +1,13 @@ +0 +0 + +0 +0 + +0 +0 \N + +0 \N +0 + +0 diff --git a/tests/queries/0_stateless/02922_analyzer_aggregate_nothing_type.sql b/tests/queries/0_stateless/02922_analyzer_aggregate_nothing_type.sql new file mode 100644 index 00000000000..987515527f0 --- /dev/null +++ b/tests/queries/0_stateless/02922_analyzer_aggregate_nothing_type.sql @@ -0,0 +1,24 @@ +#!/usr/bin/env -S ${HOME}/clickhouse-client --progress --queries-file + + +SELECT count(NULL) FROM remote('127.0.0.{1,2}', numbers(3)) GROUP BY number % 2 WITH TOTALS; + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (`n` UInt64) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t1 SELECT * FROM numbers(10); + +SET + allow_experimental_parallel_reading_from_replicas=1, + max_parallel_replicas=2, + use_hedged_requests=0, + cluster_for_parallel_replicas='parallel_replicas', + parallel_replicas_for_non_replicated_merge_tree=1 +; + +SELECT count(NULL) FROM t1 WITH TOTALS; +SELECT count(NULL as a), a FROM t1 WITH TOTALS; + +-- result differs in old and new analyzer: +-- SELECT count(NULL as a), sum(a) FROM t1 WITH TOTALS; + +SELECT uniq(NULL) FROM t1 WITH TOTALS; diff --git a/tests/queries/0_stateless/02922_server_exit_code.reference b/tests/queries/0_stateless/02922_server_exit_code.reference new file mode 100644 index 00000000000..7326d960397 --- /dev/null +++ 
b/tests/queries/0_stateless/02922_server_exit_code.reference @@ -0,0 +1 @@ +Ok diff --git a/tests/queries/0_stateless/02922_server_exit_code.sh b/tests/queries/0_stateless/02922_server_exit_code.sh new file mode 100755 index 00000000000..60049902410 --- /dev/null +++ b/tests/queries/0_stateless/02922_server_exit_code.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# We will check that the server's exit code corresponds to the exception code if it was terminated after exception. +# In this example, we provide an invalid path to the server's config, ignore its logs and check the exit code. +# The exception code is 400 = CANNOT_STAT, so the exit code will be 400 % 256. + +${CLICKHOUSE_SERVER_BINARY} -- --path /dev/null 2>/dev/null; [[ "$?" == "$((400 % 256))" ]] && echo 'Ok' || echo 'Fail' diff --git a/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.reference b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.reference new file mode 100644 index 00000000000..369837adcbb --- /dev/null +++ b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.reference @@ -0,0 +1,12 @@ +a.tsv 24 +b.tsv 33 +c.tsv 33 +a.tsv 24 +b.tsv 33 +c.tsv 33 +a.tsv 24 +b.tsv 33 +c.tsv 33 +a.tsv 24 +b.tsv 33 +c.tsv 33 diff --git a/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.sh b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.sh new file mode 100755 index 00000000000..51de2117dca --- /dev/null +++ b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "select _file, _size from url('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file" +$CLICKHOUSE_CLIENT -q "select _file, _size from url('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file" + +$CLICKHOUSE_CLIENT -q "select _file, _size from s3('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file" +$CLICKHOUSE_CLIENT -q "select _file, _size from s3('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file" + diff --git a/tests/queries/0_stateless/02923_cte_equality_disjunction.reference b/tests/queries/0_stateless/02923_cte_equality_disjunction.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/02923_cte_equality_disjunction.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02923_cte_equality_disjunction.sql b/tests/queries/0_stateless/02923_cte_equality_disjunction.sql new file mode 100644 index 00000000000..288bed9e491 --- /dev/null +++ b/tests/queries/0_stateless/02923_cte_equality_disjunction.sql @@ -0,0 +1,12 @@ +--https://github.com/ClickHouse/ClickHouse/issues/5323 +CREATE TABLE test_bug_optimization +( + `path` String +) +ENGINE = MergeTree +ORDER BY path; + +WITH (path = 'test1') OR match(path, 'test2') OR (match(path, 'test3') AND match(path, 'test2')) OR match(path, 'test4') OR (path = 'test5') OR (path = 'test6') AS alias_in_error +SELECT count(1) +FROM test_bug_optimization +WHERE alias_in_error; diff --git a/tests/queries/0_stateless/02923_explain_expired_context.reference b/tests/queries/0_stateless/02923_explain_expired_context.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02923_explain_expired_context.sql b/tests/queries/0_stateless/02923_explain_expired_context.sql new file mode 100644 index 00000000000..68277508eb2 --- /dev/null +++ b/tests/queries/0_stateless/02923_explain_expired_context.sql @@ -0,0 +1,3 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/51321 +EXPLAIN ESTIMATE SELECT any(toTypeName(s)) FROM (SELECT 'bbbbbbbb', toTypeName(s), CAST('', 'LowCardinality(String)'), NULL, CAST('\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', 'String') AS s) AS t1 FULL OUTER JOIN (SELECT CAST('bbbbb\0\0bbb\0bb\0bb', 'LowCardinality(String)'), CAST(CAST('a', 'String'), 'LowCardinality(String)') AS s GROUP BY CoNnEcTiOn_Id()) AS t2 USING (s) WITH TOTALS; +EXPLAIN ESTIMATE SELECT any(s) FROM (SELECT '' AS s) AS t1 JOIN (SELECT '' AS s GROUP BY connection_id()) AS t2 USING (s); diff --git a/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.reference b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.reference new file mode 100644 index 00000000000..bc42121fb39 --- /dev/null +++ b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.reference @@ -0,0 +1,6 @@ +2 +3 +4 +2 +3 +4 diff --git a/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.sh b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.sh new file mode 100755 index 00000000000..dc01687772f --- /dev/null +++ b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, use-hdfs + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv') select 1 settings hdfs_truncate_on_insert=1;" +$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv') select 11 settings hdfs_truncate_on_insert=1;" +$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv') select 111 settings hdfs_truncate_on_insert=1;" + + +$CLICKHOUSE_CLIENT -q "select _size from hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data*.tsv', auto, 'x UInt64') order by _size" +$CLICKHOUSE_CLIENT -q "select _size from hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data*.tsv', auto, 'x UInt64') order by _size" + diff --git a/tests/queries/0_stateless/02923_join_use_nulls_modulo.reference b/tests/queries/0_stateless/02923_join_use_nulls_modulo.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02923_join_use_nulls_modulo.sql b/tests/queries/0_stateless/02923_join_use_nulls_modulo.sql new file mode 100644 index 00000000000..4134a42c599 --- /dev/null +++ b/tests/queries/0_stateless/02923_join_use_nulls_modulo.sql @@ -0,0 +1,22 @@ +--https://github.com/ClickHouse/ClickHouse/issues/47366 +SELECT + id % 255, + toTypeName(d.id) +FROM +( + SELECT + toLowCardinality(1048577) AS id, + toLowCardinality(9223372036854775807) AS value + GROUP BY + GROUPING SETS ( + (toLowCardinality(1024)), + (id % 10.0001), + ((id % 2147483646) != -9223372036854775807), + ((id % -1) != 255)) + ) AS a + SEMI LEFT JOIN +( + SELECT toLowCardinality(9223372036854775807) AS id + WHERE (id % 2147483646) != NULL +) AS d USING (id) +SETTINGS join_use_nulls=1; diff --git a/tests/queries/0_stateless/02930_client_file_log_comment.reference b/tests/queries/0_stateless/02930_client_file_log_comment.reference new file mode 100644 index 00000000000..09639302c0f --- /dev/null +++ b/tests/queries/0_stateless/02930_client_file_log_comment.reference @@ -0,0 +1,4 @@ +42 +select 42\n /dev/stdin +4242 +select 4242\n foo diff --git a/tests/queries/0_stateless/02930_client_file_log_comment.sh b/tests/queries/0_stateless/02930_client_file_log_comment.sh new file mode 100755 index 00000000000..c425f28ecbe --- /dev/null +++ b/tests/queries/0_stateless/02930_client_file_log_comment.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --queries-file /dev/stdin <<<'select 42' +$CLICKHOUSE_CLIENT -nm -q " + system flush logs; + select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 42\n' and type != 'QueryStart'; +" + +$CLICKHOUSE_CLIENT --log_comment foo --queries-file /dev/stdin <<<'select 4242' +$CLICKHOUSE_CLIENT -nm -q " + system flush logs; + select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 4242\n' and type != 'QueryStart'; +" diff --git a/tests/queries/0_stateless/02931_file_cluster.reference b/tests/queries/0_stateless/02931_file_cluster.reference new file mode 100644 index 00000000000..e7ad41e3ba1 --- /dev/null +++ b/tests/queries/0_stateless/02931_file_cluster.reference @@ -0,0 +1,10 @@ +file1 1 +file2 2 +file3 3 +file4 4 +file5 5 +file6 6 +file7 7 +file8 8 +file9 9 +file10 10 diff --git a/tests/queries/0_stateless/02931_file_cluster.sh b/tests/queries/0_stateless/02931_file_cluster.sh new file mode 100755 index 00000000000..e628687a42a --- /dev/null +++ b/tests/queries/0_stateless/02931_file_cluster.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') + +mkdir -p "${USER_FILES_PATH}"/"${CLICKHOUSE_TEST_UNIQUE_NAME}"/ + +for i in {1..10} +do + echo \"file"$i"\","$i" > "${USER_FILES_PATH}"/"${CLICKHOUSE_TEST_UNIQUE_NAME}"/file"$i".csv +done + +$CLICKHOUSE_CLIENT --query "SELECT * FROM fileCluster('test_cluster_two_shards_localhost', '${CLICKHOUSE_TEST_UNIQUE_NAME}/file{1..10}.csv', 'CSV', 's String, i UInt32') ORDER BY (i, s)" + +rm "${USER_FILES_PATH}"/"${CLICKHOUSE_TEST_UNIQUE_NAME}"/file*.csv diff --git a/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.reference b/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.reference new file mode 100644 index 00000000000..35ef86f5339 --- /dev/null +++ b/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.reference @@ -0,0 +1 @@ +1 2 4 diff --git a/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh b/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh new file mode 100755 index 00000000000..27ef26dd9a5 --- /dev/null +++ b/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +echo "1,2" > $CLICKHOUSE_TEST_UNIQUE_NAME.csv +$CLICKHOUSE_LOCAL -nm -q " +create table test (x UInt64, y UInt32, size UInt64) engine=Memory; +insert into test select c1, c2, _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') settings use_structure_from_insertion_table_in_table_functions=1; +select * from test; +" + diff --git a/tests/queries/0_stateless/data_csv/1m_rows_cr_end_of_line.csv.xz b/tests/queries/0_stateless/data_csv/1m_rows_cr_end_of_line.csv.xz new file mode 100644 index 00000000000..b1663e210b7 Binary files /dev/null and b/tests/queries/0_stateless/data_csv/1m_rows_cr_end_of_line.csv.xz differ diff --git a/tests/queries/0_stateless/format_schemas/02266_protobuf_format_google_wrappers.proto b/tests/queries/0_stateless/format_schemas/02266_protobuf_format_google_wrappers.proto index e5283907936..7f72d599707 100644 --- a/tests/queries/0_stateless/format_schemas/02266_protobuf_format_google_wrappers.proto +++ b/tests/queries/0_stateless/format_schemas/02266_protobuf_format_google_wrappers.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -import "wrappers.proto"; +import "google/protobuf/wrappers.proto"; message Message { google.protobuf.StringValue str = 1; diff --git a/tests/queries/0_stateless/format_schemas/wrappers.proto b/tests/queries/0_stateless/format_schemas/wrappers.proto deleted file mode 100644 index c571f096879..00000000000 --- a/tests/queries/0_stateless/format_schemas/wrappers.proto +++ /dev/null @@ -1,123 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Wrappers for primitive (non-message) types. These types are useful -// for embedding primitives in the `google.protobuf.Any` type and for places -// where we need to distinguish between the absence of a primitive -// typed field and its default value. 
-// -// These wrappers have no meaningful use within repeated fields as they lack -// the ability to detect presence on individual elements. -// These wrappers have no meaningful use within a map or a oneof since -// individual entries of a map or fields of a oneof can already detect presence. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "google.golang.org/protobuf/types/known/wrapperspb"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "WrappersProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// Wrapper message for `double`. -// -// The JSON representation for `DoubleValue` is JSON number. -message DoubleValue { - // The double value. - double value = 1; -} - -// Wrapper message for `float`. -// -// The JSON representation for `FloatValue` is JSON number. -message FloatValue { - // The float value. - float value = 1; -} - -// Wrapper message for `int64`. -// -// The JSON representation for `Int64Value` is JSON string. -message Int64Value { - // The int64 value. - int64 value = 1; -} - -// Wrapper message for `uint64`. -// -// The JSON representation for `UInt64Value` is JSON string. -message UInt64Value { - // The uint64 value. - uint64 value = 1; -} - -// Wrapper message for `int32`. -// -// The JSON representation for `Int32Value` is JSON number. -message Int32Value { - // The int32 value. - int32 value = 1; -} - -// Wrapper message for `uint32`. -// -// The JSON representation for `UInt32Value` is JSON number. -message UInt32Value { - // The uint32 value. - uint32 value = 1; -} - -// Wrapper message for `bool`. -// -// The JSON representation for `BoolValue` is JSON `true` and `false`. -message BoolValue { - // The bool value. - bool value = 1; -} - -// Wrapper message for `string`. -// -// The JSON representation for `StringValue` is JSON string. -message StringValue { - // The string value. - string value = 1; -} - -// Wrapper message for `bytes`. -// -// The JSON representation for `BytesValue` is JSON string. -message BytesValue { - // The bytes value. 
- bytes value = 1; -} \ No newline at end of file diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index 4e1184cc9a5..ec44a1e1de9 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -9,11 +9,7 @@ if (ENABLE_CLICKHOUSE_SELF_EXTRACTING) add_subdirectory (self-extracting-executable) endif () -# Utils used in package -add_subdirectory (config-processor) -add_subdirectory (report) - -# Not used in package +# Not used in packages if (ENABLE_UTILS) add_subdirectory (compressor) add_subdirectory (corrector_utf8) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 7eaafe8a777..fc052e225ce 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1,4 +1,4 @@ -personal_ws-1.1 en 2633 +personal_ws-1.1 en 2646 AArch ACLs ALTERs @@ -261,6 +261,7 @@ FOSDEM FQDN Failover FarmHash +FileCluster FileLog FilesystemCacheBytes FilesystemCacheElements @@ -441,6 +442,7 @@ Kolmogorov Kubernetes LDAP LGPL +LIMITs LLDB LLVM's LOCALTIME @@ -864,6 +866,7 @@ SystemReplicasThreadsActive TABLUM TCPConnection TCPThreads +TDigest TINYINT TKSV TLSv @@ -1471,6 +1474,7 @@ fastops fcoverage fibonacci fifo +fileCluster filelog filesystem filesystemAvailable @@ -1502,6 +1506,7 @@ formatRowNoNewline formated formatschema formatter +formatters freezed fromDaysSinceYearZero fromModifiedJulianDay @@ -1512,6 +1517,7 @@ fromUnixTimestampInJodaSyntax fsync func fuzzBits +fuzzJSON fuzzer fuzzers gRPC @@ -1533,6 +1539,7 @@ geohashEncode geohashesInBox geoip geospatial +getClientHTTPHeader getMacro getOSKernelVersion getServerPort @@ -1555,6 +1562,7 @@ graphql greatCircleAngle greatCircleDistance greaterOrEquals +greaterorequals greenspace groupArray groupArrayInsertAt @@ -1562,6 +1570,7 @@ groupArrayLast groupArrayMovingAvg groupArrayMovingSum groupArraySample +groupArraySorted groupBitAnd groupBitOr groupBitXor @@ -1576,6 +1585,7 @@ grouparraylast grouparraymovingavg grouparraymovingsum grouparraysample +grouparraysorted groupbitand groupbitmap groupbitmapand @@ -1740,6 +1750,7 @@ lemmatize lemmatized lengthUTF lessOrEquals +lessorequals levenshtein levenshteinDistance lexicographically @@ -1918,6 +1929,7 @@ notEquals notILike notIn notLike +notequals notretry nowInBlock ntile @@ -2219,6 +2231,7 @@ seektable sequenceCount sequenceMatch sequenceNextNode +seriesPeriodDetectFFT serverTimeZone serverTimezone serverUUID @@ -2348,6 +2361,7 @@ subtractSeconds subtractWeeks subtractYears subtree +subtrees subtype sudo sumCount diff --git a/utils/check-style/check-large-objects.sh b/utils/check-style/check-large-objects.sh index c598ff0e99c..6b3fe86d310 100755 --- a/utils/check-style/check-large-objects.sh +++ b/utils/check-style/check-large-objects.sh @@ -2,8 +2,20 @@ # Check that there are no new translation units compiled to an object file larger than a certain size. +TU_EXCLUDES=( + CastOverloadResolver + AggregateFunctionMax + AggregateFunctionMin + AggregateFunctionUniq + FunctionsConversion + + RangeHashedDictionary + + Aggregator +) + if find $1 -name '*.o' | xargs wc -c | grep -v total | sort -rn | awk '{ if ($1 > 50000000) print }' \ - | grep -v -P 'CastOverloadResolver|AggregateFunctionMax|AggregateFunctionMin|RangeHashedDictionary|Aggregator|AggregateFunctionUniq' + | grep -v -f <(printf "%s\n" "${TU_EXCLUDES[@]}") then echo "^ It's not allowed to have so large translation units." 
exit 1 diff --git a/utils/config-processor/CMakeLists.txt b/utils/config-processor/CMakeLists.txt deleted file mode 100644 index 80c3535ef4e..00000000000 --- a/utils/config-processor/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -clickhouse_add_executable (config-processor config-processor.cpp) -target_link_libraries(config-processor PRIVATE dbms) diff --git a/utils/config-processor/config-processor.cpp b/utils/config-processor/config-processor.cpp deleted file mode 100644 index 242a6782b3b..00000000000 --- a/utils/config-processor/config-processor.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include -#include - -int main(int argc, char ** argv) -{ - try - { - if (argc != 2) - { - std::cerr << "usage: " << argv[0] << " path" << std::endl; - return 3; - } - - DB::ConfigProcessor processor(argv[1], false, true); - DB::XMLDocumentPtr document = processor.processConfig(); - Poco::XML::DOMWriter().writeNode(std::cout, document); - } - catch (Poco::Exception & e) - { - std::cerr << "Exception: " << e.displayText() << std::endl; - return 1; - } - catch (std::exception & e) - { - std::cerr << "std::exception: " << e.what() << std::endl; - return 3; - } - catch (...) - { - std::cerr << "Some exception" << std::endl; - return 2; - } - - return 0; -} diff --git a/utils/keeper-bench/Runner.cpp b/utils/keeper-bench/Runner.cpp index 13855c6d94e..611ca948c53 100644 --- a/utils/keeper-bench/Runner.cpp +++ b/utils/keeper-bench/Runner.cpp @@ -10,10 +10,12 @@ #include #include + namespace CurrentMetrics { extern const Metric LocalThread; extern const Metric LocalThreadActive; + extern const Metric LocalThreadScheduled; } namespace DB::ErrorCodes @@ -106,7 +108,7 @@ Runner::Runner( std::cerr << "---- Run options ----\n" << std::endl; - pool.emplace(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, concurrency); + pool.emplace(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, concurrency); queue.emplace(concurrency); } @@ -461,4 +463,3 @@ Runner::~Runner() pool->wait(); generator->cleanup(*connections[0]); } - diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 014ee5e9a17..ebe138d597a 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,12 +1,15 @@ +v23.10.5.20-stable 2023-11-25 v23.10.4.25-stable 2023-11-17 v23.10.3.5-stable 2023-11-10 v23.10.2.13-stable 2023-11-08 v23.10.1.1976-stable 2023-11-02 +v23.9.6.20-stable 2023-11-25 v23.9.5.29-stable 2023-11-17 v23.9.4.11-stable 2023-11-08 v23.9.3.12-stable 2023-10-31 v23.9.2.56-stable 2023-10-19 v23.9.1.1854-stable 2023-09-29 +v23.8.8.20-lts 2023-11-25 v23.8.7.24-lts 2023-11-17 v23.8.6.16-lts 2023-11-08 v23.8.5.16-lts 2023-10-31 @@ -34,6 +37,7 @@ v23.4.4.16-stable 2023-06-17 v23.4.3.48-stable 2023-06-12 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.18.15-lts 2023-11-25 v23.3.17.13-lts 2023-11-17 v23.3.16.7-lts 2023-11-08 v23.3.15.29-lts 2023-10-31 diff --git a/utils/prepare-time-trace/prepare-time-trace.sh b/utils/prepare-time-trace/prepare-time-trace.sh index 5f4aad4c0b9..812928e8bd8 100755 --- a/utils/prepare-time-trace/prepare-time-trace.sh +++ b/utils/prepare-time-trace/prepare-time-trace.sh @@ -8,7 +8,7 @@ # See also https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview -< str: with open(VERSIONS_FILE, "r", encoding="utf-8") as fd: versions = [line.split(maxsplit=1)[0][1:] for line in fd.readlines()] - # The versions in VERSIONS_FILE are ordered ascending, so 
the first one is - # the greatest one. We may have supported versions in the previous year - greatest_year = int(versions[0].split(".", maxsplit=1)[0]) - unsupported_year = greatest_year - 2 + supported_year = 0 # set automatically when all supported versions are filled # 3 regular versions regular = [] # type: List[str] max_regular = 3 @@ -82,14 +79,12 @@ def generate_supported_versions() -> str: lts.append(version) to_append = f"| {version} | ✔️ |" if to_append: - if len(regular) == max_regular and len(lts) == max_lts: - # if we reached the max number of supported versions, the rest - # are unsopported, so year.* will be used - unsupported_year = min(greatest_year - 1, year) + if len(regular) == max_regular or len(lts) == max_lts: + supported_year = year table.append(to_append) continue - if year <= unsupported_year: - # The whole year is unsopported + if year < supported_year: + # The whole year is unsupported version = f"{year}.*" if not version in unsupported: unsupported.append(version)